Roll V8 back to 3.6

Roll back to V8 3.6 to fix the x86 build; we don't have ucontext.h.

This reverts commits:
5d4cdbf7a67d3662fa0bee4efdb7edd8daec9b0b
c7cc028aaeedbbfa11c11d0b7b243b3d9e837ed9
592a9fc1d8ea420377a2e7efd0600e20b058be2b

Bug: 5688872
Change-Id: Ic961bb5e65b778e98bbfb71cce71d99fa949e995
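
For context, the x86 breakage comes from newer V8 reading sampled
registers through ucontext_t in its SIGPROF profiler handler
(src/platform-linux.cc), while Android's Bionic libc ships no
ucontext.h. The sketch below is illustrative only, not the exact V8
source; the handler name and guards are assumptions:

    // Illustrative sketch (not the exact V8 code) of the
    // ucontext-dependent pattern that fails to compile on Android x86.
    #include <signal.h>
    #include <ucontext.h>  // Not provided by Android's Bionic libc.

    static void ProfilerSignalHandler(int sig, siginfo_t* info, void* ctx) {
      ucontext_t* ucontext = static_cast<ucontext_t*>(ctx);
      mcontext_t& mcontext = ucontext->uc_mcontext;
    #if defined(__i386__)
      // On i386/glibc, pc/sp/fp come out of the saved register set.
      void* pc = reinterpret_cast<void*>(mcontext.gregs[REG_EIP]);
      void* sp = reinterpret_cast<void*>(mcontext.gregs[REG_ESP]);
      void* fp = reinterpret_cast<void*>(mcontext.gregs[REG_EBP]);
      (void)pc; (void)sp; (void)fp;  // These would feed the tick sampler.
    #endif
    }
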
diff --git a/src/SConscript b/src/SConscript
old mode 100755
new mode 100644
index 0d0b535..52607f1
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,4 +1,4 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -59,7 +59,6 @@
     counters.cc
     cpu-profiler.cc
     data-flow.cc
-    date.cc
     dateparser.cc
     debug-agent.cc
     debug.cc
@@ -79,13 +78,12 @@
     fast-dtoa.cc
     fixed-dtoa.cc
     handles.cc
+    hashmap.cc
     heap-profiler.cc
     heap.cc
     hydrogen.cc
     hydrogen-instructions.cc
     ic.cc
-    incremental-marking.cc
-    interface.cc
     inspector.cc
     interpreter-irregexp.cc
     isolate.cc
@@ -101,7 +99,6 @@
     objects.cc
     objects-printer.cc
     objects-visiting.cc
-    once.cc
     parser.cc
     preparser.cc
     preparse-data.cc
@@ -136,7 +133,6 @@
     v8utils.cc
     variables.cc
     version.cc
-    store-buffer.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
@@ -174,9 +170,6 @@
     mips/frames-mips.cc
     mips/full-codegen-mips.cc
     mips/ic-mips.cc
-    mips/lithium-codegen-mips.cc
-    mips/lithium-gap-resolver-mips.cc
-    mips/lithium-mips.cc
     mips/macro-assembler-mips.cc
     mips/regexp-macro-assembler-mips.cc
     mips/stub-cache-mips.cc
@@ -248,6 +241,7 @@
     dtoa.cc
     fast-dtoa.cc
     fixed-dtoa.cc
+    hashmap.cc
     preparse-data.cc
     preparser.cc
     preparser-api.cc
@@ -325,7 +319,7 @@
 
 EXPERIMENTAL_LIBRARY_FILES = '''
 proxy.js
-collection.js
+weakmap.js
 '''.split()
 
 
diff --git a/src/accessors.cc b/src/accessors.cc
index 8048738..951209d 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,16 +26,15 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
-#include "accessors.h"
 
-#include "contexts.h"
+#include "accessors.h"
+#include "ast.h"
 #include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
-#include "frames-inl.h"
-#include "isolate.h"
 #include "list-inl.h"
-#include "property-details.h"
+#include "safepoint-table.h"
+#include "scopeinfo.h"
 
 namespace v8 {
 namespace internal {
@@ -487,6 +486,16 @@
                                                     NONE);
   }
 
+  if (function->has_initial_map()) {
+    // If the function has allocated the initial map
+    // replace it with a copy containing the new prototype.
+    Object* new_map;
+    { MaybeObject* maybe_new_map =
+          function->initial_map()->CopyDropTransitions();
+      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
+    }
+    function->set_initial_map(Map::cast(new_map));
+  }
   Object* prototype;
   { MaybeObject* maybe_prototype = function->SetPrototype(value);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
@@ -518,9 +527,7 @@
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
     Handle<JSFunction> handle(function);
-    if (!JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
-      return Failure::Exception();
-    }
+    if (!CompileLazy(handle, KEEP_EXCEPTION)) return Failure::Exception();
     return Smi::FromInt(handle->shared()->length());
   } else {
     return Smi::FromInt(function->shared()->length());
@@ -565,12 +572,11 @@
     Handle<JSFunction> inlined_function,
     int inlined_frame_index) {
   Factory* factory = Isolate::Current()->factory();
-  Vector<SlotRef> args_slots =
-      SlotRef::ComputeSlotMappingForArguments(
-          frame,
-          inlined_frame_index,
-          inlined_function->shared()->formal_parameter_count());
-  int args_count = args_slots.length();
+  int args_count = inlined_function->shared()->formal_parameter_count();
+  ScopedVector<SlotRef> args_slots(args_count);
+  SlotRef::ComputeSlotMappingForArguments(frame,
+                                          inlined_frame_index,
+                                          &args_slots);
   Handle<JSObject> arguments =
       factory->NewArgumentsObject(inlined_function, args_count);
   Handle<FixedArray> array = factory->NewFixedArray(args_count);
@@ -579,7 +585,6 @@
     array->set(i, *value);
   }
   arguments->set_elements(*array);
-  args_slots.Dispose();
 
   // Return the freshly allocated arguments object.
   return *arguments;
@@ -614,9 +619,8 @@
 
       if (!frame->is_optimized()) {
         // If there is an arguments variable in the stack, we return that.
-        Handle<ScopeInfo> scope_info(function->shared()->scope_info());
-        int index = scope_info->StackSlotIndex(
-            isolate->heap()->arguments_symbol());
+        Handle<SerializedScopeInfo> info(function->shared()->scope_info());
+        int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
         if (index >= 0) {
           Handle<Object> arguments(frame->GetExpression(index), isolate);
           if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -668,7 +672,7 @@
     Isolate* isolate,
     JSFunction* caller) {
   DisableAssertNoAllocation enable_allocation;
-  if (!caller->shared()->is_classic_mode()) {
+  if (caller->shared()->strict_mode()) {
     return isolate->Throw(
         *isolate->factory()->NewTypeError("strict_caller",
                                           HandleVector<Object>(NULL, 0)));
@@ -755,12 +759,7 @@
     caller = potential_caller;
     potential_caller = it.next();
   }
-  // If caller is bound, return null. This is compatible with JSC, and
-  // allows us to make bound functions use the strict function map
-  // and its associated throwing caller and arguments.
-  if (caller->shared()->bound()) {
-    return isolate->heap()->null_value();
-  }
+
   return CheckNonStrictCallerOrThrow(isolate, caller);
 }
 
diff --git a/src/accessors.h b/src/accessors.h
index 36b9a99..385536d 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_ACCESSORS_H_
 
 #include "allocation.h"
-#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/allocation.cc b/src/allocation.cc
index 6c7a08c..119b087 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,11 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "allocation.h"
-
-#include <stdlib.h>  // For free, malloc.
-#include <string.h>  // For memcpy.
+#include "../include/v8stdint.h"
+#include "globals.h"
 #include "checks.h"
+#include "allocation.h"
 #include "utils.h"
 
 namespace v8 {
diff --git a/src/allocation.h b/src/allocation.h
index 31067dd..75aba35 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_ALLOCATION_H_
 #define V8_ALLOCATION_H_
 
+#include "checks.h"
 #include "globals.h"
 
 namespace v8 {
@@ -80,7 +81,7 @@
 
 
 template <typename T>
-T* NewArray(size_t size) {
+static T* NewArray(int size) {
   T* result = new T[size];
   if (result == NULL) Malloced::FatalProcessOutOfMemory();
   return result;
@@ -88,7 +89,7 @@
 
 
 template <typename T>
-void DeleteArray(T* array) {
+static void DeleteArray(T* array) {
   delete[] array;
 }
 
diff --git a/src/api.cc b/src/api.cc
index 49a026b..39c0d02 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,39 +25,34 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
 #include "api.h"
 
-#include <math.h>  // For isnan.
-#include <string.h>  // For memcpy, strlen.
-#include "../include/v8-debug.h"
-#include "../include/v8-profiler.h"
-#include "../include/v8-testing.h"
+#include "arguments.h"
 #include "bootstrapper.h"
 #include "compiler.h"
-#include "conversions-inl.h"
-#include "counters.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
+#include "flags.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
 #include "messages.h"
-#ifdef COMPRESS_STARTUP_DATA_BZ2
 #include "natives.h"
-#endif
 #include "parser.h"
 #include "platform.h"
 #include "profile-generator-inl.h"
-#include "property-details.h"
-#include "property.h"
 #include "runtime-profiler.h"
 #include "scanner-character-streams.h"
+#include "serialize.h"
 #include "snapshot.h"
-#include "unicode-inl.h"
 #include "v8threads.h"
 #include "version.h"
 #include "vm-state-inl.h"
 
+#include "../include/v8-profiler.h"
+#include "../include/v8-testing.h"
 
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
@@ -83,7 +78,7 @@
   bool has_pending_exception = false
 
 
-#define EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, do_callback)           \
+#define EXCEPTION_BAILOUT_CHECK(isolate, value)                                \
   do {                                                                         \
     i::HandleScopeImplementer* handle_scope_implementer =                      \
         (isolate)->handle_scope_implementer();                                 \
@@ -96,22 +91,11 @@
       }                                                                        \
       bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero();   \
       (isolate)->OptionalRescheduleException(call_depth_is_zero);              \
-      do_callback                                                              \
       return value;                                                            \
     }                                                                          \
-    do_callback                                                                \
   } while (false)
 
 
-#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value)                    \
-  EXCEPTION_BAILOUT_CHECK_GENERIC(                                             \
-      isolate, value, i::V8::FireCallCompletedCallback(isolate);)
-
-
-#define EXCEPTION_BAILOUT_CHECK(isolate, value)                                \
-  EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
-
-
 #define API_ENTRY_CHECK(isolate, msg)                                          \
   do {                                                                         \
     if (v8::Locker::IsActive()) {                                              \
@@ -201,10 +185,7 @@
   int end_marker;
   heap_stats.end_marker = &end_marker;
   i::Isolate* isolate = i::Isolate::Current();
-  // BUG(1718):
-  // Don't use the take_snapshot since we don't support HeapIterator here
-  // without doing a special GC.
-  isolate->heap()->RecordStats(&heap_stats, false);
+  isolate->heap()->RecordStats(&heap_stats, take_snapshot);
   i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   {
@@ -360,7 +341,6 @@
     compressed_data[i].data = decompressed;
   }
   V8::SetDecompressedStartupData(compressed_data);
-  i::DeleteArray(compressed_data);
   return 0;
 }
 
@@ -503,7 +483,7 @@
 
 
 RegisteredExtension::RegisteredExtension(Extension* extension)
-    : extension_(extension) { }
+    : extension_(extension), state_(UNVISITED) { }
 
 
 void RegisteredExtension::Register(RegisteredExtension* that) {
@@ -521,13 +501,9 @@
 Extension::Extension(const char* name,
                      const char* source,
                      int dep_count,
-                     const char** deps,
-                     int source_length)
+                     const char** deps)
     : name_(name),
-      source_length_(source_length >= 0 ?
-                     source_length :
-                     (source ? static_cast<int>(strlen(source)) : 0)),
-      source_(source, source_length_),
+      source_(source),
       dep_count_(dep_count),
       deps_(deps),
       auto_enable_(false) { }
@@ -748,7 +724,6 @@
   i::Context* last_context =
       isolate->handle_scope_implementer()->RestoreContext();
   isolate->set_context(last_context);
-  isolate->set_context_exit_happened(true);
 }
 
 
@@ -1430,21 +1405,21 @@
 
 
 ScriptData* ScriptData::PreCompile(const char* input, int length) {
-  i::Utf8ToUtf16CharacterStream stream(
+  i::Utf8ToUC16CharacterStream stream(
       reinterpret_cast<const unsigned char*>(input), length);
-  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+  return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
 }
 
 
 ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
   i::Handle<i::String> str = Utils::OpenHandle(*source);
   if (str->IsExternalTwoByteString()) {
-    i::ExternalTwoByteStringUtf16CharacterStream stream(
+    i::ExternalTwoByteStringUC16CharacterStream stream(
       i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
   } else {
-    i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
-    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+    i::GenericStringUC16CharacterStream stream(str, 0, str->length());
+    return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_block_scoping);
   }
 }
 
@@ -1587,7 +1562,7 @@
         isolate->context()->global_proxy(), isolate);
     i::Handle<i::Object> result =
         i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
     raw_result = *result;
   }
   i::Handle<i::Object> result(raw_result, isolate);
@@ -1813,7 +1788,7 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> recv,
                                                int argc,
-                                               i::Handle<i::Object> argv[],
+                                               i::Object** argv[],
                                                bool* has_pending_exception) {
   i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
@@ -1830,10 +1805,10 @@
 static i::Handle<i::Object> CallV8HeapFunction(const char* name,
                                                i::Handle<i::Object> data,
                                                bool* has_pending_exception) {
-  i::Handle<i::Object> argv[] = { data };
+  i::Object** argv[1] = { data.location() };
   return CallV8HeapFunction(name,
                             i::Isolate::Current()->js_builtins_object(),
-                            ARRAY_SIZE(argv),
+                            1,
                             argv,
                             has_pending_exception);
 }
@@ -2173,11 +2148,6 @@
   if (obj->IsSmi()) return true;
   if (obj->IsNumber()) {
     double value = obj->Number();
-    static const i::DoubleRepresentation minus_zero(-0.0);
-    i::DoubleRepresentation rep(value);
-    if (rep.bits == minus_zero.bits) {
-      return false;
-    }
     return i::FastI2D(i::FastD2I(value)) == value;
   }
   return false;
@@ -2190,11 +2160,6 @@
   if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
   if (obj->IsNumber()) {
     double value = obj->Number();
-    static const i::DoubleRepresentation minus_zero(-0.0);
-    i::DoubleRepresentation rep(value);
-    if (rep.bits == minus_zero.bits) {
-      return false;
-    }
     return i::FastUI2D(i::FastD2UI(value)) == value;
   }
   return false;
@@ -2663,11 +2628,10 @@
   if (obj->IsJSObject() && other->IsJSObject()) {
     return *obj == *other;
   }
-  i::Handle<i::Object> args[] = { other };
+  i::Object** args[1] = { other.location() };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result =
-      CallV8HeapFunction("EQUALS", obj, ARRAY_SIZE(args), args,
-                         &has_pending_exception);
+      CallV8HeapFunction("EQUALS", obj, 1, args, &has_pending_exception);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -2757,11 +2721,10 @@
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> obj = i::JSObject::SetElement(
+  i::Handle<i::Object> obj = i::SetElement(
       self,
       index,
       value_obj,
-      NONE,
       i::kNonStrictMode);
   has_pending_exception = obj.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, false);
@@ -2831,7 +2794,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::Object::GetElement(self, index);
+  i::Handle<i::Object> result = i::GetElement(self, index);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(result);
@@ -2864,7 +2827,7 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::Object> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> result(self->GetPrototype());
+  i::Handle<i::Object> result = i::GetPrototype(self);
   return Utils::ToLocal(result);
 }
 
@@ -2911,10 +2874,8 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS, &threw);
-  if (threw) return Local<v8::Array>();
+      i::GetKeysInFixedArrayFor(self, i::INCLUDE_PROTOS);
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -2932,10 +2893,8 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  bool threw = false;
   i::Handle<i::FixedArray> value =
-      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY, &threw);
-  if (threw) return Local<v8::Array>();
+      i::GetKeysInFixedArrayFor(self, i::LOCAL_ONLY);
   // Because we use caching to speed up enumeration it is important
   // to never change the result of the basic enumeration function so
   // we clone the result.
@@ -3018,7 +2977,7 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
+  return i::DeleteProperty(self, key_obj)->IsTrue();
 }
 
 
@@ -3039,7 +2998,7 @@
   ENTER_V8(isolate);
   HandleScope scope;
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return i::JSObject::DeleteElement(self, index)->IsTrue();
+  return i::DeleteElement(self, index)->IsTrue();
 }
 
 
@@ -3064,11 +3023,8 @@
   i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
                                                      getter, setter, data,
                                                      settings, attributes);
-  bool fast = Utils::OpenHandle(this)->HasFastProperties();
   i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
-  if (result.is_null() || result->IsUndefined()) return false;
-  if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0);
-  return true;
+  return !result.is_null() && !result->IsUndefined();
 }
 
 
@@ -3137,10 +3093,7 @@
   // If the property being looked up is a callback, it can throw
   // an exception.
   EXCEPTION_PREAMBLE(isolate);
-  PropertyAttributes ignored;
-  i::Handle<i::Object> result =
-      i::Object::GetProperty(receiver, receiver, lookup, name,
-                             &ignored);
+  i::Handle<i::Object> result = i::GetProperty(receiver, name, lookup);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
 
@@ -3157,7 +3110,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup(isolate);
+  i::LookupResult lookup;
   self_obj->LookupRealNamedPropertyInPrototypes(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3170,7 +3123,7 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup(isolate);
+  i::LookupResult lookup;
   self_obj->LookupRealNamedProperty(*key_obj, &lookup);
   return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
 }
@@ -3247,7 +3200,7 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return i::JSObject::GetIdentityHash(self);
+  return i::GetIdentityHash(self);
 }
 
 
@@ -3258,11 +3211,21 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::ALLOW_CREATION));
+  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
-  i::Handle<i::Object> result =
-      i::JSObject::SetHiddenProperty(self, key_obj, value_obj);
-  return *result == *self;
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> obj = i::SetProperty(
+      hidden_props,
+      key_obj,
+      value_obj,
+      static_cast<PropertyAttributes>(None),
+      i::kNonStrictMode);
+  has_pending_exception = obj.is_null();
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return true;
 }
 
 
@@ -3272,9 +3235,20 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::OMIT_CREATION));
+  if (hidden_props->IsUndefined()) {
+    return v8::Local<v8::Value>();
+  }
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
-  if (result->IsUndefined()) return v8::Local<v8::Value>();
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
+  has_pending_exception = result.is_null();
+  EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
+  if (result->IsUndefined()) {
+    return v8::Local<v8::Value>();
+  }
   return Utils::ToLocal(result);
 }
 
@@ -3285,9 +3259,15 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
+      self,
+      i::JSObject::OMIT_CREATION));
+  if (hidden_props->IsUndefined()) {
+    return true;
+  }
+  i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  self->DeleteHiddenProperty(*key_obj);
-  return true;
+  return i::DeleteProperty(js_obj, key_obj)->IsTrue();
 }
 
 
@@ -3337,12 +3317,22 @@
   i::Handle<i::ExternalArray> array =
       isolate->factory()->NewExternalArray(length, array_type, data);
 
-  i::Handle<i::Map> external_array_map =
-      isolate->factory()->GetElementsTransitionMap(
-          object,
-          GetElementsKindFromExternalArrayType(array_type));
-
-  object->set_map(*external_array_map);
+  // If the object already has external elements, create a new, unique
+  // map if the element type is now changing, because assumptions about
+  // generated code based on the receiver's map will be invalid.
+  i::Handle<i::HeapObject> elements(object->elements());
+  bool cant_reuse_map =
+      elements->map()->IsUndefined() ||
+      !elements->map()->has_external_array_elements() ||
+      elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
+  if (cant_reuse_map) {
+    i::Handle<i::Map> external_array_map =
+        isolate->factory()->GetElementsTransitionMap(
+            i::Handle<i::Map>(object->map()),
+            GetElementsKindFromExternalArrayType(array_type),
+            object->HasFastProperties());
+    object->set_map(*external_array_map);
+  }
   object->set_elements(*array);
 }
 
@@ -3501,8 +3491,7 @@
 }
 
 
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
-                                        int argc,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, int argc,
                                         v8::Handle<v8::Value> argv[]) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
@@ -3513,7 +3502,7 @@
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
   i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
   if (obj->IsJSFunction()) {
     fun = i::Handle<i::JSFunction>::cast(obj);
@@ -3528,7 +3517,7 @@
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned =
       i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
-  EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(scope.CloseAndEscape(returned));
 }
 
@@ -3543,13 +3532,13 @@
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
   if (obj->IsJSFunction()) {
     i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::New(fun, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
     return Utils::ToLocal(scope.CloseAndEscape(
         i::Handle<i::JSObject>::cast(returned)));
   }
@@ -3562,7 +3551,7 @@
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::Call(fun, obj, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
     ASSERT(!delegate->IsUndefined());
     return Utils::ToLocal(scope.CloseAndEscape(returned));
   }
@@ -3585,11 +3574,11 @@
   HandleScope scope;
   i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-  i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+  i::Object*** args = reinterpret_cast<i::Object***>(argv);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned =
       i::Execution::New(function, argc, args, &has_pending_exception);
-  EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
   return scope.Close(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
 }
 
@@ -3606,11 +3595,11 @@
     i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
     i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
     STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
-    i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
+    i::Object*** args = reinterpret_cast<i::Object***>(argv);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned =
         i::Execution::Call(fun, recv_obj, argc, args, &has_pending_exception);
-    EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
     raw_result = *returned;
   }
   i::Handle<i::Object> result(raw_result);
@@ -3633,12 +3622,6 @@
 }
 
 
-Handle<Value> Function::GetInferredName() const {
-  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
-  return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
-}
-
-
 ScriptOrigin Function::GetScriptOrigin() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (func->shared()->script()->IsScript()) {
@@ -3666,23 +3649,6 @@
 }
 
 
-int Function::GetScriptColumnNumber() const {
-  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
-  if (func->shared()->script()->IsScript()) {
-    i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
-    return i::GetScriptColumnNumber(script, func->shared()->start_position());
-  }
-  return kLineOffsetNotFound;
-}
-
-Handle<Value> Function::GetScriptId() const {
-  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
-  if (!func->shared()->script()->IsScript())
-    return v8::Undefined();
-  i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
-  return Utils::ToLocal(i::Handle<i::Object>(script->id()));
-}
-
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
@@ -3693,104 +3659,7 @@
 int String::Utf8Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
-  return i::Utf8Length(str);
-}
-
-
-// Will fail with a negative answer if the recursion depth is too high.
-static int RecursivelySerializeToUtf8(i::String* string,
-                                      char* buffer,
-                                      int start,
-                                      int end,
-                                      int recursion_budget,
-                                      int32_t previous_character,
-                                      int32_t* last_character) {
-  int utf8_bytes = 0;
-  while (true) {
-    if (string->IsAsciiRepresentation()) {
-      i::String::WriteToFlat(string, buffer, start, end);
-      *last_character = unibrow::Utf16::kNoPreviousCharacter;
-      return utf8_bytes + end - start;
-    }
-    switch (i::StringShape(string).representation_tag()) {
-      case i::kExternalStringTag: {
-        const uint16_t* data = i::ExternalTwoByteString::cast(string)->
-          ExternalTwoByteStringGetData(0);
-        char* current = buffer;
-        for (int i = start; i < end; i++) {
-          uint16_t character = data[i];
-          current +=
-              unibrow::Utf8::Encode(current, character, previous_character);
-          previous_character = character;
-        }
-        *last_character = previous_character;
-        return static_cast<int>(utf8_bytes + current - buffer);
-      }
-      case i::kSeqStringTag: {
-        const uint16_t* data =
-            i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
-        char* current = buffer;
-        for (int i = start; i < end; i++) {
-          uint16_t character = data[i];
-          current +=
-              unibrow::Utf8::Encode(current, character, previous_character);
-          previous_character = character;
-        }
-        *last_character = previous_character;
-        return static_cast<int>(utf8_bytes + current - buffer);
-      }
-      case i::kSlicedStringTag: {
-        i::SlicedString* slice = i::SlicedString::cast(string);
-        unsigned offset = slice->offset();
-        string = slice->parent();
-        start += offset;
-        end += offset;
-        continue;
-      }
-      case i::kConsStringTag: {
-        i::ConsString* cons_string = i::ConsString::cast(string);
-        i::String* first = cons_string->first();
-        int boundary = first->length();
-        if (start >= boundary) {
-          // Only need RHS.
-          string = cons_string->second();
-          start -= boundary;
-          end -= boundary;
-          continue;
-        } else if (end <= boundary) {
-          // Only need LHS.
-          string = first;
-        } else {
-          if (recursion_budget == 0) return -1;
-          int extra_utf8_bytes =
-              RecursivelySerializeToUtf8(first,
-                                         buffer,
-                                         start,
-                                         boundary,
-                                         recursion_budget - 1,
-                                         previous_character,
-                                         &previous_character);
-          if (extra_utf8_bytes < 0) return extra_utf8_bytes;
-          buffer += extra_utf8_bytes;
-          utf8_bytes += extra_utf8_bytes;
-          string = cons_string->second();
-          start = 0;
-          end -= boundary;
-        }
-      }
-    }
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-
-bool String::MayContainNonAscii() const {
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
-    return false;
-  }
-  return !str->HasOnlyAsciiChars();
+  return str->Utf8Length();
 }
 
 
@@ -3802,67 +3671,13 @@
   if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
   LOG_API(isolate, "String::WriteUtf8");
   ENTER_V8(isolate);
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  int string_length = str->length();
-  if (str->IsAsciiRepresentation()) {
-    int len;
-    if (capacity == -1) {
-      capacity = str->length() + 1;
-      len = string_length;
-    } else {
-      len = i::Min(capacity, str->length());
-    }
-    i::String::WriteToFlat(*str, buffer, 0, len);
-    if (nchars_ref != NULL) *nchars_ref = len;
-    if (!(options & NO_NULL_TERMINATION) && capacity > len) {
-      buffer[len] = '\0';
-      return len + 1;
-    }
-    return len;
-  }
-
-  if (capacity == -1 || capacity / 3 >= string_length) {
-    int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
-    const int kMaxRecursion = 100;
-    int utf8_bytes =
-        RecursivelySerializeToUtf8(*str,
-                                   buffer,
-                                   0,
-                                   string_length,
-                                   kMaxRecursion,
-                                   previous,
-                                   &previous);
-    if (utf8_bytes >= 0) {
-      // Success serializing with recursion.
-      if ((options & NO_NULL_TERMINATION) == 0 &&
-          (capacity > utf8_bytes || capacity == -1)) {
-        buffer[utf8_bytes++] = '\0';
-      }
-      if (nchars_ref != NULL) *nchars_ref = string_length;
-      return utf8_bytes;
-    }
-    FlattenString(str);
-    // Recurse once.  This time around the string is flat and the serializing
-    // with recursion will certainly succeed.
-    return WriteUtf8(buffer, capacity, nchars_ref, options);
-  } else if (capacity >= string_length) {
-    // First check that the buffer is large enough.  If it is, then recurse
-    // once without a capacity limit, which will get into the other branch of
-    // this 'if'.
-    int utf8_bytes = i::Utf8Length(str);
-    if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
-    if (utf8_bytes <= capacity) {
-      return WriteUtf8(buffer, -1, nchars_ref, options);
-    }
-  }
-
-  // Slow case.
   i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
+  i::Handle<i::String> str = Utils::OpenHandle(this);
   isolate->string_tracker()->RecordWrite(str);
   if (options & HINT_MANY_WRITES_EXPECTED) {
     // Flatten the string for efficiency.  This applies whether we are
     // using StringInputBuffer or Get(i) to access the characters.
-    FlattenString(str);
+    str->TryFlatten();
   }
   write_input_buffer.Reset(0, *str);
   int len = str->length();
@@ -3873,13 +3688,11 @@
   int i;
   int pos = 0;
   int nchars = 0;
-  int previous = unibrow::Utf16::kNoPreviousCharacter;
   for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
     i::uc32 c = write_input_buffer.GetNext();
-    int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
+    int written = unibrow::Utf8::Encode(buffer + pos, c);
     pos += written;
     nchars++;
-    previous = c;
   }
   if (i < len) {
     // For the last characters we need to check the length for each one
@@ -3888,33 +3701,16 @@
     char intermediate[unibrow::Utf8::kMaxEncodedSize];
     for (; i < len && pos < capacity; i++) {
       i::uc32 c = write_input_buffer.GetNext();
-      if (unibrow::Utf16::IsTrailSurrogate(c) &&
-          unibrow::Utf16::IsLeadSurrogate(previous)) {
-        // We can't use the intermediate buffer here because the encoding
-        // of surrogate pairs is done under assumption that you can step
-        // back and fix the UTF8 stream.  Luckily we only need space for one
-        // more byte, so there is always space.
-        ASSERT(pos < capacity);
-        int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
-        ASSERT(written == 1);
+      int written = unibrow::Utf8::Encode(intermediate, c);
+      if (pos + written <= capacity) {
+        for (int j = 0; j < written; j++)
+          buffer[pos + j] = intermediate[j];
         pos += written;
         nchars++;
       } else {
-        int written =
-            unibrow::Utf8::Encode(intermediate,
-                                  c,
-                                  unibrow::Utf16::kNoPreviousCharacter);
-        if (pos + written <= capacity) {
-          for (int j = 0; j < written; j++)
-            buffer[pos + j] = intermediate[j];
-          pos += written;
-          nchars++;
-        } else {
-          // We've reached the end of the buffer
-          break;
-        }
+        // We've reached the end of the buffer
+        break;
       }
-      previous = c;
     }
   }
   if (nchars_ref != NULL) *nchars_ref = nchars;
@@ -4010,11 +3806,10 @@
 void v8::String::VerifyExternalStringResource(
     v8::String::ExternalStringResource* value) const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  const v8::String::ExternalStringResource* expected;
+  v8::String::ExternalStringResource* expected;
   if (i::StringShape(*str).IsExternalTwoByte()) {
-    const void* resource =
-        i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-    expected = reinterpret_cast<const ExternalStringResource*>(resource);
+    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<ExternalStringResource*>(resource);
   } else {
     expected = NULL;
   }
@@ -4022,7 +3817,7 @@
 }
 
 
-const v8::String::ExternalAsciiStringResource*
+v8::String::ExternalAsciiStringResource*
       v8::String::GetExternalAsciiStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(),
@@ -4030,9 +3825,8 @@
     return NULL;
   }
   if (i::StringShape(*str).IsExternalAscii()) {
-    const void* resource =
-        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-    return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
+    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
   } else {
     return NULL;
   }
@@ -4170,7 +3964,7 @@
 
 
 bool v8::V8::Initialize() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   if (isolate != NULL && isolate->IsInitialized()) {
     return true;
   }
@@ -4183,12 +3977,6 @@
 }
 
 
-void v8::V8::SetReturnAddressLocationResolver(
-      ReturnAddressLocationResolver return_address_resolver) {
-  i::V8::SetReturnAddressLocationResolver(return_address_resolver);
-}
-
-
 bool v8::V8::Dispose() {
   i::Isolate* isolate = i::Isolate::Current();
   if (!ApiCheck(isolate != NULL && isolate->IsDefaultIsolate(),
@@ -4208,15 +3996,6 @@
 
 
 void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
-  if (!i::Isolate::Current()->IsInitialized()) {
-    // Isolate is unitialized thus heap is not configured yet.
-    heap_statistics->set_total_heap_size(0);
-    heap_statistics->set_total_heap_size_executable(0);
-    heap_statistics->set_used_heap_size(0);
-    heap_statistics->set_heap_size_limit(0);
-    return;
-  }
-
   i::Heap* heap = i::Isolate::Current()->heap();
   heap_statistics->set_total_heap_size(heap->CommittedMemory());
   heap_statistics->set_total_heap_size_executable(
@@ -4226,26 +4005,18 @@
 }
 
 
-void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
-  isolate->heap()->VisitExternalResources(visitor);
-}
-
-
-bool v8::V8::IdleNotification(int hint) {
+bool v8::V8::IdleNotification() {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
-  i::Isolate* isolate = i::Isolate::Current();
-  if (isolate == NULL || !isolate->IsInitialized()) return true;
-  return i::V8::IdleNotification(hint);
+  if (!i::Isolate::Current()->IsInitialized()) return true;
+  return i::V8::IdleNotification();
 }
 
 
 void v8::V8::LowMemoryNotification() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (isolate == NULL || !isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllAvailableGarbage("low memory notification");
+  if (!isolate->IsInitialized()) return;
+  isolate->heap()->CollectAllGarbage(true);
 }
 
 
@@ -4339,9 +4110,8 @@
   }
   // Leave V8.
 
-  if (env.is_null()) {
+  if (env.is_null())
     return Persistent<Context>();
-  }
   return Persistent<Context>(Utils::ToLocal(env));
 }
 
@@ -4481,20 +4251,6 @@
 }
 
 
-bool Context::IsCodeGenerationFromStringsAllowed() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate,
-                  "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
-    return false;
-  }
-  ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
-  return !context->allow_code_gen_from_strings()->IsFalse();
-}
-
-
 void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
   i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
 }
@@ -4543,7 +4299,7 @@
 }
 
 static void* ExternalValueImpl(i::Handle<i::Object> obj) {
-  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
+  return reinterpret_cast<void*>(i::Foreign::cast(*obj)->address());
 }
 
 
@@ -4569,7 +4325,7 @@
   if (value->IsSmi()) {
     return i::Internals::GetExternalPointerFromSmi(value);
   } else if (value->IsForeign()) {
-    return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
+    return reinterpret_cast<void*>(i::Foreign::cast(value)->address());
   } else {
     return NULL;
   }
@@ -4779,13 +4535,15 @@
 
 
 bool v8::String::CanMakeExternal() {
-  if (!internal::FLAG_clever_optimizations) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
   if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
-  if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
+  if (isolate->string_tracker()->IsFreshUnusedString(obj)) {
+    return false;
+  }
   int size = obj->Size();  // Byte size of the original string.
-  if (size < i::ExternalString::kShortSize) return false;
+  if (size < i::ExternalString::kSize)
+    return false;
   i::StringShape shape(*obj);
   return !shape.IsExternal();
 }
@@ -4891,8 +4649,8 @@
   if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
   LOG_API(isolate, "Date::NumberValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
-  return jsdate->value()->Number();
+  i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+  return jsvalue->value()->Number();
 }
 
 
@@ -4903,10 +4661,8 @@
   LOG_API(isolate, "Date::DateTimeConfigurationChangeNotification");
   ENTER_V8(isolate);
 
-  isolate->date_cache()->ResetDateCache();
-
   i::HandleScope scope(isolate);
-  // Get the function ResetDateCache (defined in date.js).
+  // Get the function ResetDateCache (defined in date-delay.js).
   i::Handle<i::String> func_name_str =
       isolate->factory()->LookupAsciiSymbol("ResetDateCache");
   i::MaybeObject* result =
@@ -5063,7 +4819,7 @@
 
 
 Local<Integer> v8::Integer::New(int32_t value) {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   EnsureInitializedForIsolate(isolate, "v8::Integer::New()");
   if (i::Smi::IsValid(value)) {
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
@@ -5121,7 +4877,7 @@
 
     NeanderObject listener(i::JSObject::cast(listeners.get(i)));
     i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
-    if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
+    if (callback_obj->address() == FUNCTION_ADDR(that)) {
       listeners.set(i, isolate->heap()->undefined_value());
     }
   }
@@ -5267,21 +5023,6 @@
 }
 
 
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
-  if (callback == NULL) return;
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
-  i::V8::AddCallCompletedCallback(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
-  i::V8::RemoveCallCompletedCallback(callback);
-}
-
-
 void V8::PauseProfiler() {
   i::Isolate* isolate = i::Isolate::Current();
   isolate->logger()->PauseProfiler();
@@ -5341,7 +5082,7 @@
 
 
 Isolate* Isolate::GetCurrent() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
   return reinterpret_cast<Isolate*>(isolate);
 }
 
@@ -5396,8 +5137,7 @@
   TryCatch try_catch;
   Handle<String> str = obj->ToString();
   if (str.IsEmpty()) return;
-  i::Handle<i::String> i_str = Utils::OpenHandle(*str);
-  length_ = i::Utf8Length(i_str);
+  length_ = str->Utf8Length();
   str_ = i::NewArray<char>(length_ + 1);
   str->WriteUtf8(str_);
 }
@@ -5747,14 +5487,8 @@
                                                        wait_for_connection);
 }
 
-
-void Debug::DisableAgent() {
-  return i::Isolate::Current()->debugger()->StopAgent();
-}
-
-
 void Debug::ProcessDebugMessages() {
-  i::Execution::ProcessDebugMessages(true);
+  i::Execution::ProcessDebugMesssages(true);
 }
 
 Local<Context> Debug::GetDebugContext() {
@@ -6033,10 +5767,10 @@
 }
 
 
-int HeapGraphNode::GetRetainedSize() const {
+int HeapGraphNode::GetRetainedSize(bool exact) const {
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize");
-  return ToInternal(this)->retained_size();
+  return ToInternal(this)->RetainedSize(exact);
 }
 
 
@@ -6138,7 +5872,7 @@
   i::Isolate* isolate = i::Isolate::Current();
   IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
   return reinterpret_cast<const HeapGraphNode*>(
-      ToInternal(this)->GetEntryById(static_cast<i::SnapshotObjectId>(id)));
+      ToInternal(this)->GetEntryById(id));
 }
 
 
@@ -6231,11 +5965,6 @@
 }
 
 
-int HeapProfiler::GetPersistentHandleCount() {
-  i::Isolate* isolate = i::Isolate::Current();
-  return isolate->global_handles()->NumberOfGlobalHandles();
-}
-
 
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
@@ -6264,7 +5993,9 @@
 
 void Testing::PrepareStressRun(int run) {
   static const char* kLazyOptimizations =
-      "--prepare-always-opt --nolimit-inlining --noalways-opt";
+      "--prepare-always-opt --nolimit-inlining "
+      "--noalways-opt --noopt-eagerly";
+  static const char* kEagerOptimizations = "--opt-eagerly";
   static const char* kForcedOptimizations = "--always-opt";
 
   // If deoptimization stressed turn on frequent deoptimization. If no value
@@ -6281,12 +6012,15 @@
   if (run == GetStressRuns() - 1) {
     SetFlagsFromString(kForcedOptimizations);
   } else {
+    SetFlagsFromString(kEagerOptimizations);
     SetFlagsFromString(kLazyOptimizations);
   }
 #else
   if (run == GetStressRuns() - 1) {
     SetFlagsFromString(kForcedOptimizations);
-  } else if (run != GetStressRuns() - 2) {
+  } else if (run == GetStressRuns() - 2) {
+    SetFlagsFromString(kEagerOptimizations);
+  } else {
     SetFlagsFromString(kLazyOptimizations);
   }
 #endif
diff --git a/src/api.h b/src/api.h
index 89cf0c8..07723cb 100644
--- a/src/api.h
+++ b/src/api.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,14 +28,10 @@
 #ifndef V8_API_H_
 #define V8_API_H_
 
-#include "v8.h"
+#include "apiutils.h"
+#include "factory.h"
 
 #include "../include/v8-testing.h"
-#include "apiutils.h"
-#include "contexts.h"
-#include "factory.h"
-#include "isolate.h"
-#include "list-inl.h"
 
 namespace v8 {
 
@@ -116,16 +112,15 @@
 }
 
 
-template <typename T> inline T ToCData(v8::internal::Object* obj) {
+template <typename T> static inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(
-          v8::internal::Foreign::cast(obj)->foreign_address()));
+      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
 }
 
 
 template <typename T>
-inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
+static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return FACTORY->NewForeign(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
@@ -141,6 +136,10 @@
 };
 
 
+enum ExtensionTraversalState {
+  UNVISITED, VISITED, INSTALLED
+};
+
 
 class RegisteredExtension {
  public:
@@ -149,11 +148,14 @@
   Extension* extension() { return extension_; }
   RegisteredExtension* next() { return next_; }
   RegisteredExtension* next_auto() { return next_auto_; }
+  ExtensionTraversalState state() { return state_; }
+  void set_state(ExtensionTraversalState value) { state_ = value; }
   static RegisteredExtension* first_extension() { return first_extension_; }
  private:
   Extension* extension_;
   RegisteredExtension* next_;
   RegisteredExtension* next_auto_;
+  ExtensionTraversalState state_;
   static RegisteredExtension* first_extension_;
 };
 
@@ -240,7 +242,7 @@
 
 
 template <class T>
-inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
+static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) {
   return reinterpret_cast<T*>(obj.location());
 }
 
@@ -481,7 +483,7 @@
 };
 
 
-const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
+static const int kHandleBlockSize = v8::internal::KB - 2;  // fit in one page
 
 
 void HandleScopeImplementer::SaveContext(Context* context) {
diff --git a/src/apinatives.js b/src/apinatives.js
index 79b41dd..e94da9f 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -37,8 +37,8 @@
 }
 
 
-var kApiFunctionCache = {};
-var functionCache = kApiFunctionCache;
+const kApiFunctionCache = {};
+const functionCache = kApiFunctionCache;
 
 
 function Instantiate(data, name) {
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index d5db686..5ad7b5a 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -38,7 +38,6 @@
 #define V8_ARM_ASSEMBLER_ARM_INL_H_
 
 #include "arm/assembler-arm.h"
-
 #include "cpu.h"
 #include "debug.h"
 
@@ -72,26 +71,19 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
 }
 
 
 int RelocInfo::target_address_size() {
-  return kPointerSize;
+  return Assembler::kExternalTargetSize;
 }
 
 
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -113,15 +105,9 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-  if (mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-  }
 }
 
 
@@ -148,17 +134,10 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
-                                WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
-  }
 }
 
 
@@ -175,11 +154,6 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-  if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -228,13 +202,13 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(this);
+    visitor->VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(this);
+    visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -254,13 +228,13 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, this);
+    StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(this);
+    StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
@@ -364,14 +338,8 @@
 }
 
 
-void Assembler::deserialization_set_special_target_at(
-    Address constant_pool_entry, Address target) {
-  Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_external_target_at(Address constant_pool_entry,
-                                       Address target) {
+void Assembler::set_target_at(Address constant_pool_entry,
+                              Address target) {
   Memory::Address_at(constant_pool_entry) = target;
 }
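
The dominant change in this header is mechanical: every RelocInfo setter loses its
WriteBarrierMode parameter and its incremental-marking hook, because 3.6 predates
incremental marking. A condensed C++ sketch of the 3.7 pattern being deleted,
reassembled from the removed lines above:

    // 3.7 shape (removed): the raw store plus a GC notification.
    void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
      Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
      if (mode == UPDATE_WRITE_BARRIER &&
          host() != NULL &&            // the Code object containing this slot
          target->IsHeapObject()) {    // smis never need a barrier
        host()->GetHeap()->incremental_marking()->RecordWrite(
            host(), &Memory::Object_at(pc_), HeapObject::cast(target));
      }
    }
    // 3.6 shape (restored): the raw store only. There is no incremental
    // marker to notify, so a missing barrier cannot leave a stale mark.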
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ec28da4..0ec3692 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -66,13 +66,11 @@
 
 #ifdef __arm__
   // If the compiler is allowed to use VFP then we can use VFP too in our code
-  // generation even when generating snapshots. ARMv7 and hardware floating
-  // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
-    && !defined(__SOFTFP__)
+  // generation even when generating snapshots.  This won't work for cross
+  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+#if defined(__VFP_FP__) && !defined(__SOFTFP__)
   answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
-        // && !defined(__SOFTFP__)
+#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
 #endif  // def __arm__
 
   return answer;
@@ -80,9 +78,7 @@
 
 
 void CpuFeatures::Probe() {
-  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
-                                CpuFeaturesImpliedByCompiler());
-  ASSERT(supported_ == 0 || supported_ == standard_features);
+  ASSERT(!initialized_);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -90,7 +86,8 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= standard_features;
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -139,6 +136,7 @@
 }
 
 
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -237,27 +235,25 @@
 
 // add(sp, sp, 4) instruction (aka Pop())
 const Instr kPopInstruction =
-    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
-        kRegister_sp_Code * B12;
+    al | PostIndex | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
 const Instr kPushRegPattern =
-    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
+    al | B26 | 4 | NegPreIndex | sp.code() * B16;
 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
 // register r is not encoded.
 const Instr kPopRegPattern =
-    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
+    al | B26 | L | 4 | PostIndex | sp.code() * B16;
 // mov lr, pc
-const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
+const Instr kMovLrPc = al | MOV | pc.code() | lr.code() * B12;
 // ldr rd, [pc, #offset]
 const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCPattern = al | 5 * B24 | L | pc.code() * B16;
 // blxcc rm
 const Instr kBlxRegMask =
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
-const Instr kBlxIp = al | kBlxRegPattern | ip.code();
 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
 const Instr kMovMvnPattern = 0xd * B21;
 const Instr kMovMvnFlip = B22;
@@ -274,13 +270,13 @@
 
 // A mask for the Rd register for push, pop, ldr, str instructions.
 const Instr kLdrRegFpOffsetPattern =
-    al | B26 | L | Offset | kRegister_fp_Code * B16;
+    al | B26 | L | Offset | fp.code() * B16;
 const Instr kStrRegFpOffsetPattern =
-    al | B26 | Offset | kRegister_fp_Code * B16;
+    al | B26 | Offset | fp.code() * B16;
 const Instr kLdrRegFpNegOffsetPattern =
-    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
+    al | B26 | L | NegOffset | fp.code() * B16;
 const Instr kStrRegFpNegOffsetPattern =
-    al | B26 | NegOffset | kRegister_fp_Code * B16;
+    al | B26 | NegOffset | fp.code() * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
 const Instr kLdrStrOffsetMask = 0x00000fff;
@@ -320,7 +316,7 @@
     own_buffer_ = false;
   }
 
-  // Set up buffer pointers.
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -352,7 +348,7 @@
   CheckConstPool(true, false);
   ASSERT(num_pending_reloc_info_ == 0);
 
-  // Set up code descriptor.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -2449,7 +2445,7 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // no overflow
 
-  // Set up new buffer.
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
@@ -2509,8 +2505,7 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  // We do not try to reuse pool constants.
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2542,7 +2537,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
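
The CpuFeatures churn above is easier to follow with the bitmask model in mind:
each CpuFeature enum value is a bit index, Probe() ORs implied and detected bits
into supported_, and a query is a single bit test. A standalone sketch (enum
values illustrative, not authoritative):

    enum CpuFeature { VFP3 = 1, ARMv7 = 2 };   // bit indices (illustrative)
    static unsigned supported_ = 0;

    static void Probe() {
      // Features implied by the compiler flags, as in the hunk above.
    #if defined(__VFP_FP__) && !defined(__SOFTFP__)
      supported_ |= 1u << VFP3 | 1u << ARMv7;
    #endif
    }

    static bool IsSupported(CpuFeature f) {
      return (supported_ & (1u << f)) != 0;    // single bit test
    }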
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e2d5f59..eeadaca 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -124,47 +124,24 @@
   int code_;
 };
 
-// These constants are used in several locations, including static initializers
-const int kRegister_no_reg_Code = -1;
-const int kRegister_r0_Code = 0;
-const int kRegister_r1_Code = 1;
-const int kRegister_r2_Code = 2;
-const int kRegister_r3_Code = 3;
-const int kRegister_r4_Code = 4;
-const int kRegister_r5_Code = 5;
-const int kRegister_r6_Code = 6;
-const int kRegister_r7_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_fp_Code = 11;
-const int kRegister_ip_Code = 12;
-const int kRegister_sp_Code = 13;
-const int kRegister_lr_Code = 14;
-const int kRegister_pc_Code = 15;
+const Register no_reg = { -1 };
 
-const Register no_reg = { kRegister_no_reg_Code };
-
-const Register r0  = { kRegister_r0_Code };
-const Register r1  = { kRegister_r1_Code };
-const Register r2  = { kRegister_r2_Code };
-const Register r3  = { kRegister_r3_Code };
-const Register r4  = { kRegister_r4_Code };
-const Register r5  = { kRegister_r5_Code };
-const Register r6  = { kRegister_r6_Code };
-const Register r7  = { kRegister_r7_Code };
-// Used as context register.
-const Register r8  = { kRegister_r8_Code };
-// Used as lithium codegen scratch register.
-const Register r9  = { kRegister_r9_Code };
-// Used as roots register.
-const Register r10 = { kRegister_r10_Code };
-const Register fp  = { kRegister_fp_Code };
-const Register ip  = { kRegister_ip_Code };
-const Register sp  = { kRegister_sp_Code };
-const Register lr  = { kRegister_lr_Code };
-const Register pc  = { kRegister_pc_Code };
-
+const Register r0  = {  0 };
+const Register r1  = {  1 };
+const Register r2  = {  2 };
+const Register r3  = {  3 };
+const Register r4  = {  4 };
+const Register r5  = {  5 };
+const Register r6  = {  6 };
+const Register r7  = {  7 };
+const Register r8  = {  8 };  // Used as context register.
+const Register r9  = {  9 };  // Used as lithium codegen scratch register.
+const Register r10 = { 10 };  // Used as roots register.
+const Register fp  = { 11 };
+const Register ip  = { 12 };
+const Register sp  = { 13 };
+const Register lr  = { 14 };
+const Register pc  = { 15 };
 
 // Single word VFP register.
 struct SwVfpRegister {
@@ -323,13 +300,11 @@
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };
 
-// Aliases for double registers.  Defined using #define instead of
-// "static const DwVfpRegister&" because Clang complains otherwise when a
-// compilation unit that includes this header doesn't use the variables.
-#define kFirstCalleeSavedDoubleReg d8
-#define kLastCalleeSavedDoubleReg d15
-#define kDoubleRegZero d14
-#define kScratchDoubleReg d15
+// Aliases for double registers.
+const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
+const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+const DwVfpRegister kDoubleRegZero = d14;
+const DwVfpRegister kScratchDoubleReg = d15;
 
 
 // Coprocessor register
@@ -604,7 +579,6 @@
 extern const Instr kLdrPCPattern;
 extern const Instr kBlxRegMask;
 extern const Instr kBlxRegPattern;
-extern const Instr kBlxIp;
 
 extern const Instr kMovMvnMask;
 extern const Instr kMovMvnPattern;
@@ -686,18 +660,20 @@
 
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
-  inline static void deserialization_set_special_target_at(
-      Address constant_pool_entry, Address target);
+  inline static void set_target_at(Address constant_pool_entry, Address target);
 
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches to runtime code.
   inline static void set_external_target_at(Address constant_pool_entry,
-                                            Address target);
+                                            Address target) {
+    set_target_at(constant_pool_entry, target);
+  }
 
   // Here we are patching the address in the constant pool, not the actual call
   // instruction.  The address in the constant pool is the same size as a
   // pointer.
-  static const int kSpecialTargetSize = kPointerSize;
+  static const int kCallTargetSize = kPointerSize;
+  static const int kExternalTargetSize = kPointerSize;
 
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);
@@ -1231,10 +1207,6 @@
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   // Read/patch instructions
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
@@ -1289,6 +1261,12 @@
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
+  // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
   // Decode branch instruction at pos and return branch target pos
   int target_at(int pos);
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index c99e778..ae8cb56 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,22 +72,6 @@
 }
 
 
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
-                                              Register result) {
-  // Load the global context.
-
-  __ ldr(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ ldr(result,
-         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the InternalArray function from the global context.
-  __ ldr(result,
-         MemOperand(result,
-                    Context::SlotOffset(
-                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
-}
-
-
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
   // Load the global context.
@@ -102,6 +86,12 @@
 }
 
 
+// This constant has the same value as JSArray::kPreallocatedArrayElements; if
+// JSArray::kPreallocatedArrayElements is changed, the handling of the loop
+// unfolding below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
@@ -111,17 +101,16 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
+                                 int initial_capacity,
                                  Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ ldr(scratch1, FieldMemOperand(array_function,
+                                   JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -141,11 +130,6 @@
   __ mov(scratch3,  Operand(0, RelocInfo::NONE));
   __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
-  if (initial_capacity == 0) {
-    __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-    return;
-  }
-
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -154,6 +138,7 @@
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
+  STATIC_ASSERT(kSmiTag == 0);
   __ sub(scratch1, scratch1, Operand(kHeapObjectTag));
 
   // Initialize the FixedArray and fill it with holes. FixedArray length is
@@ -162,29 +147,18 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
   __ mov(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
 
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  static const int kLoopUnfoldLimit = 4;
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    for (int i = 0; i < initial_capacity; i++) {
-      __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-    }
-  } else {
-    Label loop, entry;
-    __ add(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
-    __ b(&entry);
-    __ bind(&loop);
+  for (int i = 0; i < initial_capacity; i++) {
     __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
-    __ bind(&entry);
-    __ cmp(scratch1, scratch2);
-    __ b(lt, &loop);
   }
 }
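
This hunk narrows AllocateEmptyJSArray back to the 3.6 contract: the capacity is
a small compile-time constant, so the hole-fill is always unrolled, where 3.7
chose between an unrolled fill and an emitted loop. The shape of the generated
code, modelled in plain C++ (Fill36, Fill37, word, and hole are stand-ins, not
V8 names):

    typedef unsigned long word;             // stand-in for a pointer-sized slot
    static const int kLoopUnfoldLimit = 4;  // value from the hunk above

    // 3.6: capacity is asserted <= kLoopUnfoldLimit, so this loop is fully
    // unrolled into straight-line stores at code-generation time.
    inline void Fill36(word* p, int n, word hole) {
      for (int i = 0; i < n; ++i) p[i] = hole;
    }

    // 3.7: unroll small fills, emit a real compare-and-branch loop otherwise.
    inline void Fill37(word* p, int n, word hole) {
      if (n <= kLoopUnfoldLimit) {
        for (int i = 0; i < n; ++i) p[i] = hole;
      } else {
        for (word* end = p + n; p < end; ++p) *p = hole;
      }
    }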
 
@@ -199,7 +173,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
+                            Register array_size,  // As a smi.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -207,16 +181,32 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+  Label not_empty, allocated;
 
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ tst(array_size, array_size);
-    __ Assert(ne, "array size is unexpectedly 0");
-  }
+  // Load the initial map from the array function.
+  __ ldr(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ tst(array_size, array_size);
+  __ b(ne, &not_empty);
+
+  // If an empty array is requested, allocate a small elements array anyway.
+  // This keeps the code below free of special-casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
+  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ mov(elements_array_end,
          Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize));
@@ -236,6 +226,7 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
+  __ bind(&allocated);
   __ str(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ str(elements_array_storage,
@@ -265,6 +256,14 @@
   ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
   STATIC_ASSERT(kSmiTag == 0);
+  __ tst(array_size, array_size);
+  // The length of the FixedArray is the number of pre-allocated elements if
+  // the actual JSArray has length 0, and the requested array size for
+  // non-empty JSArrays. The length of a FixedArray is stored as a smi.
+  __ mov(array_size,
+         Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
+         LeaveCC,
+         eq);
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ str(array_size,
          MemOperand(elements_array_storage, kPointerSize, PostIndex));
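
The mov with (LeaveCC, eq) above is ARM predicated execution: the preceding tst
sets the Z flag when array_size is zero, and the mov then executes only in that
case, with no branch. In C terms (SmiFromInt stands in for Smi::FromInt):

    if (array_size == 0) {   // Z flag set by "tst array_size, array_size"
      array_size = SmiFromInt(JSArray::kPreallocatedArrayElements);
    }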
@@ -312,24 +311,23 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
+  Label argc_one_or_more, argc_two_or_more;
 
   // Check for array construction with zero arguments or one.
   __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(ne, &argc_one_or_more);
 
   // Handle construction of an empty array.
-  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        r1,
                        r2,
                        r3,
                        r4,
                        r5,
+                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
-  // Set up return value, remove receiver from stack and return.
+  // Setup return value, remove receiver from stack and return.
   __ mov(r0, r2);
   __ add(sp, sp, Operand(kPointerSize));
   __ Jump(lr);
@@ -341,13 +339,6 @@
   __ b(ne, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ ldr(r2, MemOperand(sp));  // Get the argument from the stack.
-  __ tst(r2, r2);
-  __ b(ne, &not_empty_array);
-  __ Drop(1);  // Adjust stack.
-  __ mov(r0, Operand(0));  // Treat this as a call with argc of zero.
-  __ b(&empty_array);
-
-  __ bind(&not_empty_array);
   __ and_(r3, r2, Operand(kIntptrSignBit | kSmiTagMask), SetCC);
   __ b(ne, call_generic_code);
 
@@ -372,7 +363,7 @@
                   true,
                   call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
-  // Set up return value, remove receiver and argument from stack and return.
+  // Setup return value, remove receiver and argument from stack and return.
   __ mov(r0, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Jump(lr);
@@ -407,21 +398,14 @@
   // r5: elements_array_end (untagged)
   // sp[0]: last argument
   Label loop, entry;
-  __ mov(r7, sp);
   __ jmp(&entry);
   __ bind(&loop);
-  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(r2, &has_non_smi_element);
-  }
+  __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
   __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
   __ bind(&entry);
   __ cmp(r4, r5);
   __ b(lt, &loop);
 
-  __ bind(&finish);
-  __ mov(sp, r7);
-
   // Remove caller arguments and receiver from the stack, setup return value and
   // return.
   // r0: argc
@@ -430,75 +414,6 @@
   __ add(sp, sp, Operand(kPointerSize));
   __ mov(r0, r3);
   __ Jump(lr);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(
-      r2, r9, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(r3, r4);
-  __ b(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
-  // r3: JSArray
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         r2,
-                                         r9,
-                                         &cant_transition_map);
-  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ RecordWriteField(r3,
-                      HeapObject::kMapOffset,
-                      r2,
-                      r9,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  Label loop2;
-  __ sub(r7, r7, Operand(kPointerSize));
-  __ bind(&loop2);
-  __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
-  __ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
-  __ cmp(r4, r5);
-  __ b(lt, &loop2);
-  __ b(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0     : number of arguments
-  //  -- lr     : return address
-  //  -- sp[...]: constructor arguments
-  // -----------------------------------
-  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
-  // Get the InternalArray function.
-  GenerateLoadInternalArrayFunction(masm, r1);
-
-  if (FLAG_debug_code) {
-    // Initial map for the builtin InternalArray functions should be maps.
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ tst(r2, Operand(kSmiTagMask));
-    __ Assert(ne, "Unexpected initial map for InternalArray function");
-    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ Assert(eq, "Unexpected initial map for InternalArray function");
-  }
-
-  // Run the native code for the InternalArray function called as a normal
-  // function.
-  ArrayNativeCode(masm, &generic_array_code);
-
-  // Jump to the generic array code if the specialized code cannot handle the
-  // construction.
-  __ bind(&generic_array_code);
-
-  Handle<Code> array_code =
-      masm->isolate()->builtins()->InternalArrayCodeGeneric();
-  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
@@ -667,11 +582,10 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(r0);
-    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  }
+  __ EnterInternalFrame();
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ LeaveInternalFrame();
   __ pop(function);
   __ mov(argument, r0);
   __ b(&argument_is_string);
@@ -687,18 +601,15 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(argument);
-    __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(argument);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
   __ Ret();
 }
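
The frame-handling rewrites in this file all follow one template: 3.7's
FrameScope is an RAII wrapper that emits the frame teardown when the scope
object is destroyed, and the revert replaces it with explicit
EnterInternalFrame/LeaveInternalFrame pairs. A sketch of the two idioms
(assuming FrameScope's destructor emits the frame exit, as in 3.7):

    void WithFrameScope(MacroAssembler* masm) {      // 3.7 idiom (removed)
      FrameScope scope(masm, StackFrame::INTERNAL);  // emits frame entry
      // ... emit body ...
    }                                                // destructor emits exit

    void WithExplicitPair(MacroAssembler* masm) {    // 3.6 idiom (restored)
      masm->EnterInternalFrame();
      // ... emit body; every path must reach the matching call below ...
      masm->LeaveInternalFrame();
    }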
 
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
@@ -706,324 +617,354 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ JumpIfSmi(r1, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
+  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // r0: number of arguments
+  // r1: called object
+  __ bind(&non_function_call);
+  // Set expected number of arguments to zero (not changing r0).
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(r5, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
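
The restored JSConstructCall dispatches through the function's construct stub
with "add pc, r2, #(Code::kHeaderSize - kHeapObjectTag)". That immediate is
ordinary tagged-pointer arithmetic: r2 holds a tagged Code pointer, i.e. the
object address plus kHeapObjectTag (which is 1), and the first instruction
lives kHeaderSize bytes into the object. As a sketch (EntryOf is a hypothetical
helper, not a V8 function):

    typedef unsigned char byte;
    const int kHeapObjectTag = 1;   // tag bit on heap object pointers
    byte* EntryOf(byte* tagged_code, int header_size) {
      return tagged_code - kHeapObjectTag + header_size;  // untag, skip header
    }
    // Writing the result directly into pc turns the add into the jump.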
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
   Isolate* isolate = masm->isolate();
 
   // Enter a construct frame.
-  {
-    FrameScope scope(masm, StackFrame::CONSTRUCT);
+  __ EnterConstructFrame();
 
-    // Preserve the two incoming parameters on the stack.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-    __ push(r0);  // Smi-tagged arguments count.
-    __ push(r1);  // Constructor function.
+  // Preserve the two incoming parameters on the stack.
+  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+  __ push(r0);  // Smi-tagged arguments count.
+  __ push(r1);  // Constructor function.
 
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    Label rt_call, allocated;
-    if (FLAG_inline_new) {
-      Label undo_allocation;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(isolate);
-      __ mov(r2, Operand(debug_step_in_fp));
-      __ ldr(r2, MemOperand(r2));
-      __ tst(r2, r2);
-      __ b(ne, &rt_call);
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(isolate);
+    __ mov(r2, Operand(debug_step_in_fp));
+    __ ldr(r2, MemOperand(r2));
+    __ tst(r2, r2);
+    __ b(ne, &rt_call);
 #endif
 
-      // Load the initial map and verify that it is in fact a map.
-      // r1: constructor function
-      __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-      __ JumpIfSmi(r2, &rt_call);
-      __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-      __ b(ne, &rt_call);
+    // Load the initial map and verify that it is in fact a map.
+    // r1: constructor function
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ JumpIfSmi(r2, &rt_call);
+    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+    __ b(ne, &rt_call);
 
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // r1: constructor function
-      // r2: initial map
-      __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
-      __ b(eq, &rt_call);
+    // Check that the constructor is not constructing a JSFunction (see the
+    // comments in Runtime_NewObject in runtime.cc); in that case the initial
+    // map's instance type would be JS_FUNCTION_TYPE.
+    // r1: constructor function
+    // r2: initial map
+    __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+    __ b(eq, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+          FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+      __ ldrb(r4, constructor_count);
+      __ sub(r4, r4, Operand(1), SetCC);
+      __ strb(r4, constructor_count);
+      __ b(ne, &allocate);
+
+      __ Push(r1, r2);
+
+      __ push(r1);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(r2);
+      __ pop(r1);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // r1: constructor function
+    // r2: initial map
+    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+    // Allocated the JSObject, now initialize the fields. Map is set to initial
+    // map and properties and elements are set to empty fixed array.
+    // r1: constructor function
+    // r2: initial map
+    // r3: object size
+    // r4: JSObject (not tagged)
+    __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(r5, r4);
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+    // Fill all the in-object properties with the appropriate filler.
+    // r1: constructor function
+    // r2: initial map
+    // r3: object size (in words)
+    // r4: JSObject (not tagged)
+    // r5: First in-object property of JSObject (not tagged)
+    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
       if (count_constructions) {
-        Label allocate;
-        // Decrease generous allocation count.
-        __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-        MemOperand constructor_count =
-            FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
-        __ ldrb(r4, constructor_count);
-        __ sub(r4, r4, Operand(1), SetCC);
-        __ strb(r4, constructor_count);
-        __ b(ne, &allocate);
-
-        __ Push(r1, r2);
-
-        __ push(r1);  // constructor
-        // The call will replace the stub, so the countdown is only done once.
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ pop(r2);
-        __ pop(r1);
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      // r1: constructor function
-      // r2: initial map
-      __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-      __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
-      // Allocated the JSObject, now initialize the fields. Map is set to
-      // initial map and properties and elements are set to empty fixed array.
-      // r1: constructor function
-      // r2: initial map
-      // r3: object size
-      // r4: JSObject (not tagged)
-      __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-      __ mov(r5, r4);
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
-      // Fill all the in-object properties with the appropriate filler.
-      // r1: constructor function
-      // r2: initial map
-      // r3: object size (in words)
-      // r4: JSObject (not tagged)
-      // r5: First in-object property of JSObject (not tagged)
-      __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-      __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-      if (count_constructions) {
-        __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
-        __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-                kBitsPerByte);
-        __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
-        // r0: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ cmp(r0, r6);
-          __ Assert(le, "Unexpected number of pre-allocated property fields.");
-        }
-        __ InitializeFieldsWithFiller(r5, r0, r7);
         // To allow for truncation.
         __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
       }
-      __ InitializeFieldsWithFiller(r5, r6, r7);
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on. Any
-      // failures need to undo the allocation, so that the heap is in a
-      // consistent state and verifiable.
-      __ add(r4, r4, Operand(kHeapObjectTag));
-
-      // Check if a non-empty properties array is needed. Continue with
-      // allocated object if not fall through to runtime call if it is.
-      // r1: constructor function
-      // r4: JSObject
-      // r5: start of next object (not tagged)
-      __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
-      // The field instance sizes contains both pre-allocated property fields
-      // and in-object properties.
-      __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
-      __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-              kBitsPerByte);
-      __ add(r3, r3, Operand(r6));
-      __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
-              kBitsPerByte);
-      __ sub(r3, r3, Operand(r6), SetCC);
-
-      // Done if no extra properties are to be allocated.
-      __ b(eq, &allocated);
-      __ Assert(pl, "Property allocation count failed.");
-
-      // Scale the number of elements by pointer size and add the header for
-      // FixedArrays to the start of the next object calculation from above.
-      // r1: constructor
-      // r3: number of elements in properties array
-      // r4: JSObject
-      // r5: start of next object
-      __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-      __ AllocateInNewSpace(
-          r0,
-          r5,
-          r6,
-          r2,
-          &undo_allocation,
-          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
-      // Initialize the FixedArray.
-      // r1: constructor
-      // r3: number of elements in properties array
-      // r4: JSObject
-      // r5: FixedArray (not tagged)
-      __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
-      __ mov(r2, r5);
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
-      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
-      __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
-      // Initialize the fields to undefined.
-      // r1: constructor function
-      // r2: First element of FixedArray (not tagged)
-      // r3: number of elements in properties array
-      // r4: JSObject
-      // r5: FixedArray (not tagged)
-      __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-      { Label loop, entry;
-        if (count_constructions) {
-          __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-        } else if (FLAG_debug_code) {
-          __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-          __ cmp(r7, r8);
-          __ Assert(eq, "Undefined value not loaded.");
-        }
-        __ b(&entry);
-        __ bind(&loop);
-        __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
-        __ bind(&entry);
-        __ cmp(r2, r6);
-        __ b(lt, &loop);
-      }
-
-      // Store the initialized FixedArray into the properties field of
-      // the JSObject
-      // r1: constructor function
-      // r4: JSObject
-      // r5: FixedArray (not tagged)
-      __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
-      __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
-
-      // Continue with JSObject being successfully allocated
-      // r1: constructor function
-      // r4: JSObject
-      __ jmp(&allocated);
-
-      // Undo the setting of the new top so that the heap is verifiable. For
-      // example, the map's unused properties potentially do not match the
-      // allocated objects unused properties.
-      // r4: JSObject (previous new top)
-      __ bind(&undo_allocation);
-      __ UndoAllocationInNewSpace(r4, r5);
+      __ b(&entry);
+      __ bind(&loop);
+      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
+      __ bind(&entry);
+      __ cmp(r5, r6);
+      __ b(lt, &loop);
     }
 
-    // Allocate the new receiver object using the runtime call.
-    // r1: constructor function
-    __ bind(&rt_call);
-    __ push(r1);  // argument for Runtime_NewObject
-    __ CallRuntime(Runtime::kNewObject, 1);
-    __ mov(r4, r0);
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ add(r4, r4, Operand(kHeapObjectTag));
 
-    // Receiver for constructor call allocated.
+    // Check if a non-empty properties array is needed. Continue with the
+    // allocated object if not; fall through to the runtime call if it is.
+    // r1: constructor function
     // r4: JSObject
-    __ bind(&allocated);
-    __ push(r4);
-    __ push(r4);
+    // r5: start of next object (not tagged)
+    __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+    // The field instance sizes contains both pre-allocated property fields and
+    // in-object properties.
+    __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+    __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
+    __ add(r3, r3, Operand(r6));
+    __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
+    __ sub(r3, r3, Operand(r6), SetCC);
 
-    // Reload the number of arguments and the constructor from the stack.
-    // sp[0]: receiver
-    // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
-    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-    __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
+    // Done if no extra properties are to be allocated.
+    __ b(eq, &allocated);
+    __ Assert(pl, "Property allocation count failed.");
 
-    // Set up pointer to last argument.
-    __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // r1: constructor
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: start of next object
+    __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(
+        r0,
+        r5,
+        r6,
+        r2,
+        &undo_allocation,
+        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
 
-    // Set up number of arguments for function call below
-    __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+    // Initialize the FixedArray.
+    // r1: constructor
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+    __ mov(r2, r5);
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+    __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+    __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
 
-    // Copy arguments and receiver to the expression stack.
-    // r0: number of arguments
+    // Initialize the fields to undefined.
     // r1: constructor function
-    // r2: address of last argument (caller sp)
-    // r3: number of arguments (smi-tagged)
-    // sp[0]: receiver
-    // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
-    Label loop, entry;
-    __ b(&entry);
-    __ bind(&loop);
-    __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
-    __ push(ip);
-    __ bind(&entry);
-    __ sub(r3, r3, Operand(2), SetCC);
-    __ b(ge, &loop);
-
-    // Call the function.
-    // r0: number of arguments
-    // r1: constructor function
-    if (is_api_function) {
-      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      ParameterCount expected(0);
-      __ InvokeCode(code, expected, expected,
-                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-    } else {
-      ParameterCount actual(r0);
-      __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
+    // r2: First element of FixedArray (not tagged)
+    // r3: number of elements in properties array
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+        __ cmp(r7, r8);
+        __ Assert(eq, "Undefined value not loaded.");
+      }
+      __ b(&entry);
+      __ bind(&loop);
+      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+      __ bind(&entry);
+      __ cmp(r2, r6);
+      __ b(lt, &loop);
     }
 
-    // Store offset of return address for deoptimizer.
-    if (!is_api_function && !count_constructions) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
-    }
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // r1: constructor function
+    // r4: JSObject
+    // r5: FixedArray (not tagged)
+    __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
 
-    // Restore context from the frame.
-    // r0: result
-    // sp[0]: receiver
-    // sp[1]: constructor function
-    // sp[2]: number of arguments (smi-tagged)
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Continue with JSObject being successfully allocated
+    // r1: constructor function
+    // r4: JSObject
+    __ jmp(&allocated);
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
-
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    // r0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: constructor function
-    // sp[2]: number of arguments (smi-tagged)
-    __ JumpIfSmi(r0, &use_receiver);
-
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-    __ b(ge, &exit);
-
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ ldr(r0, MemOperand(sp));
-
-    // Remove receiver from the stack, remove caller arguments, and
-    // return.
-    __ bind(&exit);
-    // r0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: constructor function
-    // sp[2]: number of arguments (smi-tagged)
-    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-
-    // Leave construct frame.
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated objects unused properties.
+    // r4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(r4, r5);
   }
 
+  // Allocate the new receiver object using the runtime call.
+  // r1: constructor function
+  __ bind(&rt_call);
+  __ push(r1);  // argument for Runtime_NewObject
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(r4, r0);
+
+  // Receiver for constructor call allocated.
+  // r4: JSObject
+  __ bind(&allocated);
+  __ push(r4);
+
+  // Push the function and the allocated receiver from the stack.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(r1, MemOperand(sp, kPointerSize));
+  __ push(r1);  // Constructor function.
+  __ push(r4);  // Receiver.
+
+  // Reload the number of arguments from the stack.
+  // r1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+  // Setup pointer to last argument.
+  __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Setup number of arguments for function call below
+  __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+  // Copy arguments and receiver to the expression stack.
+  // r0: number of arguments
+  // r2: address of last argument (caller sp)
+  // r1: constructor function
+  // r3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
+  __ b(&entry);
+  __ bind(&loop);
+  __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+  __ push(ip);
+  __ bind(&entry);
+  __ sub(r3, r3, Operand(2), SetCC);
+  __ b(ge, &loop);
+
+  // Call the function.
+  // r0: number of arguments
+  // r1: constructor function
+  if (is_api_function) {
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected,
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+  } else {
+    ParameterCount actual(r0);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Pop the function from the stack.
+  // sp[0]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ pop();
+
+  // Restore context from the frame.
+  // r0: result
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // r0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ JumpIfSmi(r0, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+  __ b(ge, &exit);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ ldr(r0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // r0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
   __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
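
The tail above, like the rest of these stubs, leans on smi tagging: a smi is the
integer shifted left by kSmiTagSize with a zero tag bit. That is why argument
counts are tagged with LSL and untagged with LSR, and why "LSL,
kPointerSizeLog2 - 1" scales a smi count straight to bytes. Sketched for the
32-bit layout these stubs assume (kSmiTagSize == 1, kPointerSizeLog2 == 2):

    int SmiTag(int n)   { return n << 1; }  // mov r0, r0, LSL #kSmiTagSize
    int SmiUntag(int s) { return s >> 1; }  // mov r0, r3, LSR #kSmiTagSize
    // Scaling an already-tagged count to a byte offset needs one extra
    // shift, not two: (n << 1) << (kPointerSizeLog2 - 1) == n * 4.
    int SmiCountToBytes(int smi) { return smi << (2 - 1); }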
@@ -1056,62 +997,60 @@
   // r4: argv
   // r5-r7, cp may be clobbered
 
-  // Clear the context before we push it when entering the internal frame.
+  // Clear the context before we push it when entering the JS frame.
   __ mov(cp, Operand(0, RelocInfo::NONE));
 
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Set up the context from the function argument.
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  // Set up the context from the function argument.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-    __ InitializeRootRegister();
+  __ InitializeRootRegister();
 
-    // Push the function and the receiver onto the stack.
-    __ push(r1);
-    __ push(r2);
+  // Push the function and the receiver onto the stack.
+  __ push(r1);
+  __ push(r2);
 
-    // Copy arguments to the stack in a loop.
-    // r1: function
-    // r3: argc
-    // r4: argv, i.e. points to first arg
-    Label loop, entry;
-    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
-    // r2 points past last arg.
-    __ b(&entry);
-    __ bind(&loop);
-    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
-    __ ldr(r0, MemOperand(r0));  // dereference handle
-    __ push(r0);  // push parameter
-    __ bind(&entry);
-    __ cmp(r4, r2);
-    __ b(ne, &loop);
+  // Copy arguments to the stack in a loop.
+  // r1: function
+  // r3: argc
+  // r4: argv, i.e. points to first arg
+  Label loop, entry;
+  __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+  // r2 points past last arg.
+  __ b(&entry);
+  __ bind(&loop);
+  __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+  __ ldr(r0, MemOperand(r0));  // dereference handle
+  __ push(r0);  // push parameter
+  __ bind(&entry);
+  __ cmp(r4, r2);
+  __ b(ne, &loop);
 
-    // Initialize all JavaScript callee-saved registers, since they will be seen
-    // by the garbage collector as part of handlers.
-    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-    __ mov(r5, Operand(r4));
-    __ mov(r6, Operand(r4));
-    __ mov(r7, Operand(r4));
-    if (kR9Available == 1) {
-      __ mov(r9, Operand(r4));
-    }
-
-    // Invoke the code and pass argc as r0.
-    __ mov(r0, Operand(r3));
-    if (is_construct) {
-      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-      __ CallStub(&stub);
-    } else {
-      ParameterCount actual(r0);
-      __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
-    }
-    // Exit the JS frame and remove the parameters (except function), and
-    // return.
-    // Respect ABI stack constraint.
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ mov(r5, Operand(r4));
+  __ mov(r6, Operand(r4));
+  __ mov(r7, Operand(r4));
+  if (kR9Available == 1) {
+    __ mov(r9, Operand(r4));
   }
+
+  // Invoke the code and pass argc as r0.
+  __ mov(r0, Operand(r3));
+  if (is_construct) {
+    __ Call(masm->isolate()->builtins()->JSConstructCall());
+  } else {
+    ParameterCount actual(r0);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Exit the JS frame and remove the parameters (except function), and return.
+  // Respect ABI stack constraint.
+  __ LeaveInternalFrame();
   __ Jump(lr);
 
   // r0: result
@@ -1130,27 +1069,26 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Preserve the function.
-    __ push(r1);
-    // Push call kind information.
-    __ push(r5);
+  // Preserve the function.
+  __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
-    // Push the function on the stack as the argument to the runtime function.
-    __ push(r1);
-    __ CallRuntime(Runtime::kLazyCompile, 1);
-    // Calculate the entry point.
-    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-    // Restore call kind information.
-    __ pop(r5);
-    // Restore saved function.
-    __ pop(r1);
+  // Restore call kind information.
+  __ pop(r5);
+  // Restore saved function.
+  __ pop(r1);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
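Note: the tail call lands at the first instruction of the newly compiled code. The runtime returns a tagged Code object, and adding Code::kHeaderSize - kHeapObjectTag skips the object header while cancelling the pointer tag. A sketch of that address arithmetic, with the constants passed in since their real values live in the V8 headers:

    #include <cstdint>

    // entry = tagged code pointer + header size - heap-object tag, so the
    // result points at the first generated instruction.
    uintptr_t EntryPoint(uintptr_t tagged_code, int header_size, int tag) {
      return tagged_code + header_size - tag;
    }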
@@ -1159,27 +1097,26 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Preserve the function.
-    __ push(r1);
-    // Push call kind information.
-    __ push(r5);
+  // Preserve the function.
+  __ push(r1);
+  // Push call kind information.
+  __ push(r5);
 
-    // Push the function on the stack as the argument to the runtime function.
-    __ push(r1);
-    __ CallRuntime(Runtime::kLazyRecompile, 1);
-    // Calculate the entry point.
-    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(r1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-    // Restore call kind information.
-    __ pop(r5);
-    // Restore saved function.
-    __ pop(r1);
+  // Restore call kind information.
+  __ pop(r5);
+  // Restore saved function.
+  __ pop(r1);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1188,13 +1125,12 @@
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    // Pass the function and deoptimization type to the runtime system.
-    __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(r0);
-    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  }
+  __ EnterInternalFrame();
+  // Pass the function and deoptimization type to the runtime system.
+  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  __ LeaveInternalFrame();
 
   // Get the full codegen state from the stack and untag it -> r6.
   __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1234,10 +1170,9 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kNotifyOSR, 0);
-  }
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
   __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
   __ Ret();
 }
@@ -1253,11 +1188,10 @@
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(r0);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(r0);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1282,7 +1216,7 @@
   // 1. Make sure we have at least one argument.
   // r0: actual number of arguments
   { Label done;
-    __ cmp(r0, Operand(0));
+    __ tst(r0, Operand(r0));
     __ b(ne, &done);
     __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
     __ push(r2);
@@ -1339,23 +1273,17 @@
     __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
+    __ push(r0);
 
-    {
-      // Enter an internal frame in order to preserve argument count.
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
-      __ push(r0);
+    __ push(r2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(r2, r0);
 
-      __ push(r2);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ mov(r2, r0);
-
-      __ pop(r0);
-      __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-
-      // Exit the internal frame.
-    }
-
+    __ pop(r0);
+    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+    __ LeaveInternalFrame();
     // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ mov(r4, Operand(0, RelocInfo::NONE));
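Note: the shift pair in this hunk is Smi (small integer) tagging. The argument count is preserved across the builtin call by storing it shifted left with a zero tag bit, then shifted back arithmetically afterwards. A self-contained sketch, assuming the classic 32-bit layout with kSmiTagSize == 1:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // low bit is the tag; 0 means "Smi"

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }  // ASR

    int main() {
      assert(SmiUntag(SmiTag(-42)) == -42);
      assert((SmiTag(7) & 1) == 0);  // the low bit is what JumpIfSmi tests
    }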
@@ -1475,157 +1403,156 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
-    __ push(r0);
-    __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
-    __ push(r0);
-    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+  __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
+  __ push(r0);
+  __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-    // Check the stack for overflow. We are not trying to catch
-    // interruptions (e.g. debug break and preemption) here, so the "real stack
-    // limit" is checked.
-    Label okay;
-    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
-    // Make r2 the space we have left. The stack might already be overflowed
-    // here which will cause r2 to become negative.
-    __ sub(r2, sp, r2);
-    // Check if the arguments will overflow the stack.
-    __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-    __ b(gt, &okay);  // Signed comparison.
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+  // Make r2 the space we have left. The stack might already be overflowed
+  // here which will cause r2 to become negative.
+  __ sub(r2, sp, r2);
+  // Check if the arguments will overflow the stack.
+  __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ b(gt, &okay);  // Signed comparison.
 
-    // Out of stack space.
-    __ ldr(r1, MemOperand(fp, kFunctionOffset));
-    __ push(r1);
-    __ push(r0);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    // End of stack check.
+  // Out of stack space.
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ push(r1);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  // End of stack check.
 
-    // Push current limit and index.
-    __ bind(&okay);
-    __ push(r0);  // limit
-    __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
-    __ push(r1);
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(r0);  // limit
+  __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
+  __ push(r1);
 
-    // Get the receiver.
-    __ ldr(r0, MemOperand(fp, kRecvOffset));
+  // Get the receiver.
+  __ ldr(r0, MemOperand(fp, kRecvOffset));
 
-    // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver;
-    __ ldr(r1, MemOperand(fp, kFunctionOffset));
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(ne, &push_receiver);
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &push_receiver);
 
-    // Change context eagerly to get the right global object if necessary.
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    // Load the shared function info while the function is still in r1.
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  // Change context eagerly to get the right global object if necessary.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in r1.
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
 
-    // Compute the receiver.
-    // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
-    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                             kSmiTagSize)));
-    __ b(ne, &push_receiver);
+  // Compute the receiver.
+  // Do not transform the receiver for strict mode functions.
+  Label call_to_object, use_global_receiver;
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                           kSmiTagSize)));
+  __ b(ne, &push_receiver);
 
-    // Do not transform the receiver for strict mode functions.
-    __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ b(ne, &push_receiver);
+  // Do not transform the receiver for strict mode functions.
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ b(ne, &push_receiver);
 
-    // Compute the receiver in non-strict mode.
-    __ JumpIfSmi(r0, &call_to_object);
-    __ LoadRoot(r1, Heap::kNullValueRootIndex);
-    __ cmp(r0, r1);
-    __ b(eq, &use_global_receiver);
-    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-    __ cmp(r0, r1);
-    __ b(eq, &use_global_receiver);
+  // Compute the receiver in non-strict mode.
+  __ JumpIfSmi(r0, &call_to_object);
+  __ LoadRoot(r1, Heap::kNullValueRootIndex);
+  __ cmp(r0, r1);
+  __ b(eq, &use_global_receiver);
+  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, r1);
+  __ b(eq, &use_global_receiver);
 
-    // Check if the receiver is already a JavaScript object.
-    // r0: receiver
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-    __ b(ge, &push_receiver);
+  // Check if the receiver is already a JavaScript object.
+  // r0: receiver
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+  __ b(ge, &push_receiver);
 
-    // Convert the receiver to a regular object.
-    // r0: receiver
-    __ bind(&call_to_object);
-    __ push(r0);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ b(&push_receiver);
+  // Convert the receiver to a regular object.
+  // r0: receiver
+  __ bind(&call_to_object);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ b(&push_receiver);
 
-    // Use the current global receiver object as the receiver.
-    __ bind(&use_global_receiver);
-    const int kGlobalOffset =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-    __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
-    __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
-    // Push the receiver.
-    // r0: receiver
-    __ bind(&push_receiver);
-    __ push(r0);
+  // Push the receiver.
+  // r0: receiver
+  __ bind(&push_receiver);
+  __ push(r0);
 
-    // Copy all arguments from the array to the stack.
-    Label entry, loop;
-    __ ldr(r0, MemOperand(fp, kIndexOffset));
-    __ b(&entry);
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ ldr(r0, MemOperand(fp, kIndexOffset));
+  __ b(&entry);
 
-    // Load the current argument from the arguments array and push it to the
-    // stack.
-    // r0: current argument index
-    __ bind(&loop);
-    __ ldr(r1, MemOperand(fp, kArgsOffset));
-    __ push(r1);
-    __ push(r0);
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // r0: current argument index
+  __ bind(&loop);
+  __ ldr(r1, MemOperand(fp, kArgsOffset));
+  __ push(r1);
+  __ push(r0);
 
-    // Call the runtime to access the property in the arguments array.
-    __ CallRuntime(Runtime::kGetProperty, 2);
-    __ push(r0);
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(r0);
 
-    // Use inline caching to access the arguments.
-    __ ldr(r0, MemOperand(fp, kIndexOffset));
-    __ add(r0, r0, Operand(1 << kSmiTagSize));
-    __ str(r0, MemOperand(fp, kIndexOffset));
+  // Use inline caching to access the arguments.
+  __ ldr(r0, MemOperand(fp, kIndexOffset));
+  __ add(r0, r0, Operand(1 << kSmiTagSize));
+  __ str(r0, MemOperand(fp, kIndexOffset));
 
-    // Test if the copy loop has finished copying all the elements from the
-    // arguments object.
-    __ bind(&entry);
-    __ ldr(r1, MemOperand(fp, kLimitOffset));
-    __ cmp(r0, r1);
-    __ b(ne, &loop);
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ ldr(r1, MemOperand(fp, kLimitOffset));
+  __ cmp(r0, r1);
+  __ b(ne, &loop);
 
-    // Invoke the function.
-    Label call_proxy;
-    ParameterCount actual(r0);
-    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-    __ ldr(r1, MemOperand(fp, kFunctionOffset));
-    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-    __ b(ne, &call_proxy);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+  // Invoke the function.
+  Label call_proxy;
+  ParameterCount actual(r0);
+  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+  __ ldr(r1, MemOperand(fp, kFunctionOffset));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+  __ b(ne, &call_proxy);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
-    frame_scope.GenerateLeaveFrame();
-    __ add(sp, sp, Operand(3 * kPointerSize));
-    __ Jump(lr);
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ add(sp, sp, Operand(3 * kPointerSize));
+  __ Jump(lr);
 
-    // Invoke the function proxy.
-    __ bind(&call_proxy);
-    __ push(r1);  // add function proxy as last argument
-    __ add(r0, r0, Operand(1));
-    __ mov(r2, Operand(0, RelocInfo::NONE));
-    __ SetCallKind(r5, CALL_AS_METHOD);
-    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(r1);  // add function proxy as last argument
+  __ add(r0, r0, Operand(1));
+  __ mov(r2, Operand(0, RelocInfo::NONE));
+  __ SetCallKind(r5, CALL_AS_METHOD);
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 
-    // Tear down the internal frame and remove function, receiver and args.
-  }
+  __ LeaveInternalFrame();
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Jump(lr);
 }
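Note on the stack check near the top of this builtin: it is plain pointer arithmetic. The space left is sp minus the real stack limit, and the space needed is argc words; since r0 holds argc as a Smi (already shifted left by one), the extra shift is kPointerSizeLog2 - kSmiTagSize. The comparison must be signed because sp may already be below the limit. As a sketch with 32-bit constants assumed:

    #include <cstdint>

    bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit,
                             intptr_t smi_argc /* argc << 1 */) {
      const int kPointerSizeLog2 = 2, kSmiTagSize = 1;  // 32-bit ARM
      intptr_t left = sp - real_stack_limit;            // r2; may be negative
      intptr_t needed = smi_argc << (kPointerSizeLog2 - kSmiTagSize);
      return left > needed;                             // b(gt, &okay)
    }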
@@ -1745,9 +1672,6 @@
   __ bind(&invoke);
   __ Call(r3);
 
-  // Store offset of return address for deoptimizer.
-  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
   // Exit frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ Jump(lr);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f772db9..36450c9 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -98,9 +98,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = (language_mode_ == CLASSIC_MODE)
-      ? Context::FUNCTION_MAP_INDEX
-      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -122,6 +122,7 @@
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
   __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
+
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -155,19 +156,21 @@
   // Load the function from the stack.
   __ ldr(r3, MemOperand(sp, 0));
 
-  // Set up the object header.
-  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
+  // Setup the object header.
+  __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
+  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ mov(r2, Operand(Smi::FromInt(length)));
   __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
 
-  // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Setup the fixed slots.
   __ mov(r1, Operand(Smi::FromInt(0)));
   __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+  // Copy the global object from the previous context.
+  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
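Note: a function context is a FixedArray whose first slots are fixed (closure, previous context, extension, global), followed by the declared slots, which the stub initializes to undefined using the root loaded above. Context::SlotOffset is header-plus-index arithmetic; a sketch with assumed 32-bit constants (the header size here is illustrative):

    const int kPointerSize = 4;           // assumption: 32-bit ARM
    const int kFixedArrayHeaderSize = 8;  // illustrative header size
    const int kHeapObjectTag = 1;         // context pointers are tagged

    int SlotOffset(int index) {
      return kFixedArrayHeaderSize + index * kPointerSize - kHeapObjectTag;
    }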
@@ -186,119 +189,6 @@
 }
 
 
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: function.
-  // [sp + kPointerSize]: serialized scope info
-
-  // Try to allocate the context in new space.
-  Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-  __ AllocateInNewSpace(FixedArray::SizeFor(length),
-                        r0, r1, r2, &gc, TAG_OBJECT);
-
-  // Load the function from the stack.
-  __ ldr(r3, MemOperand(sp, 0));
-
-  // Load the serialized scope info from the stack.
-  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
-
-  // Set up the object header.
-  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ mov(r2, Operand(Smi::FromInt(length)));
-  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
-
-  // If this block context is nested in the global context we get a smi
-  // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
-  // we still have to look up.
-  Label after_sentinel;
-  __ JumpIfNotSmi(r3, &after_sentinel);
-  if (FLAG_debug_code) {
-    const char* message = "Expected 0 as a Smi sentinel";
-    __ cmp(r3, Operand::Zero());
-    __ Assert(eq, message);
-  }
-  __ ldr(r3, GlobalObjectOperand());
-  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
-  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
-  __ bind(&after_sentinel);
-
-  // Set up the fixed slots, copy the global object from the previous context.
-  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
-  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
-  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
-
-  // Initialize the rest of the slots to the hole value.
-  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < slots_; i++) {
-    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
-  }
-
-  // Remove the on-stack argument and return.
-  __ mov(cp, r0);
-  __ add(sp, sp, Operand(2 * kPointerSize));
-  __ Ret();
-
-  // Need to collect. Call into runtime system.
-  __ bind(&gc);
-  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    Label* fail) {
-  // Registers on entry:
-  //
-  // r3: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-  int size = JSArray::kSize + elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        r0,
-                        r1,
-                        r2,
-                        fail,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ ldr(r1, FieldMemOperand(r3, i));
-      __ str(r1, FieldMemOperand(r0, i));
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ add(r2, r0, Operand(JSArray::kSize));
-    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    ASSERT((elements_size % kPointerSize) == 0);
-    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
-  }
-}
-
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -306,6 +196,10 @@
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
   // Load boilerplate object into r3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -313,59 +207,57 @@
   __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r3, ip);
   __ b(eq, &slow_case);
 
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
-    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-    __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
-    __ b(ne, &check_fast_elements);
-    GenerateFastCloneShallowArrayCommon(masm, 0,
-                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
-    // Return and remove the on-stack parameters.
-    __ add(sp, sp, Operand(3 * kPointerSize));
-    __ Ret();
-
-    __ bind(&check_fast_elements);
-    __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
-    __ b(ne, &double_elements);
-    GenerateFastCloneShallowArrayCommon(masm, length_,
-                                        CLONE_ELEMENTS, &slow_case);
-    // Return and remove the on-stack parameters.
-    __ add(sp, sp, Operand(3 * kPointerSize));
-    __ Ret();
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode == CLONE_ELEMENTS) {
+    if (mode_ == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
     __ push(r3);
     __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
     __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
-    __ CompareRoot(r3, expected_map_index);
+    __ LoadRoot(ip, expected_map_index);
+    __ cmp(r3, ip);
     __ Assert(eq, message);
     __ pop(r3);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size,
+                        r0,
+                        r1,
+                        r2,
+                        &slow_case,
+                        TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ ldr(r1, FieldMemOperand(r3, i));
+      __ str(r1, FieldMemOperand(r0, i));
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and setup the
+    // elements pointer in the resulting object.
+    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ add(r2, r0, Operand(JSArray::kSize));
+    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
+
+    // Copy the elements array.
+    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
+  }
 
   // Return and remove the on-stack parameters.
   __ add(sp, sp, Operand(3 * kPointerSize));
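Note: the clone stub allocates the JSArray header and its elements as one block so a single new-space limit check covers both. A sketch of the size computation, with illustrative constants standing in for JSArray::kSize and the FixedArray header (the real values come from the V8 headers):

    int CloneAllocationSize(int length) {
      const int kPointerSize = 4;       // assumption: 32-bit ARM
      const int kJSArraySize = 16;      // illustrative, not the real value
      const int kFixedArrayHeader = 8;  // illustrative, not the real value
      int elements_size =
          (length > 0) ? kFixedArrayHeader + length * kPointerSize : 0;
      return kJSArraySize + elements_size;  // one allocation, one check
    }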
@@ -376,49 +268,6 @@
 }
 
 
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: object literal flags.
-  // [sp + kPointerSize]: constant properties.
-  // [sp + (2 * kPointerSize)]: literal index.
-  // [sp + (3 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into r3 and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
-  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ b(eq, &slow_case);
-
-  // Check that the boilerplate contains only fast properties and we can
-  // statically determine the instance size.
-  int size = JSObject::kHeaderSize + length_ * kPointerSize;
-  __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
-  __ cmp(r0, Operand(size >> kPointerSizeLog2));
-  __ b(ne, &slow_case);
-
-  // Allocate the JS object and copy header together with all in-object
-  // properties from the boilerplate.
-  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ ldr(r1, FieldMemOperand(r3, i));
-    __ str(r1, FieldMemOperand(r0, i));
-  }
-
-  // Return and remove the on-stack parameters.
-  __ add(sp, sp, Operand(4 * kPointerSize));
-  __ Ret();
-
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -480,7 +329,7 @@
   __ b(gt, &not_special);
 
   // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
-  const uint32_t exponent_word_for_1 =
+  static const uint32_t exponent_word_for_1 =
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
   // 1, 0 and -1 all have 0 for the second word.
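Note: the constant above is the entire high word of the double 1.0. The mantissa of 1.0 is all zeros, so only the biased exponent (1023, shifted past the 20 mantissa bits kept in the high word) survives. A runnable check of that layout:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t kExponentBias = 1023;
      const int kExponentShift = 20;  // 52 mantissa bits - 32 in the low word
      uint64_t bits;
      double one = 1.0;
      std::memcpy(&bits, &one, sizeof(bits));
      assert(static_cast<uint32_t>(bits >> 32) ==
             (kExponentBias << kExponentShift));  // 0x3FF00000
      assert(static_cast<uint32_t>(bits) == 0);   // low mantissa word
    }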
@@ -583,9 +432,7 @@
 
   Label is_smi, done;
 
-  // Smi-check
-  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
-  // Heap number check
+  __ JumpIfSmi(object, &is_smi);
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
@@ -607,6 +454,7 @@
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
     // Convert smi to double using VFP instructions.
+    __ SmiUntag(scratch1, object);
     __ vmov(dst.high(), scratch1);
     __ vcvt_f64_s32(dst, dst.high());
     if (destination == kCoreRegisters) {
@@ -641,10 +489,11 @@
                            Heap::kHeapNumberMapRootIndex,
                            "HeapNumberMap register clobbered.");
   }
+  Label is_smi;
   Label done;
   Label not_in_int32_range;
 
-  __ UntagAndJumpIfSmi(dst, object, &done);
+  __ JumpIfSmi(object, &is_smi);
   __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ cmp(scratch1, heap_number_map);
   __ b(ne, not_number);
@@ -664,6 +513,10 @@
                                  scratch1,
                                  scratch2,
                                  scratch3);
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
   __ bind(&done);
 }
 
@@ -706,7 +559,7 @@
     // Get the absolute value of the object (as an unsigned integer).
     __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
 
-    // Get mantissa[51:20].
+    // Get mantissa[51:20].
 
     // Get the position of the first set bit.
     __ CountLeadingZeros(dst1, int_scratch, scratch2);
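Note: from here the stub builds the double by hand. CLZ finds the leading set bit, which gives the unbiased exponent, and the bits below it become the mantissa. The same computation in portable C++ (a sketch; __builtin_clz stands in for the ARM clz instruction and requires a non-zero input):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double IntToDouble(uint32_t x) {  // requires x > 0
      int e = 31 - __builtin_clz(x);           // index of the leading 1
      uint64_t frac = x & ((1ull << e) - 1);   // bits below the leading 1
      uint64_t bits = (static_cast<uint64_t>(e + 1023) << 52)  // biased exp
                    | (frac << (52 - e));      // left-align into 52 bits
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }

    int main() {
      assert(IntToDouble(1) == 1.0);
      assert(IntToDouble(12345) == 12345.0);
    }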
@@ -836,7 +689,10 @@
 
   Label done;
 
-  __ UntagAndJumpIfSmi(dst, object, &done);
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
 
   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -937,7 +793,7 @@
   // non zero bits left. So we need the (30 - exponent) last bits of the
   // 31 higher bits of the mantissa to be null.
   // Because bits [21:0] are null, we can check instead that the
-  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
 
   // Get the 32 higher bits of the mantissa in dst.
   __ Ubfx(dst,
@@ -982,11 +838,9 @@
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   0, 2);
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
@@ -1003,29 +857,6 @@
 }
 
 
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
-  // These variants are compiled ahead of time.  See next method.
-  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
-    return true;
-  }
-  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
-    return true;
-  }
-  // Other register combinations are generated as and when they are needed,
-  // so it is unsafe to call them from stubs (we can't generate a stub while
-  // we are generating a stub).
-  return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
-  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
-  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
-  stub1.GetCode()->set_is_pregenerated(true);
-  stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1366,8 +1197,6 @@
       __ vmov(d0, r0, r1);
       __ vmov(d1, r2, r3);
     }
-
-    AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                      0, 2);
     __ pop(pc);  // Return.
@@ -1385,7 +1214,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1777,8 +1606,6 @@
 // The stub expects its argument in the tos_ register and returns its result in
 // it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);
 
@@ -1886,41 +1713,6 @@
 }
 
 
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
-  // We don't allow a GC during a store buffer overflow so there is no need to
-  // store the registers in any particular way, but we do have to store and
-  // restore them.
-  __ stm(db_w, sp, kCallerSaved | lr.bit());
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP3);
-    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
-    }
-  }
-  const int argument_count = 1;
-  const int fp_argument_count = 0;
-  const Register scratch = r1;
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
-  __ mov(r0, Operand(ExternalReference::isolate_address()));
-  __ CallCFunction(
-      ExternalReference::store_buffer_overflow_function(masm->isolate()),
-      argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(VFP3);
-    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
-      DwVfpRegister reg = DwVfpRegister::from_code(i);
-      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
-    }
-    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
-  }
-  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
-}
-
-
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -2074,13 +1866,12 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(r0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(r1, Operand(r0));
-      __ pop(r0);
-    }
+    __ EnterInternalFrame();
+    __ push(r0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(r1, Operand(r0));
+    __ pop(r0);
+    __ LeaveInternalFrame();
 
     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -2121,14 +1912,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(r0);  // Push the heap number, not the untagged int32.
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(r2, r0);  // Move the new heap number into r2.
-      // Get the heap number into r0, now that the new heap number is in r2.
-      __ pop(r0);
-    }
+    __ EnterInternalFrame();
+    __ push(r0);  // Push the heap number, not the untagged int32.
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(r2, r0);  // Move the new heap number into r2.
+    // Get the heap number into r0, now that the new heap number is in r2.
+    __ pop(r0);
+    __ LeaveInternalFrame();
 
     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
@@ -2238,10 +2028,6 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -2324,7 +2110,7 @@
       __ cmp(ip, Operand(scratch2));
       __ b(ne, &not_smi_result);
       // Go slow on zero result to handle -0.
-      __ cmp(scratch1, Operand(0));
+      __ tst(scratch1, Operand(scratch1));
       __ mov(right, Operand(scratch1), LeaveCC, ne);
       __ Ret(ne);
       // We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -3296,12 +3082,10 @@
     // Check if cache matches: Double value is stored in uint32_t[2] array.
     __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
     __ cmp(r2, r4);
-    __ cmp(r3, r5, eq);
+    __ b(ne, &calculate);
+    __ cmp(r3, r5);
     __ b(ne, &calculate);
     // Cache hit. Load result, cleanup and return.
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(
-        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into r0.
       __ pop();
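Note: the cache lookup above treats the input double as two 32-bit key words and compares both against the entry loaded by the ldm; the revert replaces the conditionally executed second cmp with an explicit branch between the two compares. A sketch of the lookup against one entry (the layout and field names are illustrative, not V8's exact cache structure):

    #include <cstdint>
    #include <cstring>

    struct CacheEntry {
      uint32_t key_lo;  // r4 after the ldm
      uint32_t key_hi;  // r5
      double value;     // cached result (tagged in the real cache)
    };

    bool Lookup(const CacheEntry& e, double input, double* out) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));
      if (e.key_lo != static_cast<uint32_t>(bits)) return false;        // cmp r2, r4
      if (e.key_hi != static_cast<uint32_t>(bits >> 32)) return false;  // cmp r3, r5
      *out = e.value;  // cache hit: load result, clean up, return
      return true;
    }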
@@ -3314,9 +3098,6 @@
   }  // if (CpuFeatures::IsSupported(VFP3))
 
   __ bind(&calculate);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(
-      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     ExternalReference runtime_function =
@@ -3352,11 +3133,10 @@
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(r0);
-      __ CallRuntime(RuntimeFunction(), 1);
-    }
+    __ EnterInternalFrame();
+    __ push(r0);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3369,15 +3149,14 @@
 
     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      // Allocate an aligned object larger than a HeapNumber.
-      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-      __ mov(scratch0, Operand(4 * kPointerSize));
-      __ push(scratch0);
-      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    }
+    // Allocate an aligned object larger than a HeapNumber.
+    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+    __ mov(scratch0, Operand(4 * kPointerSize));
+    __ push(scratch0);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
     __ Ret();
   }
 }
@@ -3394,7 +3173,6 @@
   } else {
     __ vmov(r0, r1, d2);
   }
-  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3404,10 +3182,6 @@
       __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
           0, 1);
       break;
-    case TranscendentalCache::TAN:
-      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
-          0, 1);
-      break;
     case TranscendentalCache::LOG:
       __ CallCFunction(ExternalReference::math_log_double_function(isolate),
           0, 1);
@@ -3425,7 +3199,6 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -3439,207 +3212,105 @@
 }
 
 
-void InterruptStub::Generate(MacroAssembler* masm) {
-  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
-  CpuFeatures::Scope vfp3_scope(VFP3);
-  const Register base = r1;
-  const Register exponent = r2;
-  const Register heapnumbermap = r5;
-  const Register heapnumber = r0;
-  const DoubleRegister double_base = d1;
-  const DoubleRegister double_exponent = d2;
-  const DoubleRegister double_result = d3;
-  const DoubleRegister double_scratch = d0;
-  const SwVfpRegister single_scratch = s0;
-  const Register scratch = r9;
-  const Register scratch2 = r7;
+  Label call_runtime;
 
-  Label call_runtime, done, int_exponent;
-  if (exponent_type_ == ON_STACK) {
-    Label base_is_smi, unpack_exponent;
-    // The exponent and base are supplied as arguments on the stack.
-    // This can only happen if the stub is called from non-optimized code.
-    // Load input parameters from stack to double registers.
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+
+    Label base_not_smi;
+    Label exponent_not_smi;
+    Label convert_exponent;
+
+    const Register base = r0;
+    const Register exponent = r1;
+    const Register heapnumbermap = r5;
+    const Register heapnumber = r6;
+    const DoubleRegister double_base = d0;
+    const DoubleRegister double_exponent = d1;
+    const DoubleRegister double_result = d2;
+    const SwVfpRegister single_scratch = s0;
+    const Register scratch = r9;
+    const Register scratch2 = r7;
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
     __ ldr(base, MemOperand(sp, 1 * kPointerSize));
     __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+    // Convert base to double value and store it in d0.
+    __ JumpIfNotSmi(base, &base_not_smi);
+    // Base is a Smi. Untag and convert it.
+    __ SmiUntag(base);
+    __ vmov(single_scratch, base);
+    __ vcvt_f64_s32(double_base, single_scratch);
+    __ b(&convert_exponent);
 
-    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+    __ bind(&base_not_smi);
     __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
-
+    // Base is a heapnumber. Load it into double register.
     __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
-    __ jmp(&unpack_exponent);
 
-    __ bind(&base_is_smi);
-    __ vmov(single_scratch, scratch);
-    __ vcvt_f64_s32(double_base, single_scratch);
-    __ bind(&unpack_exponent);
+    __ bind(&convert_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
 
-    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+    // The base is in a double register and the exponent is
+    // an untagged smi. Allocate a heap number and call a
+    // C function for integer exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
+    __ push(lr);
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(masm->isolate()),
+        1, 1);
+    __ pop(lr);
+    __ GetCFunctionDoubleResult(double_result);
+    __ vstr(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(r0, heapnumber);
+    __ Ret(2 * kPointerSize);
 
+    __ bind(&exponent_not_smi);
     __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
     __ b(ne, &call_runtime);
+    // Exponent is a heapnumber. Load it into double register.
     __ vldr(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
-    // Base is already in double_base.
-    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    __ vldr(double_exponent,
-            FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  }
-
-  if (exponent_type_ != INTEGER) {
-    Label int_exponent_convert;
-    // Detect integer exponents stored as double.
-    __ vcvt_u32_f64(single_scratch, double_exponent);
-    // We do not check for NaN or Infinity here because comparing numbers on
-    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
-    __ vcvt_f64_u32(double_scratch, single_scratch);
-    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
-    __ b(eq, &int_exponent_convert);
-
-    if (exponent_type_ == ON_STACK) {
-      // Detect square root case.  Crankshaft detects constant +/-0.5 at
-      // compile time and uses DoMathPowHalf instead.  We then skip this check
-      // for non-constant cases of +/-0.5 as these hardly occur.
-      Label not_plus_half;
-
-      // Test for 0.5.
-      __ vmov(double_scratch, 0.5);
-      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
-      __ b(ne, &not_plus_half);
-
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY);
-      __ VFPCompareAndSetFlags(double_base, double_scratch);
-      __ vneg(double_result, double_scratch, eq);
-      __ b(eq, &done);
-
-      // Add +0 to convert -0 to +0.
-      __ vadd(double_scratch, double_base, kDoubleRegZero);
-      __ vsqrt(double_result, double_scratch);
-      __ jmp(&done);
-
-      __ bind(&not_plus_half);
-      __ vmov(double_scratch, -0.5);
-      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
-      __ b(ne, &call_runtime);
-
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY);
-      __ VFPCompareAndSetFlags(double_base, double_scratch);
-      __ vmov(double_result, kDoubleRegZero, eq);
-      __ b(eq, &done);
-
-      // Add +0 to convert -0 to +0.
-      __ vadd(double_scratch, double_base, kDoubleRegZero);
-      __ vmov(double_result, 1);
-      __ vsqrt(double_scratch, double_scratch);
-      __ vdiv(double_result, double_result, double_scratch);
-      __ jmp(&done);
-    }
-
+    // The base and the exponent are in double registers.
+    // Allocate a heap number and call a C function for
+    // double exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
     __ push(lr);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(0, 2, scratch);
-      __ SetCallCDoubleArguments(double_base, double_exponent);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()),
-          0, 2);
-    }
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(double_base, double_exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()),
+        0, 2);
     __ pop(lr);
     __ GetCFunctionDoubleResult(double_result);
-    __ jmp(&done);
-
-    __ bind(&int_exponent_convert);
-    __ vcvt_u32_f64(single_scratch, double_exponent);
-    __ vmov(scratch, single_scratch);
-  }
-
-  // Calculate power with integer exponent.
-  __ bind(&int_exponent);
-
-  // Get two copies of exponent in the registers scratch and exponent.
-  if (exponent_type_ == INTEGER) {
-    __ mov(scratch, exponent);
-  } else {
-    // Exponent has previously been stored into scratch as untagged integer.
-    __ mov(exponent, scratch);
-  }
-  __ vmov(double_scratch, double_base);  // Back up base.
-  __ vmov(double_result, 1.0);
-
-  // Get absolute value of exponent.
-  __ cmp(scratch, Operand(0));
-  __ mov(scratch2, Operand(0), LeaveCC, mi);
-  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
-
-  Label while_true;
-  __ bind(&while_true);
-  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
-  __ vmul(double_result, double_result, double_scratch, cs);
-  __ vmul(double_scratch, double_scratch, double_scratch, ne);
-  __ b(ne, &while_true);
-
-  __ cmp(exponent, Operand(0));
-  __ b(ge, &done);
-  __ vmov(double_scratch, 1.0);
-  __ vdiv(double_result, double_scratch, double_result);
-  // Test whether result is zero.  Bail out to check for subnormal result.
-  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
-  __ VFPCompareAndSetFlags(double_result, 0.0);
-  __ b(ne, &done);
-  // double_exponent may not containe the exponent value if the input was a
-  // smi.  We set it with exponent value before bailing out.
-  __ vmov(single_scratch, exponent);
-  __ vcvt_f64_s32(double_exponent, single_scratch);
-
-  // Returning or bailing out.
-  Counters* counters = masm->isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
-    // The arguments are still on the stack.
-    __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
-    // The stub is called from non-optimized code, which expects the result
-    // as heap number in exponent.
-    __ bind(&done);
-    __ AllocateHeapNumber(
-        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    ASSERT(heapnumber.is(r0));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
-    __ Ret(2);
-  } else {
-    __ push(lr);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(0, 2, scratch);
-      __ SetCallCDoubleArguments(double_base, double_exponent);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()),
-          0, 2);
-    }
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
-
-    __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
-    __ Ret();
+    __ mov(r0, heapnumber);
+    __ Ret(2 * kPointerSize);
   }
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
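Note: the int_exponent loop removed above is exponentiation by squaring. It shifts the exponent right one bit per iteration, multiplies the result in when the shifted-out bit was set (the cs condition), and squares the running base while bits remain (ne). A standalone sketch of the same algorithm:

    #include <cassert>

    double PowInt(double base, int exponent) {
      unsigned e = (exponent < 0) ? -static_cast<unsigned>(exponent)
                                  : static_cast<unsigned>(exponent);
      double result = 1.0, scratch = base;  // scratch backs up the base
      while (e != 0) {
        if (e & 1) result *= scratch;  // vmul ..., cs
        scratch *= scratch;            // vmul ..., ne
        e >>= 1;                       // mov ..., ASR 1, SetCC
      }
      return (exponent < 0) ? 1.0 / result : result;  // vdiv for e < 0
    }

    int main() {
      assert(PowInt(2.0, 10) == 1024.0);
      assert(PowInt(2.0, -2) == 0.25);
    }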
 
 
@@ -3648,34 +3319,14 @@
 }
 
 
-bool CEntryStub::IsPregenerated() {
-  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
-          result_size_ == 1;
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  __ Throw(r0);
 }
 
 
-void CodeStub::GenerateStubsAheadOfTime() {
-  CEntryStub::GenerateAheadOfTime();
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
-  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
-  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub(kSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
-  CEntryStub stub(1, kDontSaveFPRegs);
-  Handle<Code> code = stub.GetCode();
-  code->set_is_pregenerated(true);
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  __ ThrowUncatchable(type, r0);
 }
 
 
@@ -3779,7 +3430,8 @@
   __ b(eq, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+  __ ldr(r3, MemOperand(ip));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
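Note: both sides of this hunk implement the same pending-exception protocol: read the thrown object out of the per-isolate slot, then reset the slot to the hole sentinel meaning "nothing pending" (the revert just loads the hole through an external reference instead of a factory constant). A sketch of the protocol with illustrative names:

    struct PendingSlots {
      void* pending_exception;  // written by the runtime on throw
      void* the_hole_value;     // sentinel; never a real JS value
    };

    void* TakePendingException(PendingSlots* s) {
      void* exception = s->pending_exception;    // ldr r0, [ip]
      s->pending_exception = s->the_hole_value;  // clear for the next call
      return exception;
    }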
@@ -3817,10 +3469,9 @@
   __ sub(r6, r6, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
-  // Set up argc and the builtin function in callee-saved registers.
+  // Setup argc and the builtin function in callee-saved registers.
   __ mov(r4, Operand(r0));
   __ mov(r5, Operand(r1));
 
@@ -3859,27 +3510,13 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  // Set external caught exception to false.
-  Isolate* isolate = masm->isolate();
-  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                    isolate);
-  __ mov(r0, Operand(false, RelocInfo::NONE));
-  __ mov(r2, Operand(external_caught));
-  __ str(r0, MemOperand(r2));
-
-  // Set pending exception and r0 to out of memory exception.
-  Failure* out_of_memory = Failure::OutOfMemoryException();
-  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                       isolate)));
-  __ str(r0, MemOperand(r2));
-  // Fall through to the next label.
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(r0);
+  GenerateThrowUncatchable(masm, TERMINATION);
 
   __ bind(&throw_normal_exception);
-  __ Throw(r0);
+  GenerateThrowTOS(masm);
 }
 
 
@@ -3890,7 +3527,7 @@
   // r3: argc
   // [sp+0]: argv
 
-  Label invoke, handler_entry, exit;
+  Label invoke, exit;
 
   // Called from C, so do not pop argc and args on exit (preserve sp)
   // No need to save register-passed args
@@ -3911,7 +3548,7 @@
   // r2: receiver
   // r3: argc
 
-  // Set up argv in r4.
+  // Setup argv in r4.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(VFP3)) {
     offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
@@ -3934,7 +3571,7 @@
   __ ldr(r5, MemOperand(r5));
   __ Push(r8, r7, r6, r5);
 
-  // Set up frame pointer for the frame to be pushed.
+  // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
 
   // If this is the outermost JS call, set js_entry_sp value.
@@ -3953,33 +3590,31 @@
   __ bind(&cont);
   __ push(ip);
 
-  // Jump to a faked try block that does the invoke, with a faked catch
-  // block that sets the pending exception.
-  __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel.  Coming in here the
-  // fp will be invalid because the PushTryHandler below sets it to 0 to
-  // signal the existence of the JSEntry frame.
+  // Call a faked try-block that does the invoke.
+  __ bl(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
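+  // (The failure sentinel lets the C++ caller distinguish "exception
+  // thrown" from an ordinary return value.)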
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r0, MemOperand(ip));
   __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);
 
-  // Invoke: Link this frame into the handler chain.  There's only one
-  // handler block in this code object, so its index is 0.
+  // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
   // Must preserve r0-r4, r5-r7 are available.
-  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bl(&invoke) above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
+  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
+  __ ldr(r5, MemOperand(ip));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
@@ -4073,7 +3708,7 @@
   const Register inline_site = r9;
   const Register scratch = r2;
 
-  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
 
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
@@ -4090,9 +3725,11 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
+    __ cmp(function, ip);
     __ b(ne, &miss);
-    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
+    __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
+    __ cmp(map, ip);
     __ b(ne, &miss);
     __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
     __ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4101,7 +3738,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -4122,8 +3759,7 @@
     __ sub(inline_site, lr, scratch);
     // Get the map location in scratch and patch it.
     __ GetRelocatedValueLocation(inline_site, scratch);
-    __ ldr(scratch, MemOperand(scratch));
-    __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    __ str(map, MemOperand(scratch));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
@@ -4215,11 +3851,10 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(r0, r1);
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    }
+    __ EnterInternalFrame();
+    __ Push(r0, r1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
     __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4237,7 +3872,7 @@
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
-  const int kDisplacement =
+  static const int kDisplacement =
       StandardFrameConstants::kCallerSPOffset - kPointerSize;
 
   // Check that the key is a smi.
@@ -4392,7 +4027,7 @@
     __ str(r3, FieldMemOperand(r0, i));
   }
 
-  // Set up the callee in-object property.
+  // Setup the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
   const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4405,7 +4040,7 @@
       Heap::kArgumentsLengthIndex * kPointerSize;
   __ str(r2, FieldMemOperand(r0, kLengthOffset));
 
-  // Set up the elements pointer in the allocated arguments object.
+  // Setup the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, r4 will point there, otherwise
   // it will point to the backing store.
   __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
@@ -4500,7 +4135,7 @@
   __ Ret();
 
   // Do the runtime call to allocate the arguments object.
-  // r2 = argument count (tagged)
+  // r2 = argument count (taggged)
   __ bind(&runtime);
   __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4573,7 +4208,7 @@
   // Get the parameters pointer from the stack.
   __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
 
-  // Set up the elements pointer in the allocated arguments object and
+  // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -4585,7 +4220,7 @@
 
   // Copy the fixed array slots.
   Label loop;
-  // Set up r4 to point to the first array slot.
+  // Setup r4 to point to the first array slot.
   __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ bind(&loop);
   // Pre-decrement r2 with kPointerSize on each iteration.
@@ -4615,6 +4250,10 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4622,10 +4261,10 @@
   //  sp[8]: subject string
   //  sp[12]: JSRegExp object
 
-  const int kLastMatchInfoOffset = 0 * kPointerSize;
-  const int kPreviousIndexOffset = 1 * kPointerSize;
-  const int kSubjectOffset = 2 * kPointerSize;
-  const int kJSRegExpOffset = 3 * kPointerSize;
+  static const int kLastMatchInfoOffset = 0 * kPointerSize;
+  static const int kPreviousIndexOffset = 1 * kPointerSize;
+  static const int kSubjectOffset = 2 * kPointerSize;
+  static const int kJSRegExpOffset = 3 * kPointerSize;
 
   Label runtime, invoke_regexp;
 
@@ -4646,7 +4285,7 @@
       ExternalReference::address_of_regexp_stack_memory_size(isolate);
   __ mov(r0, Operand(address_of_regexp_stack_memory_size));
   __ ldr(r0, MemOperand(r0, 0));
-  __ cmp(r0, Operand(0));
+  __ tst(r0, Operand(r0));
   __ b(eq, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -4717,7 +4356,8 @@
   __ ldr(last_match_info_elements,
          FieldMemOperand(r0, JSArray::kElementsOffset));
   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
-  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r0, ip);
   __ b(ne, &runtime);
   // Check that the last match info has space for the capture registers and the
   // additional information.
@@ -4735,39 +4375,25 @@
   Label seq_string;
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  // First check for flat string.  None of the following string type tests will
-  // succeed if subject is not a string or a short external string.
-  __ and_(r1,
-          r0,
-          Operand(kIsNotStringMask |
-                  kStringRepresentationMask |
-                  kShortExternalStringMask),
-          SetCC);
+  // First check for flat string.
+  __ and_(r1, r0, Operand(kIsNotStringMask | kStringRepresentationMask), SetCC);
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
-  // r1: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
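   // (E.g. flattening a cons string overwrites its second part with the
   // empty string and leaves the flat result in the first part.)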
-  Label cons_string, external_string, check_encoding;
+  Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
-  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(r1, Operand(kExternalStringTag));
   __ b(lt, &cons_string);
-  __ b(eq, &external_string);
-
-  // Catch non-string subject or short external string.
-  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
-  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
-  __ b(ne, &runtime);
+  __ b(eq, &runtime);
 
   // String is sliced.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -4778,7 +4404,8 @@
   // String is a cons string, check whether it is flat.
   __ bind(&cons_string);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
-  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
+  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
+  __ cmp(r0, r1);
   __ b(ne, &runtime);
   __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
   // Is first part of cons or parent of slice a flat string?
@@ -4787,8 +4414,7 @@
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
-  __ b(ne, &external_string);
-
+  __ b(ne, &runtime);
   __ bind(&seq_string);
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -4824,8 +4450,8 @@
   __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
-  const int kRegExpExecuteArguments = 8;
-  const int kParameterRegisters = 4;
+  static const int kRegExpExecuteArguments = 8;
+  static const int kParameterRegisters = 4;
   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
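   // (AAPCS passes the first four arguments in r0-r3; the remaining four,
   // including the trailing isolate pointer, go on the stack.)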
 
   // Stack pointer now points to cell where return address is to be written.
@@ -4854,7 +4480,8 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4905,7 +4532,8 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
+  __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
+  __ ldr(r1, MemOperand(r1, 0));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
@@ -4920,10 +4548,10 @@
   Label termination_exception;
   __ b(eq, &termination_exception);
 
-  __ Throw(r0);
+  __ Throw(r0);  // Expects thrown value in r0.
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(r0);
+  __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
 
   __ bind(&failure);
   // For failure and exception return null.
@@ -4947,25 +4575,16 @@
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
+  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ mov(r2, subject);
-  __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      r2,
-                      r7,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
+  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      subject,
-                      r7,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs);
+  __ mov(r3, last_match_info_elements);
+  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -4996,26 +4615,6 @@
   __ add(sp, sp, Operand(4 * kPointerSize));
   __ Ret();
 
-  // External string.  Short external strings have already been ruled out.
-  // r0: scratch
-  __ bind(&external_string);
-  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ tst(r0, Operand(kIsIndirectStringMask));
-    __ Assert(eq, "external string expected, but not found");
-  }
-  __ ldr(subject,
-         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ sub(subject,
-         subject,
-         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ jmp(&seq_string);
-
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -5071,11 +4670,11 @@
 
   // Set input, index and length fields from arguments.
   __ ldr(r1, MemOperand(sp, kPointerSize * 0));
-  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
-  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
   __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
-  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
-  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
+  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
+  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
 
   // Fill out the elements FixedArray.
   // r0: JSArray, tagged.
@@ -5097,9 +4696,9 @@
   // r3: Start of elements in FixedArray.
   // r5: Number of elements to fill.
   Label loop;
-  __ cmp(r5, Operand(0));
+  __ tst(r5, Operand(r5));
   __ bind(&loop);
-  __ b(le, &done);  // Jump if r5 is negative or zero.
+  __ b(le, &done);  // Jump if r5 is negative or zero.
   __ sub(r5, r5, Operand(1), SetCC);
   __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
   __ jmp(&loop);
@@ -5113,48 +4712,7 @@
 }
 
 
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // r1 : the function to call
-  // r2 : cache cell for call target
-  Label done;
-
-  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
-            masm->isolate()->heap()->undefined_value());
-  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
-            masm->isolate()->heap()->the_hole_value());
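-  // I.e. the cell holds the hole while uninitialized, a JSFunction once
-  // monomorphic, and undefined once megamorphic.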
-
-  // Load the cache state into r3.
-  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmp(r3, r1);
-  __ b(eq, &done);
-  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
-  __ b(eq, &done);
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
-  __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
-
-  // An uninitialized cache is patched with the function.
-  __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // r1 : the function to call
-  // r2 : cache cell for call target
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -5169,12 +4727,16 @@
     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
     __ b(ne, &call);
     // Patch the receiver on the stack with the global receiver object.
-    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
+    __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+    __ str(r1, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
   __ JumpIfSmi(r1, &non_function);
@@ -5212,7 +4774,7 @@
   __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ SetCallKind(r5, CALL_AS_METHOD);
+  __ SetCallKind(r5, CALL_AS_FUNCTION);
   {
     Handle<Code> adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -5223,7 +4785,7 @@
   // of the original receiver from the call site).
   __ bind(&non_function);
   __ str(r1, MemOperand(sp, argc_ * kPointerSize));
-  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
+  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
   __ SetCallKind(r5, CALL_AS_METHOD);
@@ -5232,48 +4794,6 @@
 }
 
 
-void CallConstructStub::Generate(MacroAssembler* masm) {
-  // r0 : number of arguments
-  // r1 : the function to call
-  // r2 : cache cell for call target
-  Label slow, non_function_call;
-
-  // Check that the function is not a smi.
-  __ JumpIfSmi(r1, &non_function_call);
-  // Check that the function is a JSFunction.
-  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
-  __ b(ne, &slow);
-
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
-  }
-
-  // Jump to the function-specific construct stub.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
-  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // r0: number of arguments
-  // r1: called object
-  // r3: object type
-  Label do_call;
-  __ bind(&slow);
-  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ b(ne, &non_function_call);
-  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
-
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing r0).
-  __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 void CompareStub::PrintName(StringStream* stream) {
@@ -5335,41 +4855,100 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
-  __ cmp(ip, Operand(index_));
+  __ cmp(ip, Operand(scratch_));
   __ b(ls, index_out_of_range_);
 
-  __ mov(index_, Operand(index_, ASR, kSmiTagSize));
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result_, Operand(kStringRepresentationMask));
+  __ b(eq, &flat_string);
 
-  StringCharLoadGenerator::Generate(masm,
-                                    object_,
-                                    index_,
-                                    result_,
-                                    &call_runtime_);
+  // Handle non-flat strings.
+  __ and_(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(result_, Operand(kExternalStringTag));
+  __ b(gt, &sliced_string);
+  __ b(eq, &call_runtime_);
 
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  Label assure_seq_string;
+  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+  __ cmp(result_, Operand(ip));
+  __ b(ne, &call_runtime_);
+  // Get the first of the two strings and load its instance type.
+  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ ldr(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ add(scratch_, scratch_, result_);
+  __ ldr(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
+  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // Check that parent is not an external string. Go to runtime otherwise.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result_, Operand(kStringRepresentationMask));
+  __ b(ne, &call_runtime_);
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ tst(result_, Operand(kStringEncodingMask));
+  __ b(ne, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register. We can
+  // add without shifting since the smi tag size is the log2 of the
+  // number of bytes in a two-byte character.
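+  // (E.g. index 3 is stored as the smi 6, and character 3 of a two-byte
+  // string begins 6 bytes into the character data.)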
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  __ add(scratch_, object_, Operand(scratch_));
+  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
   __ mov(result_, Operand(result_, LSL, kSmiTagSize));
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              result_,
+              scratch_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
-  __ push(object_);
+  __ Push(object_, index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -5380,14 +4959,15 @@
   }
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
-  __ Move(index_, r0);
+  __ Move(scratch_, r0);
+  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(index_, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -5396,7 +4976,6 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
-  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   __ Move(result_, r0);
@@ -5425,15 +5004,15 @@
   STATIC_ASSERT(kSmiTag == 0);
   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(result_, Operand(ip));
   __ b(eq, &slow_case_);
   __ bind(&exit_);
 }
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5458,8 +5037,7 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
@@ -5714,7 +5292,7 @@
   // scratch: -
 
   // Perform a number of probes in the symbol table.
-  const int kProbes = 4;
+  static const int kProbes = 4;
   Label found_in_symbol_table;
   Label next_probe[kProbes];
   Register candidate = scratch5;  // Scratch register contains candidate.
@@ -5743,11 +5321,11 @@
 
     __ cmp(undefined, candidate);
     __ b(eq, not_found);
-    // Must be the hole (deleted entry).
+    // Must be null (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
       __ cmp(ip, candidate);
-      __ Assert(eq, "oddball in symbol table is not undefined or the hole");
+      __ Assert(eq, "oddball in symbol table is not undefined or null");
     }
     __ jmp(&next_probe[i]);
 
@@ -5839,28 +5417,41 @@
   //  0 <= from <= to <= string.length.
   // If any of these assumptions fail, we call the runtime system.
 
-  const int kToOffset = 0 * kPointerSize;
-  const int kFromOffset = 1 * kPointerSize;
-  const int kStringOffset = 2 * kPointerSize;
+  static const int kToOffset = 0 * kPointerSize;
+  static const int kFromOffset = 1 * kPointerSize;
+  static const int kStringOffset = 2 * kPointerSize;
 
-  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
+  // Check bounds and smi-ness.
+  Register to = r6;
+  Register from = r7;
+
+  __ Ldrd(to, from, MemOperand(sp, kToOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
 
   // I.e., arithmetic shift right by one un-smi-tags.
-  __ mov(r2, Operand(r2, ASR, 1), SetCC);
-  __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
+  __ mov(r2, Operand(to, ASR, 1), SetCC);
+  __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
   // If either to or from had the smi tag bit set, then carry is set now.
   __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
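   // (E.g. the smi for 5 is binary 1010; ASR #1 recovers 5 and shifts the
   // tag bit into the carry flag, so carry is set only for heap objects.)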
-  // We want to bail out to runtime here if "from" is negative.  In that
-  // case the next instruction is not executed and we fall through to
-  // bailing out to runtime.  pl is the opposite of mi.
-  // Both r2 and r3 are untagged integers.
-  __ sub(r2, r2, Operand(r3), SetCC, pl);
-  __ b(mi, &runtime);  // Fail if from > to.
+  __ b(mi, &runtime);  // From is negative.
 
-  // Make sure first argument is a string.
+  // Both to and from are smis.
+  __ sub(r2, r2, Operand(r3), SetCC);
+  __ b(mi, &runtime);  // Fail if from > to.
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked up in the symbol table in
+  // generated code.
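+  // (E.g. "abcdef".substring(1, 3) probes the symbol table for "bc" before
+  // falling back to allocating a fresh two character string.)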
+  __ cmp(r2, Operand(2));
+  __ b(lt, &runtime);
+
+  // r2: result string length
+  // r3: from index (untagged smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
+  // Make sure first argument is a sequential (or flat) string.
   __ ldr(r0, MemOperand(sp, kStringOffset));
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(r0, &runtime);
@@ -5875,15 +5466,67 @@
   __ cmp(r2, Operand(r4, ASR, 1));
   __ b(eq, &return_r0);
 
+  Label create_slice;
+  if (FLAG_string_slices) {
+    __ cmp(r2, Operand(SlicedString::kMinLength));
+    __ b(ge, &create_slice);
+  }
+
+  // r0: original string
+  // r1: instance type
+  // r2: result string length
+  // r3: from index (untagged smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
+  Label seq_string;
+  __ and_(r4, r1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
+  __ cmp(r4, Operand(kConsStringTag));
+  __ b(gt, &runtime);  // Slices and external strings go to runtime.
+  __ b(lt, &seq_string);  // Sequential strings are handled directly.
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much.)
+  __ ldr(r0, FieldMemOperand(r0, ConsString::kFirstOffset));
+  __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ tst(r1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ b(ne, &runtime);  // Cons, slices and external strings go to runtime.
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // r0: original string
+  // r1: instance type
+  // r2: result string length
+  // r3: from index (untagged smi)
+  // r6 (a.k.a. to): to (smi)
+  // r7 (a.k.a. from): from offset (smi)
+  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
+  __ cmp(r4, Operand(to));
+  __ b(lt, &runtime);  // Fail if to > length.
+  to = no_reg;
+
+  // r0: original string or left hand side of the original cons string.
+  // r1: instance type
+  // r2: result string length
+  // r3: from index (untagged smi)
+  // r7 (a.k.a. from): from offset (smi)
+  // Check for flat ASCII string.
+  Label non_ascii_flat;
+  __ tst(r1, Operand(kStringEncodingMask));
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ b(eq, &non_ascii_flat);
+
   Label result_longer_than_two;
-  // Check for special case of two character ASCII string, in which case
-  // we do a lookup in the symbol table first.
   __ cmp(r2, Operand(2));
   __ b(gt, &result_longer_than_two);
-  __ b(lt, &runtime);
 
-  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);
-
+  // Sub string of length 2 requested.
   // Get the two characters forming the sub string.
   __ add(r0, r0, Operand(r3));
   __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -5893,6 +5536,7 @@
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
+  Counters* counters = masm->isolate()->counters();
   __ jmp(&return_r0);
 
   // r2: result string length.
@@ -5903,114 +5547,18 @@
   __ jmp(&return_r0);
 
   __ bind(&result_longer_than_two);
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into r5.
-  // r0: original string
-  // r1: instance type
-  // r2: length
-  // r3: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ tst(r1, Operand(kIsIndirectStringMask));
-  __ b(eq, &seq_or_external_string);
 
-  __ tst(r1, Operand(kSlicedNotConsMask));
-  __ b(ne, &sliced_string);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
-  __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
-  __ b(ne, &runtime);
-  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
-  // Update instance type.
-  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
+  // Locate 'from' character of string.
+  __ add(r5, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(from, ASR, 1));
 
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
-  // Update instance type.
-  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
+  // Allocate the result.
+  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
 
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(r5, r0);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // r5: underlying subject string
-    // r1: instance type of underlying subject string
-    // r2: length
-    // r3: adjusted start index (untagged)
-    __ cmp(r2, Operand(SlicedString::kMinLength));
-    // Short slice.  Copy instead of slicing.
-    __ b(lt, &copy_routine);
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ tst(r1, Operand(kStringEncodingMask));
-    __ b(eq, &two_byte_slice);
-    __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
-    __ bind(&set_slice_header);
-    __ mov(r3, Operand(r3, LSL, 1));
-    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
-    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
-    __ jmp(&return_r0);
-
-    __ bind(&copy_routine);
-  }
-
-  // r5: underlying subject string
-  // r1: instance type of underlying subject string
-  // r2: length
-  // r3: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(r1, Operand(kExternalStringTag));
-  __ b(eq, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ tst(r1, Operand(kShortExternalStringTag));
-  __ b(ne, &runtime);
-  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
-  // r5 already points to the first character of underlying string.
-  __ jmp(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
-  __ tst(r1, Operand(kStringEncodingMask));
-  __ b(eq, &two_byte_sequential);
-
-  // Allocate and copy the resulting ASCII string.
-  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
-
-  // Locate first character of substring to copy.
-  __ add(r5, r5, r3);
+  // r0: result string
+  // r2: result string length
+  // r5: first character of substring to copy
+  // r7 (a.k.a. from): from offset (smi)
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
 
@@ -6023,16 +5571,30 @@
                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ jmp(&return_r0);
 
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);
+  __ bind(&non_ascii_flat);
+  // r0: original string
+  // r2: result string length
+  // r7 (a.k.a. from): from offset (smi)
+  // Check for flat two byte string.
 
-  // Locate first character of substring to copy.
+  // Locate 'from' character of string.
+  __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is 2 times the value which matches the size of a two
+  // byte character.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ add(r5, r5, Operand(r3, LSL, 1));
+  __ add(r5, r5, Operand(from));
+
+  // Allocate the result.
+  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
+
+  // r0: result string
+  // r2: result string length
+  // r5: first character of substring to copy
   // Locate first character of result.
   __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
+  from = no_reg;
+
   // r0: result string.
   // r1: first character of result.
   // r2: result length.
@@ -6040,9 +5602,72 @@
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(
       masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
+  __ jmp(&return_r0);
+
+  if (FLAG_string_slices) {
+    __ bind(&create_slice);
+    // r0: original string
+    // r1: instance type
+    // r2: length
+    // r3: from index (untagged smi)
+    // r6 (a.k.a. to): to (smi)
+    // r7 (a.k.a. from): from offset (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ tst(r1, Operand(kStringRepresentationMask));
+    __ b(eq, &seq_string);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ tst(r1, Operand(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ b(eq, &runtime);
+
+    __ tst(r1, Operand(kSlicedNotConsMask));
+    __ b(ne, &sliced_string);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
+    __ LoadRoot(r9, Heap::kEmptyStringRootIndex);
+    __ cmp(r5, r9);
+    __ b(ne, &runtime);
+    __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+    __ add(r7, r7, r5);
+    __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(r5, r0);
+
+    __ bind(&allocate_slice);
+    // r1: instance type of original string
+    // r2: length
+    // r5: underlying subject string
+    // r7 (a.k.a. from): from offset (smi)
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyways due to externalized strings.
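+    // (A slice stores just a parent pointer and a character offset, so
+    // every read goes through the parent and sees its current encoding.)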
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ tst(r1, Operand(kStringEncodingMask));
+    __ b(eq, &two_byte_slice);
+    __ AllocateAsciiSlicedString(r0, r2, r3, r4, &runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(r0, r2, r3, r4, &runtime);
+    __ bind(&set_slice_header);
+    __ str(r7, FieldMemOperand(r0, SlicedString::kOffsetOffset));
+    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+  }
 
   __ bind(&return_r0);
-  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Ret();
@@ -6075,7 +5700,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ cmp(length, Operand(0));
+  __ tst(length, Operand(length));
   __ b(ne, &compare_chars);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   __ Ret();
@@ -6108,7 +5733,7 @@
   __ mov(scratch1, scratch2, LeaveCC, gt);
   Register min_length = scratch1;
   STATIC_ASSERT(kSmiTag == 0);
-  __ cmp(min_length, Operand(0));
+  __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
   // Compare loop.
@@ -6199,7 +5824,7 @@
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label call_runtime, call_builtin;
+  Label string_add_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   Counters* counters = masm->isolate()->counters();
@@ -6214,7 +5839,7 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfEitherSmi(r0, r1, &call_runtime);
+    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
     // Load instance types.
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6224,7 +5849,7 @@
     // If either is not a string, go to runtime.
     __ tst(r4, Operand(kIsNotStringMask));
     __ tst(r5, Operand(kIsNotStringMask), eq);
-    __ b(ne, &call_runtime);
+    __ b(ne, &string_add_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -6293,7 +5918,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
-                                                  &call_runtime);
+                                                  &string_add_runtime);
 
   // Get the two characters forming the sub string.
   __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
@@ -6315,7 +5940,7 @@
   // halfword store instruction (which assumes that processor is
   // in a little endian mode)
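   // (r2 holds both character codes, first character in the low byte, so
   // one strh writes them in string order on a little-endian core.)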
   __ mov(r6, Operand(2));
-  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
+  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
   __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
@@ -6323,14 +5948,14 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(r6, Operand(ConsString::kMinLength));
+  __ cmp(r6, Operand(String::kMinNonFlatLength));
   __ b(lt, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
   __ cmp(r6, Operand(String::kMaxLength + 1));
-  __ b(hs, &call_runtime);
+  __ b(hs, &string_add_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object.
   // If both strings are ASCII the result is an ASCII cons string.
@@ -6348,7 +5973,7 @@
 
   // Allocate an ASCII cons string.
   __ bind(&ascii_data);
-  __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
+  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
@@ -6373,13 +5998,11 @@
   __ b(eq, &ascii_data);
 
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
+  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
   __ jmp(&allocated);
 
-  // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
-  // Handle creating a flat result from either external or sequential strings.
-  // Locate the first characters' locations.
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
   // r0: first string
   // r1: second string
   // r2: length of first string
@@ -6387,7 +6010,6 @@
   // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // r6: sum of lengths.
-  Label first_prepared, second_prepared;
   __ bind(&string_add_flat_result);
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -6395,88 +6017,97 @@
     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
-
-  // Check whether both strings have same encoding
-  __ eor(r7, r4, Operand(r5));
-  __ tst(r7, Operand(kStringEncodingMask));
-  __ b(ne, &call_runtime);
-
+  // Check that both strings are sequential.
   STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r4, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r7,
-         r0,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
-         LeaveCC,
-         eq);
-  __ b(eq, &first_prepared);
-  // External string: rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ tst(r4, Operand(kShortExternalStringMask));
-  __ b(ne, &call_runtime);
-  __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
-  __ bind(&first_prepared);
-
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(r5, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r1,
-         r1,
-         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
-         LeaveCC,
-         eq);
-  __ b(eq, &second_prepared);
-  // External string: rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ tst(r5, Operand(kShortExternalStringMask));
-  __ b(ne, &call_runtime);
-  __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
-  __ bind(&second_prepared);
-
-  Label non_ascii_string_add_flat_result;
-  // r7: first character of first string
-  // r1: first character of second string
+  __ tst(r5, Operand(kStringRepresentationMask), eq);
+  __ b(ne, &string_add_runtime);
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // r0: first string.
+  // r1: second string.
   // r2: length of first string.
   // r3: length of second string.
-  // r6: sum of lengths.
-  // Both strings have the same encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ tst(r5, Operand(kStringEncodingMask));
+  // r6: sum of lengths.
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ eor(r7, r4, Operand(r5));
+  __ tst(r7, Operand(kStringEncodingMask));
+  __ b(ne, &string_add_runtime);
+  // And see if it's ASCII or two-byte.
+  __ tst(r4, Operand(kStringEncodingMask));
   __ b(eq, &non_ascii_string_add_flat_result);
 
-  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
-  __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // r0: result string.
-  // r7: first character of first string.
-  // r1: first character of second string.
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // r6: length of resulting flat string
+  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r0: first character of first string.
+  // r1: second string.
   // r2: length of first string.
   // r3: length of second string.
   // r6: first character of result.
-  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
+  // r7: result string.
+  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
+
+  // Load second argument and locate first character.
+  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // r1: first character of second string.
+  // r3: length of second string.
   // r6: next character of result.
+  // r7: result string.
   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
+  __ mov(r0, Operand(r7));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
   __ bind(&non_ascii_string_add_flat_result);
-  __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
-  __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // r0: result string.
-  // r7: first character of first string.
-  // r1: first character of second string.
+  // Both strings are sequential two byte strings.
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r6: sum of the lengths of the strings.
+  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
+  // r0: first string.
+  // r1: second string.
+  // r2: length of first string.
+  // r3: length of second string.
+  // r7: result string.
+
+  // Locate first character of result.
+  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r0: first character of first string.
+  // r1: second string.
   // r2: length of first string.
   // r3: length of second string.
   // r6: first character of result.
-  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
-  // r6: next character of result.
+  // r7: result string.
+  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
+
+  // Locate first character of second argument.
+  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r1: first character of second string.
+  // r3: length of second string.
+  // r6: next character of result (after copy of first string).
+  // r7: result string.
   StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
+
+  __ mov(r0, Operand(r7));
   __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
 
   // Just jump to runtime to add the two strings.
-  __ bind(&call_runtime);
+  __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -6560,15 +6191,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label unordered;
   Label miss;
   __ and_(r2, r1, Operand(r0));
   __ JumpIfSmi(r2, &generic_stub);
 
   __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
-  __ b(ne, &maybe_undefined1);
+  __ b(ne, &miss);
   __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
-  __ b(ne, &maybe_undefined2);
+  __ b(ne, &miss);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or VFP3 is unsupported.
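   // (A VFP compare involving NaN yields an "unordered" result and sets the
   // V flag, which the branch to &unordered tests.)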
@@ -6592,28 +6223,14 @@
     __ mov(r0, Operand(LESS), LeaveCC, lt);
     __ mov(r0, Operand(GREATER), LeaveCC, gt);
     __ Ret();
+
+    __ bind(&unordered);
   }
 
-  __ bind(&unordered);
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
   __ bind(&generic_stub);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
-  __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-    __ b(ne, &miss);
-    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
-    __ b(ne, &maybe_undefined2);
-    __ jmp(&unordered);
-  }
-
-  __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
-    __ b(eq, &unordered);
-  }
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -6661,8 +6278,6 @@
   ASSERT(state_ == CompareIC::STRINGS);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
-
   // Registers containing left and right operands respectively.
   Register left = r1;
   Register right = r0;
@@ -6696,39 +6311,28 @@
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  if (equality) {
-    ASSERT(GetCondition() == eq);
-    STATIC_ASSERT(kSymbolTag != 0);
-    __ and_(tmp3, tmp1, Operand(tmp2));
-    __ tst(tmp3, Operand(kIsSymbolMask));
-    // Make sure r0 is non-zero. At this point input operands are
-    // guaranteed to be non-zero.
-    ASSERT(right.is(r0));
-    __ Ret(ne);
-  }
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp3, tmp1, Operand(tmp2));
+  __ tst(tmp3, Operand(kIsSymbolMask));
+  // Make sure r0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(r0));
+  __ Ret(ne);
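+  // (Symbols are interned, so distinct symbol pointers can never be equal
+  // strings; the non-zero pointer left in r0 encodes "not equal".)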
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
-      tmp1, tmp2, tmp3, tmp4, &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2, tmp3);
-  } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3, tmp4);
-  }
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
   __ Push(left, right);
-  if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
-  } else {
-    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-  }
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6755,47 +6359,25 @@
 }
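
// [Editorial sketch] Why the symbol test above can answer equality without
// reading characters: symbols are interned, so two symbol references are
// equal exactly when they are the same object.
bool SymbolsEqual(const void* left_symbol, const void* right_symbol) {
  return left_symbol == right_symbol;  // interned: identity decides equality
}
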
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
-  Label miss;
-  __ and_(r2, r1, Operand(r0));
-  __ JumpIfSmi(r2, &miss);
-  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(known_map_));
-  __ b(ne, &miss);
-  __ cmp(r3, Operand(known_map_));
-  __ b(ne, &miss);
-
-  __ sub(r0, r0, Operand(r1));
-  __ Ret();
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  {
-    // Call the runtime system in a fresh internal frame.
-    ExternalReference miss =
-        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  __ Push(r1, r0);
+  __ push(lr);
 
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r1, r0);
-    __ push(lr);
-    __ Push(r1, r0);
-    __ mov(ip, Operand(Smi::FromInt(op_)));
-    __ push(ip);
-    __ CallExternalReference(miss, 3);
-    // Compute the entry point of the rewritten stub.
-    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-    // Restore registers.
-    __ pop(lr);
-    __ pop(r0);
-    __ pop(r1);
-  }
-
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss =
+      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  __ EnterInternalFrame();
+  __ Push(r1, r0);
+  __ mov(ip, Operand(Smi::FromInt(op_)));
+  __ push(ip);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(lr);
+  __ pop(r0);
+  __ pop(r1);
   __ Jump(r2);
 }
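
// [Editorial sketch] The "Code::kHeaderSize - kHeapObjectTag" addition above,
// spelled out: strip the heap-object tag from the Code pointer returned in r0
// and skip the object header to reach the first instruction (tag value and
// header size are V8 conventions assumed here).
#include <cstdint>
uintptr_t CodeEntryPoint(uintptr_t tagged_code, uintptr_t header_size,
                         uintptr_t heap_object_tag) {
  return tagged_code + header_size - heap_object_tag;
}
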
 
@@ -6828,18 +6410,19 @@
 }
 
 
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
-                                                        Label* miss,
-                                                        Label* done,
-                                                        Register receiver,
-                                                        Register properties,
-                                                        Handle<String> name,
-                                                        Register scratch0) {
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the hole value).
+  // (their names are the null value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // scratch0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -6867,17 +6450,10 @@
     __ b(eq, done);
 
     if (i != kInlinedProbes - 1) {
-      // Load the hole ready for use below:
-      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
       // Stop if found the property.
       __ cmp(entity_name, Operand(Handle<String>(name)));
       __ b(eq, miss);
 
-      Label the_hole;
-      __ cmp(entity_name, tmp);
-      __ b(eq, &the_hole);
-
       // Check if the entry name is not a symbol.
       __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
       __ ldrb(entity_name,
@@ -6885,8 +6461,6 @@
       __ tst(entity_name, Operand(kIsSymbolMask));
       __ b(eq, miss);
 
-      __ bind(&the_hole);
-
       // Restore the properties.
       __ ldr(properties,
              FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -6901,12 +6475,14 @@
   __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ mov(r1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ cmp(r0, Operand(0));
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ tst(r0, Operand(r0));
   __ ldm(ia_w, sp, spill_mask);
 
   __ b(eq, done);
   __ b(ne, miss);
+  return result;
 }
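
// [Editorial sketch] The probe schedule used by the inlined lookup above.
// Capacity is a power of two, so "& mask" is the cheap modulo; the i + i*i
// offsets give the quadratic probing order.
#include <cstdint>
uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t mask) {
  return (hash + i + i * i) & mask;
}
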
 
 
@@ -6921,11 +6497,6 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
-  ASSERT(!elements.is(scratch1));
-  ASSERT(!elements.is(scratch2));
-  ASSERT(!name.is(scratch1));
-  ASSERT(!name.is(scratch2));
-
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6969,17 +6540,11 @@
       ~(scratch1.bit() | scratch2.bit());
 
   __ stm(db_w, sp, spill_mask);
-  if (name.is(r0)) {
-    ASSERT(!elements.is(r1));
-    __ Move(r1, name);
-    __ Move(r0, elements);
-  } else {
-    __ Move(r0, elements);
-    __ Move(r1, name);
-  }
+  __ Move(r0, elements);
+  __ Move(r1, name);
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
-  __ cmp(r0, Operand(0));
+  __ tst(r0, Operand(r0));
   __ mov(scratch2, Operand(r2));
   __ ldm(ia_w, sp, spill_mask);
 
@@ -6989,8 +6554,6 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
@@ -7080,337 +6643,6 @@
 }
 
 
-struct AheadOfTimeWriteBarrierStubList {
-  Register object, value, address;
-  RememberedSetAction action;
-};
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
-  // Used in RegExpExecStub.
-  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
-  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
-  // Used in CompileArrayPushCall.
-  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
-  // Also used in KeyedStoreIC::GenerateGeneric.
-  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
-  // Used in CompileStoreGlobal.
-  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
-  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
-  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
-  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
-  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
-  // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
-  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
-  // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
-  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
-  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
-  // StoreArrayLiteralElementStub::Generate
-  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
-  // Null termination.
-  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
-  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    if (object_.is(entry->object) &&
-        value_.is(entry->value) &&
-        address_.is(entry->address) &&
-        remembered_set_action_ == entry->action &&
-        save_fp_regs_mode_ == kDontSaveFPRegs) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
-  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
-  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
-  stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    RecordWriteStub stub(entry->object,
-                         entry->value,
-                         entry->address,
-                         entry->action,
-                         kDontSaveFPRegs);
-    stub.GetCode()->set_is_pregenerated(true);
-  }
-}
-
-
-// Takes the input in 3 registers: address_, value_, and object_.  A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed.  The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  Label skip_to_incremental_noncompacting;
-  Label skip_to_incremental_compacting;
-
-  // The first two instructions are generated with labels so as to get the
-  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
-  // forth between a compare instructions (a nop in this position) and the
-  // real branch when we start and stop incremental heap marking.
-  // See RecordWriteStub::Patch for details.
-  __ b(&skip_to_incremental_noncompacting);
-  __ b(&skip_to_incremental_compacting);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  }
-  __ Ret();
-
-  __ bind(&skip_to_incremental_noncompacting);
-  GenerateIncremental(masm, INCREMENTAL);
-
-  __ bind(&skip_to_incremental_compacting);
-  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
-  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
-  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
-  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
-  PatchBranchIntoNop(masm, 0);
-  PatchBranchIntoNop(masm, Assembler::kInstrSize);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
-  regs_.Save(masm);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    Label dont_need_remembered_set;
-
-    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
-                           regs_.scratch0(),
-                           &dont_need_remembered_set);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     ne,
-                     &dont_need_remembered_set);
-
-    // First notify the incremental marker if necessary, then update the
-    // remembered set.
-    CheckNeedsToInformIncrementalMarker(
-        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
-    InformIncrementalMarker(masm, mode);
-    regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-
-    __ bind(&dont_need_remembered_set);
-  }
-
-  CheckNeedsToInformIncrementalMarker(
-      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
-  InformIncrementalMarker(masm, mode);
-  regs_.Restore(masm);
-  __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-  int argument_count = 3;
-  __ PrepareCallCFunction(argument_count, regs_.scratch0());
-  Register address =
-      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(r0));
-  __ Move(address, regs_.address());
-  __ Move(r0, regs_.object());
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ Move(r1, address);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ ldr(r1, MemOperand(address, 0));
-  }
-  __ mov(r2, Operand(ExternalReference::isolate_address()));
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ CallCFunction(
-        ExternalReference::incremental_evacuation_record_write_function(
-            masm->isolate()),
-        argument_count);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ CallCFunction(
-        ExternalReference::incremental_marking_record_write_function(
-            masm->isolate()),
-        argument_count);
-  }
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
-    MacroAssembler* masm,
-    OnNoNeedToInformIncrementalMarker on_no_need,
-    Mode mode) {
-  Label on_black;
-  Label need_incremental;
-  Label need_incremental_pop_scratch;
-
-  // Let's look at the color of the object: if it is not black, we don't have
-  // to inform the incremental marker.
-  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ Ret();
-  }
-
-  __ bind(&on_black);
-
-  // Get the value from the slot.
-  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
-  if (mode == INCREMENTAL_COMPACTION) {
-    Label ensure_not_white;
-
-    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kEvacuationCandidateMask,
-                     eq,
-                     &ensure_not_white);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
-                     eq,
-                     &need_incremental);
-
-    __ bind(&ensure_not_white);
-  }
-
-  // We need extra registers for this, so we push the object and the address
-  // register temporarily.
-  __ Push(regs_.object(), regs_.address());
-  __ EnsureNotWhite(regs_.scratch0(),  // The value.
-                    regs_.scratch1(),  // Scratch.
-                    regs_.object(),  // Scratch.
-                    regs_.address(),  // Scratch.
-                    &need_incremental_pop_scratch);
-  __ Pop(regs_.object(), regs_.address());
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ Ret();
-  }
-
-  __ bind(&need_incremental_pop_scratch);
-  __ Pop(regs_.object(), regs_.address());
-
-  __ bind(&need_incremental);
-
-  // Fall through when we need to inform the incremental marker.
-}
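
// [Editorial sketch] The decision implemented above, reduced to its tri-color
// essence (names illustrative, not V8 API): the incremental marker only has
// to hear about a store when a black (already scanned) object is made to
// point at a value the collector still has to mark or may evacuate.
bool MustInformMarker(bool object_is_black, bool value_needs_attention) {
  return object_is_black && value_needs_attention;
}
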
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : element value to store
-  //  -- r1    : array literal
-  //  -- r2    : map of array literal
-  //  -- r3    : element index as smi
-  //  -- r4    : array literal index in function as smi
-  // -----------------------------------
-
-  Label element_done;
-  Label double_elements;
-  Label smi_element;
-  Label slow_elements;
-  Label fast_elements;
-
-  __ CheckFastElements(r2, r5, &double_elements);
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-  __ JumpIfSmi(r0, &smi_element);
-  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);
-
-  // Storing into the array literal requires an elements transition. Call
-  // into the runtime.
-  __ bind(&slow_elements);
-  __ Push(r1, r3, r0);
-  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
-  __ Push(r5, r4);
-  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-  __ bind(&fast_elements);
-  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ str(r0, MemOperand(r6, 0));
-  // Update the write barrier for the array store.
-  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
-  __ bind(&smi_element);
-  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
-  __ Ret();
-
-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-  __ bind(&double_elements);
-  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
-                                 &slow_elements);
-  __ Ret();
-}
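
// [Editorial sketch] The dispatch above, over an illustrative ElementsKind
// (the real kinds live elsewhere in V8): doubles go to the double store, a
// smi can always be stored raw, an object needs a write barrier, and a
// non-smi stored into smi-only backing must transition via the runtime.
enum SketchKind { SMI_ONLY, FAST, FAST_DOUBLE };
enum StorePath { STORE_DOUBLE, STORE_RAW_SMI, STORE_WITH_BARRIER, RUNTIME };
StorePath ClassifyStore(SketchKind elements, bool value_is_smi) {
  if (elements == FAST_DOUBLE) return STORE_DOUBLE;
  if (value_is_smi) return STORE_RAW_SMI;
  return (elements == FAST) ? STORE_WITH_BARRIER : RUNTIME;
}
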
-
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 38ed476..cdea03e 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,25 +58,6 @@
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
- public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -136,7 +117,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -235,7 +216,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -406,9 +387,6 @@
         the_heap_number_(the_heap_number),
         scratch_(scratch) { }
 
-  bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-
  private:
   Register the_int_;
   Register the_heap_number_;
@@ -457,218 +435,6 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
- public:
-  RecordWriteStub(Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action,
-                  SaveFPRegsMode fp_mode)
-      : object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
-        regs_(object,   // An input reg.
-              address,  // An input reg.
-              value) {  // One scratch reg.
-  }
-
-  enum Mode {
-    STORE_BUFFER_ONLY,
-    INCREMENTAL,
-    INCREMENTAL_COMPACTION
-  };
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
-  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
-    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
-    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
-  }
-
-  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
-    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
-    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
-  }
-
-  static Mode GetMode(Code* stub) {
-    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
-    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
-                                                   Assembler::kInstrSize);
-
-    if (Assembler::IsBranch(first_instruction)) {
-      return INCREMENTAL;
-    }
-
-    ASSERT(Assembler::IsTstImmediate(first_instruction));
-
-    if (Assembler::IsBranch(second_instruction)) {
-      return INCREMENTAL_COMPACTION;
-    }
-
-    ASSERT(Assembler::IsTstImmediate(second_instruction));
-
-    return STORE_BUFFER_ONLY;
-  }
-
-  static void Patch(Code* stub, Mode mode) {
-    MacroAssembler masm(NULL,
-                        stub->instruction_start(),
-                        stub->instruction_size());
-    switch (mode) {
-      case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
-               GetMode(stub) == INCREMENTAL_COMPACTION);
-        PatchBranchIntoNop(&masm, 0);
-        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
-        break;
-      case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, 0);
-        break;
-      case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
-        break;
-    }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
-  }
-
- private:
-  // This is a helper class for freeing up 3 scratch registers.  The input is
-  // two registers that must be preserved and one scratch register provided by
-  // the caller.
-  class RegisterAllocation {
-   public:
-    RegisterAllocation(Register object,
-                       Register address,
-                       Register scratch0)
-        : object_(object),
-          address_(address),
-          scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
-      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
-    }
-
-    void Save(MacroAssembler* masm) {
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      // We don't have to save scratch0_ because it was given to us as
-      // a scratch register.
-      masm->push(scratch1_);
-    }
-
-    void Restore(MacroAssembler* masm) {
-      masm->pop(scratch1_);
-    }
-
-    // If we have to call into C then we need to save and restore all caller-
-    // saved registers that were not already preserved.  The scratch registers
-    // will be restored by other means so we don't bother pushing them here.
-    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(VFP3);
-        masm->sub(sp,
-                  sp,
-                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
-        // Save all VFP registers except d0.
-        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
-          DwVfpRegister reg = DwVfpRegister::from_code(i);
-          masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
-        }
-      }
-    }
-
-    inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
-                                           SaveFPRegsMode mode) {
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(VFP3);
-        // Restore all VFP registers except d0.
-        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
-          DwVfpRegister reg = DwVfpRegister::from_code(i);
-          masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
-        }
-        masm->add(sp,
-                  sp,
-                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
-      }
-      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
-    }
-
-    inline Register object() { return object_; }
-    inline Register address() { return address_; }
-    inline Register scratch0() { return scratch0_; }
-    inline Register scratch1() { return scratch1_; }
-
-   private:
-    Register object_;
-    Register address_;
-    Register scratch0_;
-    Register scratch1_;
-
-    Register GetRegThatIsNotOneOf(Register r1,
-                                  Register r2,
-                                  Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
-        Register candidate = Register::FromAllocationIndex(i);
-        if (candidate.is(r1)) continue;
-        if (candidate.is(r2)) continue;
-        if (candidate.is(r3)) continue;
-        return candidate;
-      }
-      UNREACHABLE();
-      return no_reg;
-    }
-    friend class RecordWriteStub;
-  };
-
-  enum OnNoNeedToInformIncrementalMarker {
-    kReturnOnNoNeedToInformIncrementalMarker,
-    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  };
-
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
-  void CheckNeedsToInformIncrementalMarker(
-      MacroAssembler* masm,
-      OnNoNeedToInformIncrementalMarker on_no_need,
-      Mode mode);
-  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
-  void Activate(Code* code) {
-    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-  }
-
-  class ObjectBits: public BitField<int, 0, 4> {};
-  class ValueBits: public BitField<int, 4, 4> {};
-  class AddressBits: public BitField<int, 8, 4> {};
-  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
-  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
-  Label slow_;
-  RegisterAllocation regs_;
-};
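
// [Editorial sketch] MinorKey() above packs every stub parameter into one
// integer so each (object, value, address, action, fp mode) combination is
// cached as its own code object; the same packing with explicit shifts,
// mirroring the 4+4+4+1+1 BitField layout:
int MinorKeySketch(int object_code, int value_code, int address_code,
                   int action, int fp_mode) {
  return object_code | (value_code << 4) | (address_code << 8) |
         (action << 12) | (fp_mode << 13);
}
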
-
-
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
@@ -856,13 +622,14 @@
 
   void Generate(MacroAssembler* masm);
 
-  static void GenerateNegativeLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register receiver,
-                                     Register properties,
-                                     Handle<String> name,
-                                     Register scratch0);
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -872,8 +639,6 @@
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -886,7 +651,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryLookup; }
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index befd8f2..bf748a9 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,409 +30,23 @@
 #if defined(V8_TARGET_ARCH_ARM)
 
 #include "codegen.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
-  switch (type) {
-    case TranscendentalCache::SIN: return &sin;
-    case TranscendentalCache::COS: return &cos;
-    case TranscendentalCache::TAN: return &tan;
-    case TranscendentalCache::LOG: return &log;
-    default: UNIMPLEMENTED();
-  }
-  return NULL;
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
-  return &sqrt;
-}
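
// [Editorial usage note] On ARM the "created" math functions are simply the
// libm entry points, so a caller sketch is just:
//   UnaryMathFunction f = CreateTranscendentalFunction(TranscendentalCache::SIN);
//   double y = f(0.5);  // identical to sin(0.5)
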
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
-  masm->set_has_frame(true);
+  masm->EnterInternalFrame();
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
-  masm->set_has_frame(false);
+  masm->LeaveInternalFrame();
 }
 
 
-// -------------------------------------------------------------------------
-// Code generators
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
-  // Set transitioned map.
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
-                      HeapObject::kMapOffset,
-                      r3,
-                      r9,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-  __ b(eq, &only_change_map);
-
-  __ push(lr);
-  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: source FixedArray
-  // r5: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(lr, lr, Operand(r5, LSL, 2));
-  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
-  // r6: destination FixedDoubleArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
-  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-  // Update receiver's map.
-
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
-                      HeapObject::kMapOffset,
-                      r3,
-                      r9,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ add(r3, r6, Operand(kHeapObjectTag));
-  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ RecordWriteField(r2,
-                      JSObject::kElementsOffset,
-                      r3,
-                      r9,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Prepare for conversion loop.
-  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r6, r7, Operand(r5, LSL, 2));
-  __ mov(r4, Operand(kHoleNanLower32));
-  __ mov(r5, Operand(kHoleNanUpper32));
-  // r3: begin of source FixedArray element fields, not tagged
-  // r4: kHoleNanLower32
-  // r5: kHoleNanUpper32
-  // r6: end of destination FixedDoubleArray, not tagged
-  // r7: begin of FixedDoubleArray element fields, not tagged
-  if (!vfp3_supported) __ Push(r1, r0);
-
-  __ b(&entry);
-
-  __ bind(&only_change_map);
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
-                      HeapObject::kMapOffset,
-                      r3,
-                      r9,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ b(&done);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(lr);
-  __ b(fail);
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ ldr(r9, MemOperand(r3, 4, PostIndex));
-  // r9: current element
-  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  if (vfp3_supported) {
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, r9);
-    __ vcvt_f64_s32(d0, s0);
-    __ vstr(d0, r7, 0);
-    __ add(r7, r7, Operand(8));
-  } else {
-    FloatingPointHelper::ConvertIntToDouble(masm,
-                                            r9,
-                                            FloatingPointHelper::kCoreRegisters,
-                                            d0,
-                                            r0,
-                                            r1,
-                                            lr,
-                                            s0);
-    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
-  }
-  __ b(&entry);
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ SmiTag(r9);
-    __ orr(r9, r9, Operand(1));
-    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, "object found in smi-only array");
-  }
-  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
-
-  __ bind(&entry);
-  __ cmp(r7, r6);
-  __ b(lt, &loop);
-
-  if (!vfp3_supported) __ Pop(r1, r0);
-  __ pop(lr);
-  __ bind(&done);
-}
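
// [Editorial sketch] The conversion loop above, per element: a smi is
// untagged (arithmetic shift by the 1-bit smi tag) and widened to double,
// while a hole becomes the dedicated hole-NaN bit pattern (hole_nan stands
// in for the kHoleNanUpper32/kHoleNanLower32 pair loaded above).
#include <cstdint>
#include <cstring>
uint64_t SmiSlotToDoubleBits(int32_t tagged_slot, bool is_hole,
                             uint64_t hole_nan) {
  if (is_hole) return hole_nan;
  double d = static_cast<double>(tagged_slot >> 1);  // kSmiTagSize == 1
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}
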
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
-  Label entry, loop, convert_hole, gc_required, only_change_map;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-  __ b(eq, &only_change_map);
-
-  __ push(lr);
-  __ Push(r3, r2, r1, r0);
-  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: source FixedDoubleArray
-  // r5: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r0, r0, Operand(r5, LSL, 1));
-  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
-  // r6: destination FixedArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
-  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
-
-  // Prepare for conversion loop.
-  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
-  __ add(r6, r6, Operand(kHeapObjectTag));
-  __ add(r5, r3, Operand(r5, LSL, 1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
-  // Using offset addresses in r4 to take full advantage of post-indexing.
-  // r3: begin of destination FixedArray element fields, not tagged
-  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
-  // r5: end of destination FixedArray, not tagged
-  // r6: destination FixedArray
-  // r7: the-hole pointer
-  // r9: heap number map
-  __ b(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ Pop(r3, r2, r1, r0);
-  __ pop(lr);
-  __ b(fail);
-
-  __ bind(&loop);
-  __ ldr(r1, MemOperand(r4, 8, PostIndex));
-  // lr: current element's upper 32 bit
-  // r4: address of next element's upper 32 bit
-  __ cmp(r1, Operand(kHoleNanUpper32));
-  __ b(eq, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
-  // r2: new heap number
-  __ ldr(r0, MemOperand(r4, 12, NegOffset));
-  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
-  __ mov(r0, r3);
-  __ str(r2, MemOperand(r3, 4, PostIndex));
-  __ RecordWrite(r6,
-                 r0,
-                 r2,
-                 kLRHasBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ b(&entry);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ str(r7, MemOperand(r3, 4, PostIndex));
-
-  __ bind(&entry);
-  __ cmp(r3, r5);
-  __ b(lt, &loop);
-
-  __ Pop(r3, r2, r1, r0);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ RecordWriteField(r2,
-                      JSObject::kElementsOffset,
-                      r6,
-                      r9,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(lr);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
-                      HeapObject::kMapOffset,
-                      r3,
-                      r9,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
-                                       Register string,
-                                       Register index,
-                                       Register result,
-                                       Label* call_runtime) {
-  // Fetch the instance type of the receiver into result register.
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ tst(result, Operand(kIsIndirectStringMask));
-  __ b(eq, &check_sequential);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ tst(result, Operand(kSlicedNotConsMask));
-  __ b(eq, &cons_string);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
-  __ add(index, index, Operand(result, ASR, kSmiTagSize));
-  __ jmp(&indirect_string_loaded);
-
-  // Handle cons strings.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
-  __ b(ne, call_runtime);
-  // Get the first of the two strings and load its instance type.
-  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // Distinguish sequential and external strings. Only these two string
-  // representations can reach here (slices and flat cons strings have been
-  // reduced to the underlying sequential or external string).
-  Label external_string, check_encoding;
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ tst(result, Operand(kStringRepresentationMask));
-  __ b(ne, &external_string);
-
-  // Prepare sequential strings
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ add(string,
-         string,
-         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  __ jmp(&check_encoding);
-
-  // Handle external strings.
-  __ bind(&external_string);
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ tst(result, Operand(kIsIndirectStringMask));
-    __ Assert(eq, "external string expected, but not found");
-  }
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ tst(result, Operand(kShortExternalStringMask));
-  __ b(ne, call_runtime);
-  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
-  Label ascii, done;
-  __ bind(&check_encoding);
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ tst(result, Operand(kStringEncodingMask));
-  __ b(ne, &ascii);
-  // Two-byte string.
-  __ ldrh(result, MemOperand(string, index, LSL, 1));
-  __ jmp(&done);
-  __ bind(&ascii);
-  // Ascii string.
-  __ ldrb(result, MemOperand(string, index));
-  __ bind(&done);
-}
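
// [Editorial sketch] The indirect-string handling above, on an illustrative
// string model (not V8's real classes): a slice redirects to its parent with
// an offset, a flat cons redirects to its first part, and a non-flat cons
// bails out so the runtime can flatten it first.
#include <cstddef>
struct Str {
  enum Shape { SEQUENTIAL, EXTERNAL, SLICE, CONS } shape;
  Str* first;    // slice parent, or cons first part
  Str* second;   // cons second part
  bool empty;    // true for the empty string
  int offset;    // slice offset into the parent
};
Str* ResolveForCharAt(Str* s, int* index) {
  for (;;) {
    if (s->shape == Str::SLICE) {
      *index += s->offset;
      s = s->first;
    } else if (s->shape == Str::CONS) {
      if (!s->second->empty) return NULL;  // not flat: defer to the runtime
      s = s->first;
    } else {
      return s;  // sequential or external: characters are reachable now
    }
  }
}
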
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index c340e6b..d27982a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -29,6 +29,7 @@
 #define V8_ARM_CODEGEN_ARM_H_
 
 #include "ast.h"
+#include "code-stubs-arm.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -68,26 +69,21 @@
                               int pos,
                               bool right_here = false);
 
+  // Constants related to patching of inlined load/store.
+  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+    return FLAG_debug_code ? 32 : 13;
+  }
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
+  static int GetInlinedNamedStoreInstructionsAfterPatch() {
+    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+    return Isolate::Current()->inlined_write_barrier_size() + 4;
+  }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
 
-class StringCharLoadGenerator : public AllStatic {
- public:
-  // Generates the code for handling different string types and loading the
-  // indexed character into |result|.  We expect |index| as untagged input and
-  // |result| as untagged output.
-  static void Generate(MacroAssembler* masm,
-                       Register string,
-                       Register index,
-                       Register result,
-                       Label* call_runtime);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e767001..823c6ff 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -87,21 +87,22 @@
 namespace internal {
 
 // Constant pool marker.
-const int kConstantPoolMarkerMask = 0xffe00000;
-const int kConstantPoolMarker = 0x0c000000;
-const int kConstantPoolLengthMask = 0x001ffff;
+static const int kConstantPoolMarkerMask = 0xffe00000;
+static const int kConstantPoolMarker = 0x0c000000;
+static const int kConstantPoolLengthMask = 0x001ffff;
 
 // Number of registers in normal ARM mode.
-const int kNumRegisters = 16;
+static const int kNumRegisters = 16;
 
 // VFP support.
-const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 16;
-const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
+static const int kNumVFPSingleRegisters = 32;
+static const int kNumVFPDoubleRegisters = 16;
+static const int kNumVFPRegisters =
+    kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
 
 // PC is register 15.
-const int kPCRegister = 15;
-const int kNoRegister = -1;
+static const int kPCRegister = 15;
+static const int kNoRegister = -1;
 
 // -----------------------------------------------------------------------------
 // Conditions.
@@ -370,9 +371,9 @@
   // stop
   kStopCode = 1 << 23
 };
-const uint32_t kStopCodeMask = kStopCode - 1;
-const uint32_t kMaxStopCode = kStopCode - 1;
-const int32_t  kDefaultStopCode = -1;
+static const uint32_t kStopCodeMask = kStopCode - 1;
+static const uint32_t kMaxStopCode = kStopCode - 1;
+static const int32_t  kDefaultStopCode = -1;
 
 
 // Type of VFP register. Determines register encoding.
@@ -390,17 +391,17 @@
 
 // This mask does not include the "inexact" or "input denormal" cumulative
 // exceptions flags, because we usually don't want to check for it.
-const uint32_t kVFPExceptionMask = 0xf;
-const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
-const uint32_t kVFPInexactExceptionBit = 1 << 4;
-const uint32_t kVFPFlushToZeroMask = 1 << 24;
+static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
+static const uint32_t kVFPInexactExceptionBit = 1 << 4;
+static const uint32_t kVFPFlushToZeroMask = 1 << 24;
 
-const uint32_t kVFPNConditionFlagBit = 1 << 31;
-const uint32_t kVFPZConditionFlagBit = 1 << 30;
-const uint32_t kVFPCConditionFlagBit = 1 << 29;
-const uint32_t kVFPVConditionFlagBit = 1 << 28;
+static const uint32_t kVFPNConditionFlagBit = 1 << 31;
+static const uint32_t kVFPZConditionFlagBit = 1 << 30;
+static const uint32_t kVFPCConditionFlagBit = 1 << 29;
+static const uint32_t kVFPVConditionFlagBit = 1 << 28;
 
 
 // VFP rounding modes. See ARM DDI 0406B Page A2-29.
@@ -417,7 +418,7 @@
   kRoundToZero = RZ
 };
 
-const uint32_t kVFPRoundingModeMask = 3 << 22;
+static const uint32_t kVFPRoundingModeMask = 3 << 22;
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
@@ -573,13 +574,13 @@
   // The naming of these accessors corresponds to figure A3-1.
   //
   // Two kind of accessors are declared:
-  // - <Name>Field() will return the raw field, i.e. the field's bits at their
+  // - <Name>Field() will return the raw field, ie the field's bits at their
   //   original place in the instruction encoding.
-  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
-  //   0xC0810002 ConditionField(instr) will return 0xC0000000.
+  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
+  //   ConditionField(instr) will return 0xC0000000.
   // - <Name>Value() will return the field value, shifted back to bit 0.
-  //   e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
-  //   0xC0810002 ConditionField(instr) will return 0xC.
+  //   eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
+  //   ConditionField(instr) will return 0xC.
 
 
   // Generally applicable fields
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 7b08ed8..51cfeb6 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
+void CPU::Setup() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 96139a2..07a2272 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -132,58 +132,56 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non object values
-    // are stored as a smi causing it to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
-    if ((object_regs | non_object_regs) != 0) {
-      for (int i = 0; i < kNumJSCallerSaved; i++) {
-        int r = JSCallerSavedCode(i);
-        Register reg = { r };
-        if ((non_object_regs & (1 << r)) != 0) {
-          if (FLAG_debug_code) {
-            __ tst(reg, Operand(0xc0000000));
-            __ Assert(eq, "Unable to encode value as smi");
-          }
-          __ mov(reg, Operand(reg, LSL, kSmiTagSize));
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non object values
+  // are stored as a smi causing it to be untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  if ((object_regs | non_object_regs) != 0) {
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ tst(reg, Operand(0xc0000000));
+          __ Assert(eq, "Unable to encode value as smi");
         }
+        __ mov(reg, Operand(reg, LSL, kSmiTagSize));
       }
-      __ stm(db_w, sp, object_regs | non_object_regs);
     }
+    __ stm(db_w, sp, object_regs | non_object_regs);
+  }
 
 #ifdef DEBUG
-    __ RecordComment("// Calling from debug break to runtime - come in - over");
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-    __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
-    __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+  __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
+  __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-    CEntryStub ceb(1);
-    __ CallStub(&ceb);
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
 
-    // Restore the register values from the expression stack.
-    if ((object_regs | non_object_regs) != 0) {
-      __ ldm(ia_w, sp, object_regs | non_object_regs);
-      for (int i = 0; i < kNumJSCallerSaved; i++) {
-        int r = JSCallerSavedCode(i);
-        Register reg = { r };
-        if ((non_object_regs & (1 << r)) != 0) {
-          __ mov(reg, Operand(reg, LSR, kSmiTagSize));
-        }
-        if (FLAG_debug_code &&
-            (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-          __ mov(reg, Operand(kDebugZapValue));
-        }
+  // Restore the register values from the expression stack.
+  if ((object_regs | non_object_regs) != 0) {
+    __ ldm(ia_w, sp, object_regs | non_object_regs);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+      }
+      if (FLAG_debug_code &&
+          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+        __ mov(reg, Operand(kDebugZapValue));
       }
     }
-
-    // Leave the internal frame.
   }
 
+  __ LeaveInternalFrame();
+
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
   // overwritten by the address of DebugBreakXXX.
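
// [Editorial sketch] The spill protocol above: non-object register values are
// smi-tagged before being pushed so a GC walking the frame sees only valid
// tagged words, then untagged on restore. The tst against 0xc0000000 asserts
// the top two bits are clear, i.e. the value is still representable as a smi
// after the 1-bit tag shift.
#include <cstdint>
uint32_t SpillNonObject(uint32_t raw) { return raw << 1; }  // tag as smi
uint32_t RestoreNonObject(uint32_t s) { return s >> 1; }    // untag
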
@@ -251,6 +249,14 @@
 }
 
 
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // Calling convention for construct call (from builtins-arm.cc)
+  //  -- r0     : number of arguments (not smi)
+  //  -- r1     : constructor function
+  Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
+}
+
+
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // In places other than IC call sites it is expected that r0 is TOS which
   // is an object - this is not generally the case so this should be used with
@@ -259,43 +265,11 @@
 }
 
 
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-arm.cc).
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- r1 : function
+  //  No registers used on entry.
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- r1 : function
-  //  -- r2 : cache cell for call target
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
-  // Calling convention for CallConstructStub (from code-stubs-arm.cc)
-  // ----------- S t a t e -------------
-  //  -- r0     : number of arguments (not smi)
-  //  -- r1     : constructor function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
-  // Calling convention for CallConstructStub (from code-stubs-arm.cc)
-  // ----------- S t a t e -------------
-  //  -- r0     : number of arguments (not smi)
-  //  -- r1     : constructor function
-  //  -- r2     : cache cell for call target
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 7b2a3c4..d4f251f 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -79,24 +79,18 @@
     ASSERT(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     ASSERT(call_address + patch_size() <= code->instruction_end());
+
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
   }
 
-  Isolate* isolate = code->GetIsolate();
-
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = isolate->deoptimizer_data();
+  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
-  // We might be in the middle of incremental marking with compaction.
-  // Tell collector to treat this code object in a special way and
-  // ignore all slots that might have been recorded on it.
-  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -108,12 +102,7 @@
 }
 
 
-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
-static const int32_t kBranchBeforeInterrupt =  0x5a000004;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
@@ -122,16 +111,10 @@
   //  2a 00 00 01       bcs ok
   //  e5 9f c? ??       ldr ip, [pc, <stack guard address>]
   //  e1 2f ff 3c       blx ip
-  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
+  ASSERT(Memory::int32_at(pc_after - kInstrSize) ==
+      (al | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | ip.code()));
   ASSERT(Assembler::IsLdrPcImmediateOffset(
       Assembler::instr_at(pc_after - 2 * kInstrSize)));
-  if (FLAG_count_based_interrupts) {
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  } else {
-    ASSERT_EQ(kBranchBeforeStackCheck,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  }
 
   // We patch the code to the following form:
   //  e1 5d 00 0c       cmp sp, <limit>
@@ -154,32 +137,20 @@
          reinterpret_cast<uint32_t>(check_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
-                                         Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
-  ASSERT(Assembler::IsLdrPcImmediateOffset(
-      Assembler::instr_at(pc_after - 2 * kInstrSize)));
+  ASSERT(Memory::uint32_at(pc_after - kInstrSize) == 0xe12fff3c);
+  ASSERT(Memory::uint8_at(pc_after - kInstrSize - 1) == 0xe5);
+  ASSERT(Memory::uint8_at(pc_after - kInstrSize - 2) == 0x9f);
 
   // Replace NOP with conditional jump.
   CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
-  if (FLAG_count_based_interrupts) {
-    patcher.masm()->b(+16, pl);
-    ASSERT_EQ(kBranchBeforeInterrupt,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  } else {
-    patcher.masm()->b(+4, cs);
-    ASSERT_EQ(kBranchBeforeStackCheck,
-              Memory::int32_at(pc_after - 3 * kInstrSize));
-  }
+  patcher.masm()->b(+4, cs);
 
   // Replace the stack check address in the constant pool
   // with the entry address of the replacement code.
@@ -190,9 +161,6 @@
          reinterpret_cast<uint32_t>(replacement_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(check_code->entry());
-
-  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 2 * kInstrSize, check_code);
 }
 
 
@@ -229,13 +197,12 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
-  iterator.Skip(1);  // Drop JS frame count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::JS_FRAME == opcode);
+  ASSERT(Translation::FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -271,7 +238,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -336,7 +305,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
-    // Set up the frame pointer and the context pointer.
+    // Setup the frame pointer and the context pointer.
     output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
     output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
 
@@ -359,220 +328,15 @@
 }
 
 
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
-                                                 int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
-  // Arguments adaptor can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // A marker value is used in place of the context.
-  output_offset -= kPointerSize;
-  intptr_t context = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
-           top_address + output_offset, output_offset, context);
-  }
-
-  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* adaptor_trampoline =
-      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      adaptor_trampoline->instruction_start() +
-      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
-                                              int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = 7 * kPointerSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
-  // Construct stub can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // A marker value is used in place of the function.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  // Constructor function being invoked by the stub.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // The newly allocated object was passed as receiver in the artificial
-  // constructor stub environment created by HEnvironment::CopyForInlining().
-  output_offset -= kPointerSize;
-  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      construct_stub->instruction_start() +
-      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
 // This code is very similar to ia32 code, but relies on register names (fp, sp)
 // and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
-                                   int frame_index) {
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
   // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -592,7 +356,9 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -680,8 +446,9 @@
     value = reinterpret_cast<intptr_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
-  output_frame->SetContext(value);
-  if (is_topmost) output_frame->SetRegister(cp.code(), value);
+  if (is_topmost) {
+    output_frame->SetRegister(cp.code(), value);
+  }
   if (FLAG_trace_deopt) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
@@ -833,10 +600,7 @@
   __ mov(r5, Operand(ExternalReference::isolate_address()));
   __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-  }
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
@@ -890,11 +654,8 @@
   // r0: deoptimizer object; r1: scratch.
   __ PrepareCallCFunction(1, r1);
   // Call Deoptimizer::ComputeOutputFrames().
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 1);
-  }
+  __ CallCFunction(
+      ExternalReference::compute_output_frames_function(isolate), 1);
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
@@ -910,6 +671,7 @@
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
   __ sub(r3, r3, Operand(sizeof(uint32_t)));
+  // __ add(r6, r2, Operand(r3, LSL, 1));
   __ add(r6, r2, Operand(r3));
   __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
   __ push(r7);
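
The PatchStackCheckCodeAt/RevertStackCheckCodeAt pair above only ever touches one instruction word and one constant-pool slot. A standalone sketch of that state machine: 0x2a000001 is the "bcs ok" encoding from the removed kBranchBeforeStackCheck constant, and 0xe1a00000 (mov r0, r0) is assumed here as the nop the patcher writes.

    #include <cstdint>
    #include <cstdio>

    const uint32_t kBcsOk = 0x2a000001;  // skip the stub call: sp >= limit
    const uint32_t kNop   = 0xe1a00000;  // mov r0, r0: always fall into it

    struct PatchSite {
      uint32_t branch_slot;  // instruction before the "ldr ip / blx ip" pair
      uint32_t pool_slot;    // constant-pool word holding the call target
    };

    // Patching for on-stack replacement: disable the fast path and point the
    // pool entry at the replacement code.
    void Patch(PatchSite* site, uint32_t replacement_entry) {
      site->branch_slot = kNop;
      site->pool_slot = replacement_entry;
    }

    // Reverting: restore the conditional branch ("b(+4, cs)" above) and the
    // ordinary stack-check stub entry.
    void Revert(PatchSite* site, uint32_t check_entry) {
      site->branch_slot = kBcsOk;
      site->pool_slot = check_entry;
    }

    int main() {
      PatchSite site = { kBcsOk, 0x1000 };  // made-up code entry addresses
      Patch(&site, 0x2000);
      printf("patched:  %08x pool=%x\n", site.branch_slot, site.pool_slot);
      Revert(&site, 0x1000);
      printf("reverted: %08x pool=%x\n", site.branch_slot, site.pool_slot);
      return 0;
    }
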
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 96a7d3c..603b3cf 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -473,7 +473,7 @@
       return 1;
     }
     case 'i': {  // 'i: immediate value from adjacent bits.
-      // Expects tokens in the form imm%02d@%02d, i.e. imm05@07, imm10@16
+      // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
       int width = (format[3] - '0') * 10 + (format[4] - '0');
       int lsb   = (format[6] - '0') * 10 + (format[7] - '0');
 
@@ -662,15 +662,6 @@
 }
 
 
-// The disassembler may end up decoding data inlined in the code. We do not want
-// it to crash if the data does not resemble any known instruction.
-#define VERIFY(condition) \
-if(!(condition)) {        \
-  Unknown(instr);         \
-  return;                 \
-}
-
-
 // For currently unimplemented decodings the disassembler calls Unknown(instr)
 // which will just print "unknown" for the instruction bits.
 void Decoder::Unknown(Instruction* instr) {
@@ -956,13 +947,13 @@
 void Decoder::DecodeType3(Instruction* instr) {
   switch (instr->PUField()) {
     case da_x: {
-      VERIFY(!instr->HasW());
+      ASSERT(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       break;
     }
     case ia_x: {
       if (instr->HasW()) {
-        VERIFY(instr->Bits(5, 4) == 0x1);
+        ASSERT(instr->Bits(5, 4) == 0x1);
         if (instr->Bit(22) == 0x1) {
           Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
         } else {
@@ -1083,8 +1074,8 @@
 // vmsr
 // Dd = vsqrt(Dm)
 void Decoder::DecodeTypeVFP(Instruction* instr) {
-  VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
-  VERIFY(instr->Bits(11, 9) == 0x5);
+  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+  ASSERT(instr->Bits(11, 9) == 0x5);
 
   if (instr->Bit(4) == 0) {
     if (instr->Opc1Value() == 0x7) {
@@ -1175,7 +1166,7 @@
 
 void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
     Instruction* instr) {
-  VERIFY((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
          (instr->VAValue() == 0x0));
 
   bool to_arm_register = (instr->VLValue() == 0x1);
@@ -1189,8 +1180,8 @@
 
 
 void Decoder::DecodeVCMP(Instruction* instr) {
-  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  VERIFY(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
          (instr->Opc3Value() & 0x1));
 
   // Comparison.
@@ -1212,8 +1203,8 @@
 
 
 void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
-  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  VERIFY((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
   bool double_to_single = (instr->SzValue() == 1);
 
@@ -1226,8 +1217,8 @@
 
 
 void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
-  VERIFY((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  VERIFY(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
          (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   bool to_integer = (instr->Bit(18) == 1);
@@ -1274,7 +1265,7 @@
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
 void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
-  VERIFY(instr->TypeValue() == 6);
+  ASSERT(instr->TypeValue() == 6);
 
   if (instr->CoprocessorValue() == 0xA) {
     switch (instr->OpcodeValue()) {
@@ -1356,7 +1347,6 @@
   }
 }
 
-#undef VERIFY
 
 bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
   int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
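
These disasm-arm.cc hunks swap the VERIFY checks back to plain ASSERTs. VERIFY, removed wholesale above, existed because the disassembler may walk data inlined in the code stream; instead of crashing, it routes malformed bits to Unknown(instr). A paraphrased standalone version of the pattern (the original macro picks up instr from the enclosing scope; here it is an explicit parameter):

    #include <cstdio>

    struct Instruction { unsigned bits; };

    static void Unknown(Instruction* instr) {
      printf("unknown (0x%08x)\n", instr->bits);
    }

    // Graceful variant of ASSERT for decode routines: on malformed bits,
    // print "unknown" and return instead of aborting the disassembly.
    #define VERIFY(instr, condition) \
      do {                           \
        if (!(condition)) {          \
          Unknown(instr);            \
          return;                    \
        }                            \
      } while (false)

    static void DecodeType6(Instruction* instr) {
      VERIFY(instr, ((instr->bits >> 25) & 0x7) == 6);  // type field must be 6
      printf("coprocessor load/store\n");
    }

    int main() {
      Instruction data = { 0xffffffffu };  // inline data, not a type-6 insn
      DecodeType6(&data);  // prints "unknown" rather than asserting
      return 0;
    }
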
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index a10acd0..26bbd82 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,22 +35,22 @@
 // The ARM ABI does not specify the usage of register r9, which may be reserved
 // as the static base or thread register on some platforms, in which case we
 // leave it alone. Adjust the value of kR9Available accordingly:
-const int kR9Available = 1;  // 1 if available to us, 0 if reserved
+static const int kR9Available = 1;  // 1 if available to us, 0 if reserved
 
 
 // Register list in load/store instructions
 // Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 16;
+static const int kNumRegs = 16;
 
 
 // Caller-saved/arguments registers
-const RegList kJSCallerSaved =
+static const RegList kJSCallerSaved =
   1 << 0 |  // r0 a1
   1 << 1 |  // r1 a2
   1 << 2 |  // r2 a3
   1 << 3;   // r3 a4
 
-const int kNumJSCallerSaved = 4;
+static const int kNumJSCallerSaved = 4;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
@@ -60,7 +60,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript
-const RegList kCalleeSaved =
+static const RegList kCalleeSaved =
   1 <<  4 |  //  r4 v1
   1 <<  5 |  //  r5 v2
   1 <<  6 |  //  r6 v3
@@ -70,45 +70,36 @@
   1 << 10 |  // r10 v7
   1 << 11;   // r11 v8 (fp in JavaScript code)
 
-// When calling into C++ (only for C++ calls that can't cause a GC).
-// The call code will take care of lr, fp, etc.
-const RegList kCallerSaved =
-  1 <<  0 |  // r0
-  1 <<  1 |  // r1
-  1 <<  2 |  // r2
-  1 <<  3 |  // r3
-  1 <<  9;   // r9
-
-
-const int kNumCalleeSaved = 7 + kR9Available;
+static const int kNumCalleeSaved = 7 + kR9Available;
 
 // Double registers d8 to d15 are callee-saved.
-const int kNumDoubleCalleeSaved = 8;
+static const int kNumDoubleCalleeSaved = 8;
 
 
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
-const int kNumSafepointRegisters = 16;
+static const int kNumSafepointRegisters = 16;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset     = 0 * kPointerSize;
-  static const int kCodeOffset     = 1 * kPointerSize;
-  static const int kStateOffset    = 2 * kPointerSize;
-  static const int kContextOffset  = 3 * kPointerSize;
-  static const int kFPOffset       = 4 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kStateOffset   = 1 * kPointerSize;
+  static const int kContextOffset = 2 * kPointerSize;
+  static const int kFPOffset      = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
-  static const int kSize = kFPOffset + kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
 };
 
 
@@ -136,9 +127,6 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -164,8 +152,6 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
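
Two things happen in this frames-arm.h hunk: the `static` keyword returns on the namespace-scope constants (redundant in C++, since a namespace-scope const already has internal linkage, which is why the newer code had dropped it), and StackHandlerConstants reverts from a next/code/state/context/fp layout to next/state/context/fp/pc. The offsets are plain pointer-size arithmetic; a sketch with 4-byte pointers, as on ARM:

    #include <cstdio>

    const int kPointerSize = 4;

    int main() {
      // Restored (3.6) handler layout: next, state, context, fp, pc.
      const int kNextOffset    = 0 * kPointerSize;
      const int kStateOffset   = 1 * kPointerSize;
      const int kContextOffset = 2 * kPointerSize;
      const int kFPOffset      = 3 * kPointerSize;
      const int kPCOffset      = 4 * kPointerSize;
      const int kSize          = kPCOffset + kPointerSize;  // 20 bytes
      printf("next=%d state=%d context=%d fp=%d pc=%d size=%d\n",
             kNextOffset, kStateOffset, kContextOffset, kFPOffset, kPCOffset,
             kSize);
      return 0;
    }
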
 
 
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 0cbd46e..50ed8b1 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,13 +34,11 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
-#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
 
 #include "arm/code-stubs-arm.h"
-#include "arm/macro-assembler-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -48,6 +46,11 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
@@ -110,20 +113,13 @@
 };
 
 
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 24;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
 // function.
 //
 // The live registers are:
-//   o r1: the JS function object being called (i.e., ourselves)
+//   o r1: the JS function object being called (ie, ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -131,12 +127,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FullCodeGenerator::Generate() {
-  CompilationInfo* info = info_;
-  handler_table_ =
-      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
-      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -151,7 +145,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (!info->is_classic_mode() || info->is_native()) {
+  if (info->is_strict_mode() || info->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -161,11 +155,6 @@
     __ bind(&ok);
   }
 
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
@@ -211,12 +200,13 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        MemOperand target = ContextOperand(cp, var->index());
-        __ str(r0, target);
-
-        // Update the write barrier.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+        __ mov(r1, Operand(Context::SlotOffset(var->index())));
+        __ str(r0, MemOperand(cp, r1));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(r2, Operand(cp));
+        __ RecordWrite(r2, Operand(r1), r3, r0);
       }
     }
   }
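
Both write-barrier forms in the hunk above serve the same purpose: after storing a pointer into a context slot, tell the collector which slot was written. The restored RecordWrite clobbers every register it touches (hence the extra moves through r1/r2), while the removed RecordWriteContextSlot is the incremental-marking-aware variant. The underlying idea, heavily simplified and not how V8's store buffer really works:

    #include <cstdio>
    #include <vector>
    #include <stdint.h>

    static std::vector<uintptr_t> remembered_slots;

    // After "str r0, MemOperand(cp, offset)", record the written slot's
    // address so a later GC pass can re-scan it for interesting pointers.
    static void RecordWrite(uintptr_t object, int offset) {
      remembered_slots.push_back(object + offset);
    }

    int main() {
      uintptr_t context = 0x1000;  // pretend heap address
      int offset = 3 * 4;          // Context::SlotOffset-style computation
      RecordWrite(context, offset);
      printf("remembered %u slot(s), first at 0x%lx\n",
             (unsigned) remembered_slots.size(),
             (unsigned long) remembered_slots[0]);
      return 0;
    }
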
@@ -244,7 +234,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (!is_classic_mode()) {
+    if (is_strict_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -273,11 +263,8 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -318,68 +305,19 @@
 }
 
 
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
-  __ mov(r2, Operand(profiling_counter_));
-  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-  __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
-  __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
-  int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
-  if (isolate()->IsDebuggerActive()) {
-    // Detect debug break requests as soon as possible.
-    reset_value = 10;
-  }
-  __ mov(r2, Operand(profiling_counter_));
-  __ mov(r3, Operand(Smi::FromInt(reset_value)));
-  __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
-                                       Label* back_edge_target) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-
-  if (FLAG_count_based_interrupts) {
-    int weight = 1;
-    if (FLAG_weighted_back_edges) {
-      ASSERT(back_edge_target->is_bound());
-      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kBackEdgeDistanceDivisor));
-    }
-    EmitProfilingCounterDecrement(weight);
-    __ b(pl, &ok);
-    InterruptStub stub;
-    __ CallStub(&stub);
-  } else {
-    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    __ cmp(sp, Operand(ip));
-    __ b(hs, &ok);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-  }
-
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmp(sp, Operand(ip));
+  __ b(hs, &ok);
+  StackCheckStub stub;
+  __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
 
-  if (FLAG_count_based_interrupts) {
-    EmitProfilingCounterReset();
-  }
-
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
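
The restored EmitStackCheck is the classic limit comparison: b(hs, &ok) skips the stub while sp is, as an unsigned value, at or above the stack limit. In plain C++ terms, with hypothetical addresses:

    #include <stdint.h>
    #include <cstdio>

    static void StackCheck(uintptr_t sp, uintptr_t stack_limit) {
      if (sp >= stack_limit) return;       // "__ b(hs, &ok)": fast path
      printf("calling StackCheckStub\n");  // interrupt/overflow handling
    }

    int main() {
      StackCheck(0x8000, 0x4000);  // plenty of stack: no call
      StackCheck(0x3000, 0x4000);  // below the limit: the stub runs
      return 0;
    }
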
@@ -401,32 +339,6 @@
       __ push(r0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kBackEdgeDistanceDivisor));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ b(pl, &ok);
-      __ push(r0);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-        __ push(r2);
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        InterruptStub stub;
-        __ CallStub(&stub);
-      }
-      __ pop(r0);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
-    }
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
@@ -479,7 +391,7 @@
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -502,7 +414,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -537,7 +449,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -596,7 +508,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -663,7 +575,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -753,20 +665,17 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
-
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWriteContextSlot(scratch0,
-                              location.offset(),
-                              src,
-                              scratch1,
-                              kLRHasBeenSaved,
-                              kDontSaveFPRegs);
+    __ RecordWrite(scratch0,
+                   Operand(Context::SlotOffset(var->index())),
+                   scratch1,
+                   src);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -777,7 +686,13 @@
 
   Label skip;
   if (should_normalize) __ b(&skip);
-  PrepareForBailout(expr, TOS_REG);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
   if (should_normalize) {
     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
     __ cmp(r0, ip);
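
The restored PrepareForBailoutBeforeSplit walks the forward bailout stack and registers a bailout point for every expression still on it, which is structurally just a parent-linked traversal. A reduced sketch; the field and accessor names follow the hunk, and the payload is simplified to an id:

    #include <cstdio>
    #include <cstddef>

    struct ForwardBailoutStack {
      int expr_id;                   // stands in for Expression* expr()
      ForwardBailoutStack* parent_;
      ForwardBailoutStack* parent() { return parent_; }
    };

    static void PrepareForBailout(int expr_id) {
      printf("bailout point for expression %d\n", expr_id);
    }

    int main() {
      ForwardBailoutStack inner = { 2, NULL };
      ForwardBailoutStack outer = { 1, &inner };
      for (ForwardBailoutStack* current = &outer; current != NULL;
           current = current->parent()) {
        PrepareForBailout(current->expr_id);  // mirrors the while loop above
      }
      return 0;
    }
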
@@ -788,17 +703,16 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function,
+                                        int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      ++(*global_count);
       break;
 
     case Variable::PARAMETER:
@@ -807,7 +721,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ str(result_register(), StackOperand(variable));
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, StackOperand(variable));
@@ -832,16 +746,10 @@
         __ str(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(cp,
-                                  offset,
-                                  result_register(),
-                                  r2,
-                                  kLRHasBeenSaved,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
+        __ mov(r1, Operand(cp));
+        __ RecordWrite(r1, Operand(offset), r2, result_register());
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
         __ str(ip, ContextOperand(cp, variable->index()));
@@ -853,13 +761,11 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ mov(r2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
-      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
-          ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -869,7 +775,7 @@
         __ Push(cp, r2, r1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, r2, r1, r0);
       } else {
@@ -883,6 +789,9 @@
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
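
In the Variable::LOOKUP hunk above, declaration modes map onto property attributes, with only const becoming READ_ONLY. The same mapping as a standalone function (enum values illustrative):

    #include <cstdio>

    enum Mode { VAR, CONST, LET };
    enum PropertyAttributes { NONE = 0, READ_ONLY = 1 };

    static PropertyAttributes DeclarationAttributes(Mode mode) {
      return mode == CONST ? READ_ONLY : NONE;  // matches the hunk above
    }

    int main() {
      printf("var=%d const=%d let=%d\n",
             DeclarationAttributes(VAR), DeclarationAttributes(CONST),
             DeclarationAttributes(LET));
      return 0;
    }
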
@@ -944,7 +853,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ cmp(r0, Operand(0));
@@ -997,8 +906,6 @@
   __ cmp(r0, null_value);
   __ b(eq, &exit);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(r0, &convert);
@@ -1010,17 +917,52 @@
   __ bind(&done_convert);
   __ push(r0);
 
-  // Check for proxies.
-  Label call_runtime;
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
-  __ b(le, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  Label next, call_runtime;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = r6;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = r7;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ mov(r1, r0);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register r1 contains the
+  // current JS object we've reached through the prototype chain.
+  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
+  __ cmp(r2, empty_fixed_array_value);
+  __ b(ne, &call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in r2 for the subsequent
+  // prototype load.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(r3, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (r3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(r3, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ cmp(r1, r0);
+  __ b(eq, &check_prototype);
+  __ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ cmp(r3, empty_fixed_array_value);
+  __ b(ne, &call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
+  __ cmp(r1, null_value);
+  __ b(ne, &next);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
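
The assembly restored above re-inlines a JSObject::IsSimpleEnum-style walk: follow the prototype chain and fall back to the runtime unless every object has empty elements and a usable enum cache, with prototypes contributing no enumerable properties of their own. The same logic as data-structure pseudocode; all types are stand-ins, not V8's:

    #include <cstdio>
    #include <cstddef>

    struct Obj {
      bool has_elements;     // elements offset != empty fixed array
      bool has_enum_cache;   // descriptors hold an enum cache
      bool cache_non_empty;  // enum cache bridge cache != empty fixed array
      Obj* prototype;        // NULL plays the role of null_value
    };

    static bool CanUseEnumCache(Obj* receiver) {
      for (Obj* o = receiver; o != NULL; o = o->prototype) {
        if (o->has_elements) return false;     // "__ b(ne, &call_runtime)"
        if (!o->has_enum_cache) return false;  // smi checks on descriptors
        // For all objects but the receiver, the cache must also be empty.
        if (o != receiver && o->cache_non_empty) return false;
      }
      return true;
    }

    int main() {
      Obj proto = { false, true, false, NULL };
      Obj recv  = { false, true, true,  &proto };
      printf("fast for-in path: %s\n",
             CanUseEnumCache(&recv) ? "yes" : "call_runtime");
      return 0;
    }
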
@@ -1049,7 +991,7 @@
   __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
   __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Set up the four remaining stack slots.
+  // Setup the four remaining stack slots.
   __ push(r0);  // Map.
   __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
   __ mov(r0, Operand(Smi::FromInt(0)));
@@ -1058,32 +1000,14 @@
   __ jmp(&loop);
 
   // We got a fixed array in register r0. Iterate through that.
-  Label non_proxy;
   __ bind(&fixed_array);
-
-  Handle<JSGlobalPropertyCell> cell =
-      isolate()->factory()->NewJSGlobalPropertyCell(
-          Handle<Object>(
-              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
-  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
-  __ LoadHeapObject(r1, cell);
-  __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
-  __ str(r2, FieldMemOperand(r1, JSGlobalPropertyCell::kValueOffset));
-
-  __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
-  __ ldr(r2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
-  __ b(gt, &non_proxy);
-  __ mov(r1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
-  __ bind(&non_proxy);
-  __ Push(r1, r0);  // Smi and array
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(r1, r0);
   __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
   __ mov(r0, Operand(Smi::FromInt(0)));
   __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
 
   // Generate code for doing the condition check.
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   // Load the current count to r0, load the length to r1.
   __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
@@ -1095,23 +1019,18 @@
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
 
-  // Get the expected map from the stack or a smi in the
+  // Get the expected map from the stack or a zero map in the
   // permanent slow case into register r2.
   __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we may have to filter the key.
+  // If not, we have to filter the key.
   Label update_each;
   __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
   __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r4, Operand(r2));
   __ b(eq, &update_each);
 
-  // For proxies, no filtering is done.
-  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  __ cmp(r2, Operand(Smi::FromInt(0)));
-  __ b(eq, &update_each);
-
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1127,7 +1046,7 @@
   __ mov(result_register(), r3);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each());
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
   }
 
   // Generate code for the body of the loop.
@@ -1140,7 +1059,7 @@
   __ add(r0, r0, Operand(Smi::FromInt(1)));
   __ push(r0);
 
-  EmitStackCheck(stmt, &loop);
+  EmitStackCheck(stmt);
   __ b(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1148,7 +1067,6 @@
   __ Drop(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1167,7 +1085,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->language_mode());
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ mov(r0, Operand(info));
     __ push(r0);
     __ CallStub(&stub);
@@ -1198,7 +1116,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1211,7 +1129,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1242,7 +1160,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallIC(ic, mode);
+  __ Call(ic, mode);
 }
 
 
@@ -1255,7 +1173,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(temp, temp);
@@ -1287,24 +1205,15 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == DYNAMIC_GLOBAL) {
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == CONST ||
-        local->mode() == CONST_HARMONY ||
-        local->mode() == LET) {
+    if (local->mode() == Variable::CONST) {
       __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      if (local->mode() == CONST) {
-        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-      } else {  // LET || CONST_HARMONY
-        __ b(ne, done);
-        __ mov(r0, Operand(var->name()));
-        __ push(r0);
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
     }
     __ jmp(done);
   }
@@ -1326,7 +1235,7 @@
       __ ldr(r0, GlobalObjectOperand());
       __ mov(r2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(r0);
       break;
     }
@@ -1337,64 +1246,24 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->binding_needs_init()) {
-        // var->scope() may be NULL when the proxy is located in eval code and
-        // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        //     var->location() == LOOKUP.
-        // always holds.
-        ASSERT(var->scope() != NULL);
-
-        // Check if the binding really needs an initialization check. The check
-        // can be skipped in the following situation: we have a LET or CONST
-        // binding in harmony mode, both the Variable and the VariableProxy have
-        // the same declaration scope (i.e. they are both in global code, in the
-        // same function or in the same eval code) and the VariableProxy is in
-        // the source physically located after the initializer of the variable.
-        //
-        // We cannot skip any initialization checks for CONST in non-harmony
-        // mode because const variables may be declared but never initialized:
-        //   if (false) { const x; }; var y = x;
-        //
-        // The condition on the declaration scopes is a conservative check for
-        // nested functions that access a binding and are called before the
-        // binding is initialized:
-        //   function() { f(); let x = 1; function f() { x = 2; } }
-        //
-        bool skip_init_check;
-        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
-          skip_init_check = false;
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        GetVar(r0, var);
+        __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+        if (var->mode() == Variable::LET) {
+          Label done;
+          __ b(ne, &done);
+          __ mov(r0, Operand(var->name()));
+          __ push(r0);
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          __ bind(&done);
         } else {
-          // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
-          skip_init_check = var->mode() != CONST &&
-              var->initializer_position() < proxy->position();
+          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
         }
-
-        if (!skip_init_check) {
-          // Let and const need a read barrier.
-          GetVar(r0, var);
-          __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-            // Throw a reference error when using an uninitialized let/const
-            // binding in harmony mode.
-            Label done;
-            __ b(ne, &done);
-            __ mov(r0, Operand(var->name()));
-            __ push(r0);
-            __ CallRuntime(Runtime::kThrowReferenceError, 1);
-            __ bind(&done);
-          } else {
-            // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST);
-            __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-          }
-          context()->Plug(r0);
-          break;
-        }
+        context()->Plug(r0);
       }
-      context()->Plug(var);
       break;
     }
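
The restored read barrier distinguishes let from const by what an uninitialized, hole-valued slot yields: let throws a ReferenceError, classic-mode const silently reads as undefined. Reduced to plain C++ with illustrative sentinels:

    #include <cstdio>

    enum Value { kTheHole, kUndefined, kSomeObject };

    // let: reading before initialization is an error.
    static Value LoadLet(Value slot) {
      if (slot == kTheHole) {
        printf("Runtime::kThrowReferenceError\n");
        return kUndefined;  // unreachable in the real VM; the runtime throws
      }
      return slot;
    }

    // const (classic mode): the hole simply reads as undefined.
    static Value LoadConst(Value slot) {
      return slot == kTheHole ? kUndefined : slot;
    }

    int main() {
      printf("let  before init -> %d\n", LoadLet(kTheHole));
      printf("const before init -> %d\n", LoadConst(kTheHole));
      printf("after init       -> %d\n", LoadLet(kSomeObject));
      return 0;
    }
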
 
@@ -1466,23 +1335,12 @@
 }
 
 
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
-  if (expression == NULL) {
-    __ LoadRoot(r1, Heap::kNullValueRootIndex);
-    __ push(r1);
-  } else {
-    VisitForStackValue(expression);
-  }
-}
-
-
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ ldr(r3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(constant_properties));
+  __ mov(r1, Operand(expr->constant_properties()));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1491,15 +1349,10 @@
       : ObjectLiteral::kNoFlags;
   __ mov(r0, Operand(Smi::FromInt(flags)));
   __ Push(r3, r2, r1, r0);
-  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    __ CallStub(&stub);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1511,7 +1364,6 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
-  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1534,10 +1386,10 @@
             VisitForAccumulatorValue(value);
             __ mov(r2, Operand(key->handle()));
             __ ldr(r1, MemOperand(sp));
-            Handle<Code> ic = is_classic_mode()
-                ? isolate()->builtins()->StoreIC_Initialize()
-                : isolate()->builtins()->StoreIC_Initialize_Strict();
-            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1560,29 +1412,21 @@
         }
         break;
       case ObjectLiteral::Property::GETTER:
-        accessor_table.lookup(key)->second->getter = value;
-        break;
       case ObjectLiteral::Property::SETTER:
-        accessor_table.lookup(key)->second->setter = value;
+        // Duplicate receiver on stack.
+        __ ldr(r0, MemOperand(sp));
+        __ push(r0);
+        VisitForStackValue(key);
+        __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(r1);
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
     }
   }
 
-  // Emit code to define accessors, using only a single call to the runtime for
-  // each pair of corresponding getters and setters.
-  for (AccessorTable::Iterator it = accessor_table.begin();
-       it != accessor_table.end();
-       ++it) {
-    __ ldr(r0, MemOperand(sp));  // Duplicate receiver.
-    __ push(r0);
-    VisitForStackValue(it->first);
-    EmitAccessor(it->second->getter);
-    EmitAccessor(it->second->setter);
-    __ mov(r0, Operand(Smi::FromInt(NONE)));
-    __ push(r0);
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
-  }
-
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ ldr(r0, MemOperand(sp));
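
The removed loop drains an AccessorTable that pairs each property key's getter and setter, so the generated code pays one Runtime::kDefineOrRedefineAccessorProperty call per key instead of one kDefineAccessor call per accessor. A small self-contained sketch of the pairing idea, using plain C++ stand-ins rather than V8 types:

#include <cstdio>
#include <map>
#include <string>

struct AccessorPair {
  const char* getter = nullptr;  // Literal source; stands in for Expression*.
  const char* setter = nullptr;
};

int main() {
  std::map<std::string, AccessorPair> table;  // Plays the AccessorTable.
  table["x"].getter = "get x() { ... }";
  table["x"].setter = "set x(v) { ... }";
  table["y"].getter = "get y() { ... }";
  for (const auto& [key, pair] : table)       // One runtime call per key.
    std::printf("DefineOrRedefineAccessorProperty(%s, %s, %s)\n",
                key.c_str(),
                pair.getter ? pair.getter : "null",
                pair.setter ? pair.setter : "null");
}
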
@@ -1603,20 +1447,13 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
-  __ mov(r1, Operand(constant_elements));
+  __ mov(r1, Operand(expr->constant_elements()));
   __ Push(r3, r2, r1);
-  if (has_fast_elements && constant_elements_values->map() ==
+  if (expr->constant_elements()->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1628,13 +1465,8 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           FLAG_smi_only_arrays);
-    FastCloneShallowArrayStub::Mode mode = has_fast_elements
-      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
-      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
     __ CallStub(&stub);
   }
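
The decoding removed above treats constant_elements as a two-element pair of (ElementsKind, backing store) and picks the clone-stub mode from the kind. A rough stand-in model, with an invented enum subset:

enum class ElementsKind { kFastSmiOnly, kFast, kOther };  // Invented subset.

struct ConstantElements {
  ElementsKind kind;    // First slot of the pair.
  const void* backing;  // Second slot: the constant values.
};

// CLONE_ELEMENTS is only safe when the boilerplate already has fast
// (non-smi-only) elements; anything else takes CLONE_ANY_ELEMENTS.
bool UseCloneElements(const ConstantElements& ce) {
  return ce.kind == ElementsKind::kFast;
}
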
 
@@ -1657,23 +1489,15 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (constant_elements_kind == FAST_ELEMENTS) {
-      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-      __ ldr(r6, MemOperand(sp));  // Copy of array literal.
-      __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
-      __ str(result_register(), FieldMemOperand(r1, offset));
-      // Update the write barrier for the array store.
-      __ RecordWriteField(r1, offset, result_register(), r2,
-                          kLRHasBeenSaved, kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
-    } else {
-      __ ldr(r1, MemOperand(sp));  // Copy of array literal.
-      __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
-      __ mov(r3, Operand(Smi::FromInt(i)));
-      __ mov(r4, Operand(Smi::FromInt(expr->literal_index())));
-      StoreArrayLiteralElementStub stub;
-      __ CallStub(&stub);
-    }
+    // Store the subexpression value in the array's elements.
+    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ str(result_register(), FieldMemOperand(r1, offset));
+
+    // Update the write barrier for the array store with r0 as the scratch
+    // register.
+    __ RecordWrite(r1, Operand(offset), r2, result_register());
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1805,7 +1629,7 @@
   __ mov(r2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name r0 and r2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1813,7 +1637,7 @@
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has arguments key and receiver in r0 and r1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1840,7 +1664,7 @@
 
   __ bind(&stub_call);
   BinaryOpStub stub(op, mode);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -1891,7 +1715,7 @@
       __ mov(ip, Operand(scratch1, ASR, 31));
       __ cmp(ip, Operand(scratch2));
       __ b(ne, &stub_call);
-      __ cmp(scratch1, Operand(0));
+      __ tst(scratch1, Operand(scratch1));
       __ mov(right, Operand(scratch1), LeaveCC, ne);
       __ b(ne, &done);
       __ add(scratch2, right, Operand(left), SetCC);
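
The cmp/tst change above sits in what appears to be the inlined smi multiply path: smull leaves the low word in scratch1 and the high word in scratch2, the product fits only if the high word equals the sign extension (ASR 31) of the low word, and a zero product must bail out when the operand signs call for -0, which the add/SetCC at the end detects. A C++ model of those checks, assuming untagged 32-bit inputs:

#include <cstdint>
#include <optional>

std::optional<int32_t> SmiMul(int32_t left, int32_t right) {
  int64_t product = int64_t{left} * right;
  int32_t lo = static_cast<int32_t>(product);
  int32_t hi = static_cast<int32_t>(product >> 32);
  if (hi != (lo >> 31)) return std::nullopt;  // Overflow: take the stub call.
  if (lo == 0 && int64_t{left} + right < 0)
    return std::nullopt;                      // Result should be -0: bail out.
  return lo;
}
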
@@ -1923,13 +1747,13 @@
   __ pop(r1);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1961,10 +1785,10 @@
       __ mov(r1, r0);
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic);
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1974,13 +1798,14 @@
       __ mov(r1, r0);
       __ pop(r2);
       __ pop(r0);  // Restore value.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic);
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic);
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(r0);
 }
 
@@ -1991,10 +1816,10 @@
     // Global var, const, or let.
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, GlobalObjectOperand());
-    Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->StoreIC_Initialize()
-        : isolate()->builtins()->StoreIC_Initialize_Strict();
-    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -2019,12 +1844,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == LET && op != Token::INIT_LET) {
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(language_mode())));
+      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
       __ Push(cp, r1, r0);  // Context, name, strict mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -2044,14 +1869,12 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(
-            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+        __ RecordWrite(r1, Operand(offset), r2, r3);
       }
     }
 
-  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, r1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -2064,15 +1887,13 @@
       __ str(r0, location);
       if (var->IsContextSlot()) {
         __ mov(r3, r0);
-        int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(
-            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+        __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(r0);  // Value.
       __ mov(r1, Operand(var->name()));
-      __ mov(r0, Operand(Smi::FromInt(language_mode())));
+      __ mov(r0, Operand(Smi::FromInt(strict_mode_flag())));
       __ Push(cp, r1, r0);  // Context, name, strict mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -2109,10 +1930,10 @@
     __ pop(r1);
   }
 
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->StoreIC_Initialize()
-      : isolate()->builtins()->StoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2155,10 +1976,10 @@
     __ pop(r2);
   }
 
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2192,14 +2013,6 @@
   }
 }
 
-
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               RelocInfo::Mode rmode,
-                               unsigned ast_id) {
-  ic_total_count_++;
-  __ Call(code, rmode, ast_id);
-}
-
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2217,7 +2030,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  CallIC(ic, mode, expr->id());
+  __ Call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2250,7 +2063,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2270,7 +2083,6 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2279,7 +2091,8 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
@@ -2288,20 +2101,22 @@
   }
   __ push(r1);
 
-  // Push the receiver of the enclosing function.
+  // Push the receiver of the enclosing function and do runtime call.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(r1);
-  // Push the language mode.
-  __ mov(r1, Operand(Smi::FromInt(language_mode())));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ mov(r1, Operand(Smi::FromInt(strict_mode)));
   __ push(r1);
 
-  // Push the start position of the scope the call resides in.
-  __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
-  __ push(r1);
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2335,11 +2150,28 @@
         VisitForStackValue(args->at(i));
       }
 
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
+        // Push the function and resolve eval.
+        __ push(r0);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(r1);
-      EmitResolvePossiblyDirectEval(arg_count);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      __ bind(&done);
 
       // The runtime call returns a pair of values in r0 (function) and
       // r1 (receiver). Touch up the stack with the right values.
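
The added block gives direct eval a fast path: when the callee is a DYNAMIC_GLOBAL variable, the generated code probes for the global eval function directly and only falls back to a full runtime context lookup if the probe misses. A rough control-flow model, with all names illustrative rather than V8 API:

#include <functional>

enum class Lookup { kSkipContextLookup, kPerformContextLookup };

void ResolveEval(bool callee_is_dynamic_global,
                 const std::function<bool()>& try_load_global_eval,
                 const std::function<void(Lookup)>& resolve) {
  if (callee_is_dynamic_global && try_load_global_eval()) {
    resolve(Lookup::kSkipContextLookup);   // Fast path: global eval found,
    return;                                // jump over the slow path (&done).
  }
  resolve(Lookup::kPerformContextLookup);  // Slow path: runtime lookup.
}
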
@@ -2350,7 +2182,6 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
-    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2457,29 +2288,14 @@
   __ mov(r0, Operand(arg_count));
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
-  // Record call targets in unoptimized code, but not in the snapshot.
-  CallFunctionFlags flags;
-  if (!Serializer::enabled()) {
-    flags = RECORD_CALL_TARGET;
-    Handle<Object> uninitialized =
-        TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ mov(r2, Operand(cell));
-  } else {
-    flags = NO_CALL_FUNCTION_FLAGS;
-  }
-
-  CallConstructStub stub(flags);
-  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2491,7 +2307,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2499,8 +2315,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2512,7 +2327,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ tst(r0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, if_true, if_false, fall_through);
 
@@ -2520,8 +2335,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2546,15 +2360,14 @@
   __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ b(lt, if_false);
   __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(le, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2568,15 +2381,14 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2592,7 +2404,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
   __ tst(r1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ne, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2600,8 +2412,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+    ZoneList<Expression*>* args) {
+
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2680,13 +2492,12 @@
   __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2700,15 +2511,14 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2722,15 +2532,14 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2744,7 +2553,7 @@
 
   __ JumpIfSmi(r0, if_false);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2752,8 +2561,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2776,15 +2585,14 @@
   __ bind(&check_frame_marker);
   __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
   __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2800,15 +2608,14 @@
 
   __ pop(r1);
   __ cmp(r0, r1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2822,8 +2629,9 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
   Label exit;
   // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2843,8 +2651,7 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2855,24 +2662,20 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
   // Map is now in r0.
   __ b(lt, &null);
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ b(eq, &function);
 
-  __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ b(eq, &function);
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+  __ b(ge, &function);
 
-  // Check if the constructor in the map is a JS function.
+  // Check if the constructor in the map is a function.
   __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_constructor);
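
The STATIC_ASSERTs added above encode a layout invariant: callable spec-object types occupy a contiguous range at the top of the instance-type space, so "is callable" reduces to a single greater-or-equal comparison. An illustrative model with invented numeric values, where only the ordering matters:

enum InstanceType {  // Invented values; only the relative order is real.
  FIRST_SPEC_OBJECT_TYPE = 100,
  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 149,
  FIRST_CALLABLE_SPEC_OBJECT_TYPE = 150,  // == LAST_NONCALLABLE + 1
  LAST_CALLABLE_SPEC_OBJECT_TYPE = 160,   // == LAST_TYPE
};

// With callables at the top of the range, one comparison suffices, which is
// exactly the cmp / b(ge, &function) pair emitted above.
bool IsCallableSpecObject(int type) {
  return type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;
}
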
@@ -2904,7 +2707,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2912,7 +2715,6 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2926,8 +2728,9 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2947,8 +2750,7 @@
    // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
   if (CpuFeatures::IsSupported(VFP3)) {
     __ PrepareCallCFunction(1, r0);
-    __ ldr(r0, ContextOperand(context_register(), Context::GLOBAL_INDEX));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+    __ mov(r0, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
     CpuFeatures::Scope scope(VFP3);
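
The comment a few lines up describes the classic bit trick for turning 32 random bits into a uniform double in [0, 1). A worked C++ model of that arithmetic; it models the comment, not the emitted ARM code:

#include <cstdint>
#include <cstdio>
#include <cstring>

double UniformFromUint32(uint32_t random) {
  // The exponent field for 2^20 is 1023 + 20 = 1043 (0x413); the 32 random
  // bits land in the low half of the 52-bit fraction, giving the value
  // 2^20 + random * 2^-32.
  uint64_t bits = (uint64_t{1043} << 52) | random;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // Subtract 1.0 x 2^20, leaving random * 2^-32.
}

int main() { std::printf("%f\n", UniformFromUint32(0x80000000u)); }  // 0.5
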
@@ -2968,9 +2770,8 @@
     __ mov(r0, r4);
   } else {
     __ PrepareCallCFunction(2, r0);
-    __ ldr(r1, ContextOperand(context_register(), Context::GLOBAL_INDEX));
     __ mov(r0, Operand(r4));
-    __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
+    __ mov(r1, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2979,10 +2780,9 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2992,10 +2792,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3006,9 +2805,9 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
+
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -3024,69 +2823,20 @@
 }
 
 
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label runtime, done;
-  Register object = r0;
-  Register result = r0;
-  Register scratch0 = r9;
-  Register scratch1 = r1;
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
-  __ Assert(eq, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch1, Operand(stamp));
-      __ ldr(scratch1, MemOperand(scratch1));
-      __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
-      __ cmp(scratch1, scratch0);
-      __ b(ne, &runtime);
-      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
-                                             kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch1);
-    __ mov(r1, Operand(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-  context()->Plug(r0);
-}
-
-
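
The deleted EmitDateField fast path trusts a cached JSDate field only while the object's cache stamp matches the isolate-wide date cache stamp, and always calls into C++ for fields past kFirstUncachedField. A stand-in C++ sketch; the boundary value and struct layout are assumptions for illustration:

#include <cstdint>

constexpr int kFirstUncachedField = 4;  // Assumed boundary, as in JSDate.

struct JSDate { int64_t value; int64_t cache_stamp; int64_t fields[8]; };

int64_t GetDateField(const JSDate& d, int index, int64_t current_stamp,
                     int64_t (*runtime)(const JSDate&, int)) {
  if (index == 0) return d.value;  // Field 0 is the time value itself.
  if (index < kFirstUncachedField && d.cache_stamp == current_stamp)
    return d.fields[index - 1];    // Cache stamp matches: field is valid.
  return runtime(d, index);        // Stale or uncached: recompute in C++.
}
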
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  if (CpuFeatures::IsSupported(VFP3)) {
-    MathPowStub stub(MathPowStub::ON_STACK);
-    __ CallStub(&stub);
-  } else {
-    __ CallRuntime(Runtime::kMath_pow, 2);
-  }
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
+
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
@@ -3103,18 +2853,16 @@
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ mov(r2, r0);
-  __ RecordWriteField(
-      r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
 
   __ bind(&done);
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
+
   // Load the argument on the stack and call the stub.
   VisitForStackValue(args->at(0));
 
@@ -3124,9 +2872,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
+
   VisitForAccumulatorValue(args->at(0));
 
   Label done;
@@ -3142,14 +2890,15 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
+
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
+  Register scratch = r2;
   Register result = r3;
 
   __ pop(object);
@@ -3159,6 +2908,7 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
+                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -3187,15 +2937,16 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
+
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
   Register object = r1;
   Register index = r0;
-  Register scratch = r3;
+  Register scratch1 = r2;
+  Register scratch2 = r3;
   Register result = r0;
 
   __ pop(object);
@@ -3205,7 +2956,8 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch,
+                                  scratch1,
+                                  scratch2,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -3234,9 +2986,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
+
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3246,9 +2998,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
+
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3258,11 +3010,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3270,11 +3021,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3282,23 +3032,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
-  // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallStub(&stub);
-  context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3306,9 +3043,8 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3316,8 +3052,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3326,31 +3061,18 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
-  // Check for proxy.
-  Label proxy, done;
-  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_PROXY_TYPE);
-  __ b(eq, &proxy);
-
   // InvokeFunction requires the function in r1. Move it in there.
   __ mov(r1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(r1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ jmp(&done);
-
-  __ bind(&proxy);
-  __ push(r0);
-  __ CallRuntime(Runtime::kCall, args->length());
-  __ bind(&done);
-
   context()->Plug(r0);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   RegExpConstructResultStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3360,8 +3082,7 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3420,31 +3141,16 @@
   __ str(scratch1, MemOperand(index2, 0));
   __ str(scratch2, MemOperand(index1, 0));
 
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   scratch1,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   ne,
-                   &no_remembered_set);
+  Label new_space;
+  __ InNewSpace(elements, scratch1, eq, &new_space);
   // Possible optimization: do a check that both values are Smis
   // (OR them together and test the result against the Smi tag mask).
 
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index1,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index2,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
+  __ mov(scratch1, elements);
+  __ RecordWriteHelper(elements, index1, scratch2);
+  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
 
-  __ bind(&no_remembered_set);
+  __ bind(&new_space);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
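
The "possible optimization" note above relies on smi tagging: small integers carry a zero low tag bit, so two values are both smis exactly when the OR of their bits still passes the tag-mask test. A one-function illustration:

#include <cstdint>

constexpr intptr_t kSmiTagMask = 1;  // Smis have a zero low tag bit.

bool BothSmi(intptr_t a, intptr_t b) {
  return ((a | b) & kSmiTagMask) == 0;  // OR preserves any nonzero tag bit.
}
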
@@ -3458,9 +3164,9 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
+
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
 
@@ -3509,8 +3215,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   Register right = r0;
@@ -3550,8 +3255,7 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3563,15 +3267,14 @@
 
   __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
   __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3586,12 +3289,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
+
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3754,7 +3457,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its ascii character value.
   __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator
@@ -3765,7 +3468,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ASCII char (in lower byte).
+  //   separator: Single separator ascii char (in lower byte).
 
   // Copy the separator character to the result.
   __ strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3847,7 +3550,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    CallIC(ic, mode, expr->id());
+    __ Call(ic, mode, expr->id());
     // Restore context register.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3868,9 +3571,7 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
-            ? kNonStrictMode : kStrictMode;
-        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag)));
+        __ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
         __ push(r1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(r0);
@@ -3878,7 +3579,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
         if (var->IsUnallocated()) {
           __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
@@ -3921,35 +3622,18 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
-      } else if (context()->IsTest()) {
-        const TestContext* test = TestContext::cast(context());
-        // The labels are swapped for the recursive call.
-        VisitForControl(expr->expression(),
-                        test->false_label(),
-                        test->true_label(),
-                        test->fall_through());
-        context()->Plug(test->true_label(), test->false_label());
       } else {
-        // We handle value contexts explicitly rather than simply visiting
-        // for control and plugging the control flow into the context,
-        // because we need to prepare a pair of extra administrative AST ids
-        // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
-        Label materialize_true, materialize_false, done;
-        VisitForControl(expr->expression(),
-                        &materialize_false,
-                        &materialize_true,
-                        &materialize_true);
-        __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
-        __ LoadRoot(r0, Heap::kTrueValueRootIndex);
-        if (context()->IsStackValue()) __ push(r0);
-        __ jmp(&done);
-        __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
-        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
-        if (context()->IsStackValue()) __ push(r0);
-        __ bind(&done);
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
       }
       break;
     }
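
The restored else-branch compiles unary NOT without emitting any instruction for the negation itself: it simply exchanges the true and false jump targets before visiting the subexpression. A toy model of the swap:

struct ControlLabels {
  const char* if_true;   // Stand-ins for the Label* targets above.
  const char* if_false;
};

// Compiling !expr for control flow just swaps the jump targets.
ControlLabels NegateForControl(ControlLabels l) {
  return {l.if_false, l.if_true};
}
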
@@ -4002,7 +3686,7 @@
   // accumulator register r0.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(r0);
 }
 
@@ -4113,7 +3797,7 @@
   SetSourcePosition(expr->position());
 
   BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4142,10 +3826,10 @@
     case NAMED_PROPERTY: {
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4159,10 +3843,10 @@
     case KEYED_PROPERTY: {
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4188,7 +3872,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    CallIC(ic);
+    __ Call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(r0);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4208,25 +3892,20 @@
     context()->Plug(r0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInDuplicateContext(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Expression* sub_expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(sub_expr);
+    VisitForTypeofValue(expr);
   }
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(r0, if_true);
@@ -4263,11 +3942,9 @@
 
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
-    __ b(eq, if_true);
-    __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
-    Split(eq, if_true, if_false, fall_through);
+    __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+    Split(ge, if_true, if_false, fall_through);
+
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4286,7 +3963,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+  Split(eq, if_true, if_false, fall_through);
 }
 
 
@@ -4294,12 +3982,9 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
-
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
+
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4307,13 +3992,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(ip, Heap::kTrueValueRootIndex);
       __ cmp(r0, ip);
       Split(eq, if_true, if_false, fall_through);
@@ -4323,7 +4015,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       // The stub returns 0 for true.
       __ tst(r0, r0);
       Split(eq, if_true, if_false, fall_through);
@@ -4337,25 +4029,33 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cond = eq;
+          __ pop(r1);
           break;
         case Token::LT:
           cond = lt;
+          __ pop(r1);
           break;
         case Token::GT:
-          cond = gt;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cond = lt;
+          __ mov(r1, result_register());
+          __ pop(r0);
           break;
         case Token::LTE:
-          cond = le;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cond = ge;
+          __ mov(r1, result_register());
+          __ pop(r0);
           break;
         case Token::GTE:
           cond = ge;
+          __ pop(r1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      __ pop(r1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
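
The reversed-operand cases above reuse the "less than" machinery: a > b is compiled as b < a with the lt/ge conditions, and the register swap keeps the source-level left operand in the slot the compare stub converts first, preserving ECMA-262 conversion order. A minimal check of the ordered-value equivalence (NaN handling stays in the stub's flags):

#include <cassert>

int main() {
  double a = 3.0, b = 2.0;
  // Token::GT becomes the 'lt' condition with r0/r1 exchanged.
  assert((a > b) == (b < a));
  // Token::LTE becomes 'ge' on the swapped operands.
  assert((a <= b) == (b >= a));
}
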
@@ -4371,9 +4071,9 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ cmp(r0, Operand(0));
       Split(cond, if_true, if_false, fall_through);
     }
@@ -4385,9 +4085,8 @@
 }
 
 
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+  Comment cmnt(masm_, "[ CompareToNull");
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4395,21 +4094,15 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Heap::RootListIndex nil_value = nil == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
-  __ LoadRoot(r1, nil_value);
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
-  if (expr->op() == Token::EQ_STRICT) {
+  if (expr->is_strict()) {
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Heap::RootListIndex other_nil_value = nil == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
     __ b(eq, if_true);
-    __ LoadRoot(r1, other_nil_value);
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
     __ cmp(r0, r1);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
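
The reverted CompareToNull visitor implements the usual JS rule: a strict comparison matches only null itself, while a loose comparison also accepts undefined (and, in the lines that follow, undetectable objects). A small standalone model of that rule, with the value categories invented here for illustration:

    #include <cstdio>

    enum Value { kNull, kUndefined, kSmi };

    static bool CompareToNull(Value v, bool is_strict) {
      if (v == kNull) return true;       // null == null and null === null
      if (is_strict) return false;       // only exact null passes ===
      return v == kUndefined;            // undefined == null is true
    }

    int main() {
      std::printf("%d %d %d\n",
                  CompareToNull(kUndefined, false),   // 1
                  CompareToNull(kUndefined, true),    // 0
                  CompareToNull(kSmi, false));        // 0
    }
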
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index e843657..2e49cae 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -208,8 +208,7 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(elements, scratch2, scratch1);
 }
 
 
@@ -382,10 +381,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                               int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState extra_state) {
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r1    : receiver
   //  -- r2    : name
@@ -395,11 +394,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_state,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5, r6);
+      masm, flags, r1, r2, r3, r4, r5);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -438,7 +437,7 @@
   // Probe the stub cache for the value object.
   __ bind(&probe);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5, r6);
+      masm, flags, r1, r2, r3, r4, r5);
 
   __ bind(&miss);
 }
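
Both probe sites above hash the receiver's map and the property name into the stub cache and jump to a cached handler on a hit; the dropped r6 argument simply means the 3.6 probe needs one less scratch register. A rough single-table model of such a probe (the real cache uses primary and secondary tables; sizes and hashing here are assumptions):

    #include <cstdint>
    #include <cstdio>

    struct Entry { uint32_t map; uint32_t name; const char* handler; };

    static Entry table[64];  // zero-initialized: empty cache

    static uint32_t Hash(uint32_t map, uint32_t name) {
      return (map ^ name) & 63;  // mask to the table size
    }

    static const char* Probe(uint32_t map, uint32_t name) {
      const Entry& e = table[Hash(map, name)];
      if (e.map == map && e.name == name) return e.handler;  // hit
      return nullptr;  // miss: jump to the generic/runtime path
    }

    int main() {
      table[Hash(7, 42)] = Entry{7, 42, "load_field_0"};
      const char* h = Probe(7, 42);
      std::printf("%s\n", h ? h : "miss");
    }
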
@@ -464,7 +463,7 @@
 }
 
 
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -486,10 +485,10 @@
 }
 
 
-void CallICBase::GenerateMiss(MacroAssembler* masm,
-                              int argc,
-                              IC::UtilityId id,
-                              Code::ExtraICState extra_state) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -505,22 +504,21 @@
   // Get the receiver of the function from the stack.
   __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Push the receiver and the name of the function.
-    __ Push(r3, r2);
+  // Push the receiver and the name of the function.
+  __ Push(r3, r2);
 
-    // Call the entry.
-    __ mov(r0, Operand(2));
-    __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+  // Call the entry.
+  __ mov(r0, Operand(2));
+  __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-    CEntryStub stub(1);
-    __ CallStub(&stub);
+  CEntryStub stub(1);
+  __ CallStub(&stub);
 
-    // Move result to r1 and leave the internal frame.
-    __ mov(r1, Operand(r0));
-  }
+  // Move result to r1 and leave the internal frame.
+  __ mov(r1, Operand(r0));
+  __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
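
The hunk above replaces the RAII FrameScope used in newer code with explicit EnterInternalFrame/LeaveInternalFrame calls, which must be kept balanced by hand on every path. A standalone sketch contrasting the two styles (hypothetical Masm type, not the V8 MacroAssembler):

    #include <cstdio>

    struct Masm {
      int depth = 0;
      void EnterInternalFrame() { ++depth; }
      void LeaveInternalFrame() { --depth; }
    };

    class FrameScope {  // the style this revert removes
     public:
      explicit FrameScope(Masm* m) : m_(m) { m_->EnterInternalFrame(); }
      ~FrameScope() { m_->LeaveInternalFrame(); }
     private:
      Masm* m_;
    };

    int main() {
      Masm masm;
      {
        FrameScope scope(&masm);  // enter
        // ... emit code that needs an internal frame ...
      }                           // leave happens automatically here
      std::printf("balanced: %d\n", masm.depth == 0);  // balanced: 1
    }
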
@@ -541,7 +539,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -553,6 +551,18 @@
 }
 
 
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
+
+
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -568,6 +578,27 @@
 }
 
 
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- r2    : name
@@ -619,13 +650,12 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(r2);  // save the key
-    __ Push(r1, r2);  // pass the receiver and the key
-    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-    __ pop(r2);  // restore the key
-  }
+  __ EnterInternalFrame();
+  __ push(r2);  // save the key
+  __ Push(r1, r2);  // pass the receiver and the key
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(r2);  // restore the key
+  __ LeaveInternalFrame();
   __ mov(r1, r0);
   __ jmp(&do_call);
 
@@ -685,7 +715,7 @@
   __ JumpIfSmi(r2, &miss);
   __ IsObjectJSStringType(r2, r0, &miss);
 
-  CallICBase::GenerateNormal(masm, argc);
+  GenerateCallNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -706,7 +736,7 @@
   Code::Flags flags =
       Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r0, r2, r3, r4, r5, r6);
+      masm, flags, r0, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -878,8 +908,7 @@
       GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
   __ str(r0, mapped_location);
   __ add(r6, r3, r5);
-  __ mov(r9, r0);
-  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(r3, r6, r9);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in r3.
@@ -887,8 +916,7 @@
       GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
   __ str(r0, unmapped_location);
   __ add(r6, r3, r4);
-  __ mov(r9, r0);
-  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(r3, r6, r9);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
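
The mapped/unmapped lookups above reflect how a non-strict function's arguments object works: indices covered by the parameter map alias the live formal parameters, while higher indices go to an ordinary backing store. A toy model of that aliasing (the layout is assumed for illustration):

    #include <cstdio>

    static const int kMappedCount = 2;
    static int parameters[2] = {10, 20};           // live formal parameters
    static int backing_store[4] = {0, 0, 30, 40};  // ordinary elements

    static int* ArgumentSlot(int index) {
      if (index < kMappedCount) return &parameters[index];  // mapped lookup
      return &backing_store[index];                         // unmapped lookup
    }

    int main() {
      *ArgumentSlot(0) = 99;  // writes through to the parameter itself
      std::printf("%d %d\n", parameters[0], *ArgumentSlot(2));  // 99 30
    }
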
@@ -1031,34 +1059,15 @@
   __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
   __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
   __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(r3, r3, Operand(mask));
+  __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
-
   __ mov(r4, Operand(cache_keys));
   __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    // Load map and move r4 to next entry.
-    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
-    __ cmp(r2, r5);
-    __ b(ne, &try_next_entry);
-    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load symbol
-    __ cmp(r0, r5);
-    __ b(eq, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  // Last entry: Load map and move r4 to symbol.
-  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
+  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));  // Move r4 to symbol.
   __ cmp(r2, r5);
   __ b(ne, &slow);
   __ ldr(r5, MemOperand(r4));
@@ -1072,25 +1081,13 @@
   // r3     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    __ mov(r4, Operand(cache_field_offsets));
-    if (i != 0) {
-      __ add(r3, r3, Operand(i));
-    }
-    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
-    __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
-    __ sub(r5, r5, r6, SetCC);
-    __ b(ge, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
+  __ mov(r4, Operand(cache_field_offsets));
+  __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+  __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+  __ sub(r5, r5, r6, SetCC);
+  __ b(ge, &property_array_property);
 
   // Load in-object property.
-  __ bind(&load_in_object_property);
   __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
   __ add(r6, r6, r5);  // Index from start of object.
   __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
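
The removed loop above probed every entry of a multi-entry hash bucket before giving up; the 3.6 code it restores checks exactly one (map, symbol) pair per bucket. A standalone sketch of the bucketed probe (entry count and layout are illustrative assumptions; the generated code unrolls this loop):

    #include <cstdio>

    static const int kEntriesPerBucket = 2;

    struct Key { int map; int symbol; };

    static Key cache_keys[8][kEntriesPerBucket];  // zero-initialized
    static int cache_offsets[8][kEntriesPerBucket];

    static int Lookup(int bucket, int map, int symbol) {
      for (int i = 0; i < kEntriesPerBucket; i++) {
        const Key& k = cache_keys[bucket][i];
        if (k.map == map && k.symbol == symbol) return cache_offsets[bucket][i];
      }
      return -1;  // miss: fall through to the slow path
    }

    int main() {
      cache_keys[3][1] = Key{10, 99};
      cache_offsets[3][1] = 4;
      std::printf("%d\n", Lookup(3, 10, 99));  // 4
    }
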
@@ -1140,12 +1137,14 @@
 
   Register receiver = r1;
   Register index = r0;
-  Register scratch = r3;
+  Register scratch1 = r2;
+  Register scratch2 = r3;
   Register result = r0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch,
+                                          scratch1,
+                                          scratch2,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1240,47 +1239,6 @@
 }
 
 
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r2     : receiver
-  //  -- r3     : target map
-  //  -- lr     : return address
-  // -----------------------------------
-  // Must return the modified receiver in r0.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-    __ mov(r0, r2);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ push(r2);
-  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
-    MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r2     : receiver
-  //  -- r3     : target map
-  //  -- lr     : return address
-  // -----------------------------------
-  // Must return the modified receiver in r0.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
-    __ mov(r0, r2);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ push(r2);
-  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                               StrictModeFlag strict_mode) {
   // ---------- S t a t e --------------
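
The two stubs deleted above drove elements-kind transitions: when a store does not fit the current backing store, the array is widened (smi-only to double, or either to tagged objects) before the store completes. A sketch of that lattice, with names taken from the diff but the logic simplified; transitions only widen, never narrow:

    #include <cstdio>

    enum ElementsKind {
      FAST_SMI_ONLY_ELEMENTS,  // only smis stored so far
      FAST_DOUBLE_ELEMENTS,    // unboxed doubles
      FAST_ELEMENTS            // arbitrary tagged values
    };

    static ElementsKind TransitionFor(ElementsKind from,
                                      bool value_is_heap_number) {
      if (from == FAST_SMI_ONLY_ELEMENTS)
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (from == FAST_DOUBLE_ELEMENTS && !value_is_heap_number)
        return FAST_ELEMENTS;  // a double array cannot hold arbitrary objects
      return from;             // FAST_ELEMENTS accepts everything
    }

    int main() {
      std::printf("%d\n", TransitionFor(FAST_SMI_ONLY_ELEMENTS, true));  // 1
      std::printf("%d\n", TransitionFor(FAST_DOUBLE_ELEMENTS, false));   // 2
    }
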
@@ -1309,19 +1267,13 @@
   //  -- r2     : receiver
   //  -- lr     : return address
   // -----------------------------------
-  Label slow, array, extra, check_if_double_array;
-  Label fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label transition_smi_elements, finish_object_store, non_double_value;
-  Label transition_double_elements;
+  Label slow, fast, array, extra;
 
   // Register usage.
   Register value = r0;
   Register key = r1;
   Register receiver = r2;
-  Register receiver_map = r3;
-  Register elements_map = r6;
-  Register elements = r7;  // Elements array of the receiver.
+  Register elements = r3;  // Elements array of the receiver.
   // r4 and r5 are used as general scratch registers.
 
   // Check that the key is a smi.
@@ -1329,26 +1281,35 @@
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
   // Get the map of the object.
-  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
   __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JSObject.
-  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
   __ b(lt, &slow);
+  __ cmp(r4, Operand(JS_PROXY_TYPE));
+  __ b(eq, &slow);
+  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(eq, &slow);
 
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r4, ip);
+  __ b(ne, &slow);
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
-  __ b(lo, &fast_object_with_map_check);
+  __ b(lo, &fast);
 
   // Slow case, handle jump to runtime.
   __ bind(&slow);
@@ -1369,31 +1330,21 @@
   __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ cmp(key, Operand(ip));
   __ b(hs, &slow);
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &check_if_double_array);
   // Calculate key + 1 as smi.
   STATIC_ASSERT(kSmiTag == 0);
   __ add(r4, key, Operand(Smi::FromInt(1)));
   __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ b(&fast_object_without_map_check);
-
-  __ bind(&check_if_double_array);
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  // Add 1 to key, and go to common element store code for doubles.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ add(r4, key, Operand(Smi::FromInt(1)));
-  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ jmp(&fast_double_without_map_check);
+  __ b(&fast);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is the length is always a smi.
   __ bind(&array);
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r4, ip);
+  __ b(ne, &slow);
 
   // Check the key against the length in the array.
   __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1401,104 +1352,19 @@
   __ b(hs, &extra);
   // Fall through to fast case.
 
-  __ bind(&fast_object_with_map_check);
-  Register scratch_value = r4;
-  Register address = r5;
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &fast_double_with_map_check);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(address));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(address));
+  __ bind(&fast);
+  // Fast case, store the value to the elements backing store.
+  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(r5));
+  // Skip write barrier if the written value is a smi.
+  __ tst(value, Operand(kSmiTagMask));
+  __ Ret(eq);
   // Update write barrier for the elements array address.
-  __ mov(scratch_value, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 scratch_value,
-                 kLRHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
+  __ sub(r4, r5, Operand(elements));
+  __ RecordWrite(elements, Operand(r4), r5, r6);
+
   __ Ret();
-
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 receiver,
-                                 elements,
-                                 r3,
-                                 r4,
-                                 r5,
-                                 r6,
-                                 &transition_double_elements);
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         &slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
 }
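
The extra RecordWrite arguments being dropped throughout this file (kLRHasNotBeenSaved, kDontSaveFPRegs, and friends) parameterize the newer incremental-marking write barrier; the 3.6 barrier takes only object, slot, and scratch registers. A generic remembered-set model of what any such barrier must do (not V8's implementation):

    #include <cstdio>
    #include <set>

    struct Heap {
      std::set<void**> remembered_set;  // slots that may point to new space
      bool InNewSpace(void* p) { return p != nullptr; }  // stub predicate
    };

    static void RecordWrite(Heap* heap, void** slot, void* value) {
      *slot = value;  // the store itself
      if (heap->InNewSpace(value)) heap->remembered_set.insert(slot);  // barrier
    }

    int main() {
      Heap heap;
      int young = 0;           // stand-in for a new-space object
      void* field = nullptr;   // a slot inside an old-space object
      RecordWrite(&heap, &field, &young);
      std::printf("%zu\n", heap.remembered_set.size());  // 1
    }
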
 
 
@@ -1516,7 +1382,7 @@
       Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
 
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5, r6);
+      masm, flags, r1, r2, r3, r4, r5);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -1548,10 +1414,11 @@
   //  -- lr    : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSArray::SetElementsLength accepts
-  // (currently anything except for external arrays which means anything with
-  // elements of FixedArray type).  Value must be a number, but only smis are
-  // accepted as the most common case.
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
 
   Label miss;
 
@@ -1573,13 +1440,6 @@
   __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
   __ b(ne, &miss);
 
-  // Check that the array has fast properties, otherwise the length
-  // property might have been redefined.
-  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
-  __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
-  __ b(eq, &miss);
-
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1650,9 +1510,11 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      return gt;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
     case Token::LTE:
-      return le;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
     case Token::GTE:
       return ge;
     default:
@@ -1672,9 +1534,6 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
-    if (state == KNOWN_OBJECTS) {
-      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
-    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
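
The deleted lines above pinned a concrete map on the compare stub when the IC reached the KNOWN_OBJECTS state, a state the 3.6 CompareIC does not have. A sketch of how such an IC state ladder behaves (state names follow the diff; the transition rules here are simplified assumptions): each miss re-specializes the stub, and KNOWN_OBJECTS additionally pins a map.

    #include <cstdio>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, KNOWN_OBJECTS, GENERIC };

    static State NextState(State s, bool both_smis, bool both_numbers,
                           bool same_known_map) {
      if (s == UNINITIALIZED && both_smis) return SMIS;
      if (s <= SMIS && both_numbers) return HEAP_NUMBERS;
      if (same_known_map) return KNOWN_OBJECTS;
      return GENERIC;  // stop specializing
    }

    int main() {
      State s = UNINITIALIZED;
      s = NextState(s, true, false, false);   // -> SMIS
      s = NextState(s, false, true, false);   // -> HEAP_NUMBERS
      std::printf("%d\n", s);                 // 2
    }
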
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index cdc1947..30ccd05 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -212,11 +212,10 @@
 }
 
 
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(is_strict() ? " === null" : " == null");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -228,13 +227,6 @@
 }
 
 
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_string(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -249,14 +241,6 @@
 }
 
 
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if string_compare(");
-  InputAt(0)->PrintTo(stream);
-  InputAt(1)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -406,12 +390,6 @@
 }
 
 
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
 LChunk::LChunk(CompilationInfo* info, HGraph* graph)
     : spill_slot_count_(0),
       info_(info),
@@ -440,7 +418,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("L_Mark empty blocks", this);
+  HPhase phase("Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -476,7 +454,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -551,8 +529,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new(zone()) LChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  chunk_ = new LChunk(info(), graph());
+  HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -581,15 +559,20 @@
 }
 
 
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
-                                  Register::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                                  DoubleRegister::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          DoubleRegister::ToAllocationIndex(reg));
 }
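
Every "new(zone()) L..." being rewritten to a plain "new L..." in this file is the same mechanical change: the newer code placement-allocates Lithium objects in a Zone arena that is freed wholesale after compilation, while 3.6 uses the global heap. A simplified sketch of the pattern (bump allocator only; no bounds checks or destructor runs, unlike a production arena; the instruction type is a made-up stand-in):

    #include <cstddef>
    #include <cstdio>

    class Zone {
     public:
      void* New(size_t size) {
        size = (size + 7) & ~size_t{7};  // keep 8-byte alignment
        void* result = &buffer_[used_];
        used_ += size;                   // sketch: no overflow check
        return result;                   // no per-object free
      }
     private:
      alignas(8) char buffer_[1 << 12];
      size_t used_ = 0;
    };

    struct LFakeInstruction {
      explicit LFakeInstruction(int id) : id(id) {}
      int id;
      static void* operator new(size_t size, Zone* zone) {
        return zone->New(size);
      }
      static void operator delete(void*, Zone*) {}  // matching placement delete
      static void operator delete(void*) {}         // never heap-freed
    };

    int main() {
      Zone zone;
      LFakeInstruction* instr = new(&zone) LFakeInstruction(42);  // the pattern
      std::printf("%d\n", instr->id);
    }  // the zone's storage goes away all at once
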
 
 
@@ -604,30 +587,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                                      LUnallocated::USED_AT_START));
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+  return Use(value, new LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
-                                             LUnallocated::USED_AT_START));
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
 }
 
 
@@ -662,7 +645,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+      :  Use(value, new LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -671,7 +654,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  operand->set_virtual_register(value->id());
+  allocator_->RecordUse(value, operand);
   return operand;
 }
 
@@ -679,33 +662,36 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  result->set_virtual_register(current_instruction_->id());
+  allocator_->RecordDefinition(current_instruction_, result);
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr, int index) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -725,9 +711,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator));
+  instr->set_environment(CreateEnvironment(hydrogen_env));
   return instr;
 }
 
@@ -757,7 +741,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasObservableSideEffects()) {
+  if (hinstr->HasSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -769,8 +753,7 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
-      !hinstr->HasObservableSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -787,46 +770,66 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_));
+  instr->set_pointer_map(new LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand =
-      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  operand->set_virtual_register(allocator_->GetVirtualRegister());
-  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new(zone()) LLabel(instr->block());
+  return new LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineAsRegister(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), r1);
+    LOperand* right = UseFixed(instr->right(), r0);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, r0), instr);
+  }
 }
 
 
@@ -838,7 +841,7 @@
 
     LOperand* left = UseFixed(instr->left(), r1);
     LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
     return MarkAsCall(DefineFixed(result, r0), instr);
   }
 
@@ -872,7 +875,7 @@
   }
 
   LInstruction* result =
-      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+      DefineAsRegister(new LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -885,7 +888,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+  LArithmeticD* result = new LArithmeticD(op, left, right);
   return DefineAsRegister(result);
 }
 
@@ -903,8 +906,7 @@
   ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, r1);
   LOperand* right_operand = UseFixed(right, r0);
-  LArithmeticT* result =
-      new(zone()) LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -992,26 +994,20 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(
-    HEnvironment* hydrogen_env,
-    int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer =
-      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber ||
-         hydrogen_env->frame_type() != JS_FUNCTION);
+  ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new(zone()) LEnvironment(
-      hydrogen_env->closure(),
-      hydrogen_env->frame_type(),
-      ast_id,
-      hydrogen_env->parameter_count(),
-      argument_count_,
-      value_count,
-      outer);
-  int argument_index = *argument_index_accumulator;
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1020,44 +1016,31 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new(zone()) LArgument(argument_index++);
+      op = new LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
-  if (hydrogen_env->frame_type() == JS_FUNCTION) {
-    *argument_index_accumulator = argument_index;
-  }
-
   return result;
 }
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  if (value->EmitAtUses()) {
-    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new(zone()) LGoto(successor->block_id());
+    return new LGoto(successor->block_id());
   }
-
-  LBranch* result = new(zone()) LBranch(UseRegister(value));
-  // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
-  Representation rep = value->representation();
-  HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
-    return AssignEnvironment(result);
-  }
-  return result;
+  return AssignEnvironment(new LBranch(UseRegister(v)));
 }
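
The branch lowering removed above attached a deoptimization environment only when the branch input was a tagged value not statically known to be a smi or boolean, i.e. only when the generated code might actually deopt; the restored 3.6 code assigns one unconditionally. The decision itself, as a standalone predicate (HType here is a stand-in):

    #include <cstdio>

    struct HType { bool is_smi; bool is_boolean; };

    static bool NeedsDeoptEnvironment(bool is_tagged, HType type) {
      // Only a tagged value that may be neither smi nor boolean can send
      // the ToBoolean handling down a deoptimizing path.
      return is_tagged && !type.is_smi && !type.is_boolean;
    }

    int main() {
      std::printf("%d\n", NeedsDeoptEnvironment(true, {false, false}));  // 1
      std::printf("%d\n", NeedsDeoptEnvironment(true, {false, true}));   // 0
    }
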
 
 
@@ -1066,24 +1049,23 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  return new(zone()) LCmpMapAndBranch(value, temp);
+  return new LCmpMapAndBranch(value, temp);
 }
 
 
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
-  LOperand* value = UseRegister(instr->value());
-  return DefineAsRegister(new(zone()) LArgumentsLength(value));
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  return DefineAsRegister(new LArgumentsLength(UseRegister(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new(zone()) LArgumentsElements);
+  return DefineAsRegister(new LArgumentsElements);
 }
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
   LInstanceOf* result =
-      new(zone()) LInstanceOf(UseFixed(instr->left(), r0),
+      new LInstanceOf(UseFixed(instr->left(), r0),
                       UseFixed(instr->right(), r1));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
@@ -1092,26 +1074,17 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0),
-                                         FixedTemp(r4));
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
-  LOperand* receiver = UseRegisterAtStart(instr->receiver());
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
-  return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   LOperand* receiver = UseFixed(instr->receiver(), r0);
   LOperand* length = UseFixed(instr->length(), r2);
   LOperand* elements = UseFixed(instr->elements(), r3);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
+  LApplyArguments* result = new LApplyArguments(function,
                                                 receiver,
                                                 length,
                                                 elements);
@@ -1122,75 +1095,63 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = Use(instr->argument());
-  return new(zone()) LPushArgument(argument);
+  return new LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+  return DefineAsRegister(new LOuterContext(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context));
+  return DefineAsRegister(new LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+  LInvokeFunction* result = new LInvokeFunction(function);
   return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
     LOperand* input = UseFixedDouble(instr->value(), d2);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
     return MarkAsCall(DefineFixedDouble(result, d2), instr);
-  } else if (op == kMathPowHalf) {
-    LOperand* input = UseFixedDouble(instr->value(), d2);
-    LOperand* temp = FixedTemp(d3);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
-    return DefineFixedDouble(result, d2);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
@@ -1200,6 +1161,8 @@
         return DefineAsRegister(result);
       case kMathRound:
         return AssignEnvironment(DefineAsRegister(result));
+      case kMathPowHalf:
+        return DefineAsRegister(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1212,47 +1175,45 @@
   ASSERT(instr->key()->representation().IsTagged());
   argument_count_ -= instr->argument_count();
   LOperand* key = UseFixed(instr->key(), r2);
-  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr);
+  return MarkAsCall(DefineFixed(new LCallKeyed(key), r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallNamed, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallGlobal, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
   LOperand* constructor = UseFixed(instr->constructor(), r1);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new(zone()) LCallNew(constructor);
+  LCallNew* result = new LCallNew(constructor);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), r1);
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0),
-                    instr);
+  return MarkAsCall(DefineFixed(new LCallFunction, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallRuntime, r0), instr);
 }
 
 
@@ -1271,32 +1232,25 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineAsRegister(new(zone()) LBitI(left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), r1);
-    LOperand* right = UseFixed(instr->right(), r0);
-    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
-    return MarkAsCall(DefineFixed(result, r0), instr);
-  }
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LBitNotI(value));
+  return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
 }
 
 
@@ -1312,7 +1266,7 @@
     LOperand* dividend = UseFixed(instr->left(), r0);
     LOperand* divisor = UseFixed(instr->right(), r1);
     return AssignEnvironment(AssignPointerMap(
-             DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
+             DefineFixed(new LDivI(dividend, divisor), r0)));
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }
@@ -1328,15 +1282,15 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
+      mod = new LModI(value, UseOrConstant(instr->right()));
     } else {
       LOperand* dividend = UseRegister(instr->left());
       LOperand* divisor = UseRegister(instr->right());
-      mod = new(zone()) LModI(dividend,
-                              divisor,
-                              TempRegister(),
-                              FixedTemp(d10),
-                              FixedTemp(d11));
+      mod = new LModI(dividend,
+                      divisor,
+                      TempRegister(),
+                      FixedTemp(d10),
+                      FixedTemp(d11));
     }
 
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
@@ -1354,7 +1308,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), d1);
     LOperand* right = UseFixedDouble(instr->right(), d2);
-    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, d1), instr);
   }
 }
@@ -1375,12 +1329,7 @@
     } else {
       left = UseRegisterAtStart(instr->LeastConstantOperand());
     }
-    LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      AssignEnvironment(mul);
-    }
-    return DefineAsRegister(mul);
+    return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
 
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
@@ -1397,7 +1346,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new(zone()) LSubI(left, right);
+    LSubI* sub = new LSubI(left, right);
     LInstruction* result = DefineAsRegister(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1417,7 +1366,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new(zone()) LAddI(left, right);
+    LAddI* add = new LAddI(left, right);
     LInstruction* result = DefineAsRegister(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1441,29 +1390,22 @@
   LOperand* left = UseFixedDouble(instr->left(), d1);
   LOperand* right = exponent_type.IsDouble() ?
       UseFixedDouble(instr->right(), d2) :
-      UseFixed(instr->right(), r2);
-  LPower* result = new(zone()) LPower(left, right);
+      UseFixed(instr->right(), r0);
+  LPower* result = new LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, d3),
                     instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->global_object()->representation().IsTagged());
-  LOperand* global_object = UseFixed(instr->global_object(), r0);
-  LRandom* result = new(zone()) LRandom(global_object);
-  return MarkAsCall(DefineFixedDouble(result, d7), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), r1);
-  LOperand* right = UseFixed(instr->right(), r0);
-  LCmpT* result = new(zone()) LCmpT(left, right);
+  bool reversed = (op == Token::GT || op == Token::LTE);
+  LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+  LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+  LCmpT* result = new LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -1474,16 +1416,16 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
-    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1492,70 +1434,47 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return new(zone()) LCmpObjectEqAndBranch(left, right);
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
     HCompareConstantEqAndBranch* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpConstantEqAndBranch(value);
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  return new(zone()) LIsObjectAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsStringAndBranch(value, temp);
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+  return new LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
-    HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), r1);
-  LOperand* right = UseFixed(instr->right(), r0);
-  LStringCompareAndBranch* result =
-      new(zone()) LStringCompareAndBranch(left, right);
-  return MarkAsCall(result, instr);
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LHasInstanceTypeAndBranch(value);
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1564,14 +1483,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+  return DefineAsRegister(new LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
+  return new LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1579,48 +1498,40 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegister(instr->value());
-  return new(zone()) LClassOfTestAndBranch(value, TempRegister());
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
+  return DefineAsRegister(new LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LElementsKind(object));
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
-  LOperand* object = UseFixed(instr->value(), r0);
-  LDateField* result = new LDateField(object, FixedTemp(r1), instr->index());
-  return MarkAsCall(DefineFixed(result, r0), instr);
+  LValueOf* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterAtStart(instr->index());
-  LOperand* length = UseRegister(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+  return AssignEnvironment(new LBoundsCheck(UseRegisterAtStart(instr->index()),
+                                            UseRegister(instr->length())));
 }
 
 
@@ -1633,7 +1544,7 @@
 
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* value = UseFixed(instr->value(), r0);
-  return MarkAsCall(new(zone()) LThrow(value), instr);
+  return MarkAsCall(new LThrow(value), instr);
 }
 
 
@@ -1656,25 +1567,22 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
+      LNumberUntagD* res = new LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
-      LOperand* value = UseRegisterAtStart(instr->value());
+      LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       LInstruction* res = NULL;
       if (!needs_check) {
-        res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
+        res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
       } else {
         LOperand* temp1 = TempRegister();
         LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
                                                       : NULL;
         LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
                                                       : NULL;
-        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
-                                                       temp1,
-                                                       temp2,
-                                                       temp3));
+        res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
         res = AssignEnvironment(res);
       }
       return res;
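
The needs_check split above falls out of V8's pointer tagging: a value statically typed as a smi can be untagged with a single shift and needs no environment, while an arbitrary tagged value must be tested first and may deoptimize. A standalone sketch of the 32-bit smi scheme (constants follow the usual layout; this is illustrative, not V8's headers):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // bit 0 clear = smi, set = heap pointer

    inline bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

    inline int32_t SmiUntag(intptr_t tagged) {
      assert(IsSmi(tagged));                     // LSmiUntag, needs_check == false
      return static_cast<int32_t>(tagged >> 1);  // arithmetic shift drops the tag
    }

    inline intptr_t SmiTag(int32_t value) {
      // LSmiTag is only used when the range is known to fit (IsInSmiRange);
      // otherwise LNumberTagI allocates a heap number on overflow.
      return static_cast<intptr_t>(value) << 1;
    }
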
@@ -1688,31 +1596,32 @@
       // Make sure that the temp and result_temp registers are
       // different.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      LNumberTagD* result = new LNumberTagD(value, temp1, temp2);
       Define(result, result_temp);
       return AssignPointerMap(result);
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
-      LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
+      LDoubleToI* res =
+        new LDoubleToI(value,
+                       TempRegister(),
+                       instr->CanTruncateToInt32() ? TempRegister() : NULL);
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
-      LOperand* value = UseRegisterAtStart(val);
+      LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineAsRegister(new(zone()) LSmiTag(value));
+        return DefineSameAsFirst(new LSmiTag(value));
       } else {
-        LNumberTagI* result = new(zone()) LNumberTagI(value);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+        LNumberTagI* result = new LNumberTagI(value);
+        return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
       LOperand* value = Use(instr->value());
-      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+      return DefineAsRegister(new LInteger32ToDouble(value));
     }
   }
   UNREACHABLE();
@@ -1722,13 +1631,13 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  LInstruction* result = new LCheckInstanceType(value);
   return AssignEnvironment(result);
 }
 
@@ -1736,26 +1645,26 @@
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+  LInstruction* result = new LCheckPrototypeMaps(temp1, temp2);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckSmi(value));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckFunction(value));
+  return AssignEnvironment(new LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMap(value);
+  LInstruction* result = new LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1765,32 +1674,57 @@
   Representation input_rep = value->representation();
   LOperand* reg = UseRegister(value);
   if (input_rep.IsDouble()) {
-    return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
+    return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
   } else if (input_rep.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+    return DefineAsRegister(new LClampIToUint8(reg));
   } else {
     ASSERT(input_rep.IsTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve d11 explicitly.
-    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(d11));
+    LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
 
 
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+    return AssignEnvironment(DefineAsRegister(res));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* temp1 = TempRegister();
+    LOperand* temp2 = TempRegister();
+    LOperand* temp3 = FixedTemp(d11);
+    LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+    return AssignEnvironment(DefineSameAsFirst(res));
+  }
+}
+
+
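
Both instructions built here carry an environment because the inline double-to-int conversion can fail and fall back to the deoptimizer. A rough C++ analogue of the strictest (non-truncating) variant of that check, with the bail-out modeled as a false return:

    #include <cstdint>

    bool DoubleToInt32Exact(double value, int32_t* result) {
      // NaN fails every comparison, so it takes the bail-out path too.
      if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;
      int32_t truncated = static_cast<int32_t>(value);
      if (static_cast<double>(truncated) != value) return false;  // fractional
      *result = truncated;  // caveat: -0.0 passes here but is special in V8
      return true;
    }
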
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new(zone()) LReturn(UseFixed(instr->value(), r0));
+  return new LReturn(UseFixed(instr->value(), r0));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LConstantI);
+    return DefineAsRegister(new LConstantI);
   } else if (r.IsDouble()) {
-    return DefineAsRegister(new(zone()) LConstantD);
+    return DefineAsRegister(new LConstantD);
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new(zone()) LConstantT);
+    return DefineAsRegister(new LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1799,8 +1733,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
+  LLoadGlobalCell* result = new LLoadGlobalCell;
+  return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1808,18 +1742,20 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), r0);
-  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  LOperand* value = UseRegister(instr->value());
-  // Use a temp to check the value in the cell in the case where we perform
-  // a hole check.
-  return instr->RequiresHoleCheck()
-      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
-      : new(zone()) LStoreGlobalCell(value, NULL);
+  if (instr->check_hole_value()) {
+    LOperand* temp = TempRegister();
+    LOperand* value = UseRegister(instr->value());
+    return AssignEnvironment(new LStoreGlobalCell(value, temp));
+  } else {
+    LOperand* value = UseRegisterAtStart(instr->value());
+    return new LStoreGlobalCell(value, NULL);
+  }
 }
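
The check_hole_value() split exists because a global property cell can contain a reserved hole sentinel once the property has been deleted; a store that would overwrite the hole has to deoptimize so the runtime can handle it. Sketch with assumed names (Cell and kHole are illustrative, not V8's types):

    #include <cstdint>

    struct Cell {
      static constexpr intptr_t kHole = -1;  // stand-in for V8's hole value
      intptr_t payload;
    };

    // Returns false where the generated code would deoptimize; the temp
    // register above exists precisely to load and compare the old value.
    bool StoreGlobalCellChecked(Cell* cell, intptr_t value) {
      if (cell->payload == Cell::kHole) return false;
      cell->payload = value;
      return true;
    }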
 
 
@@ -1827,16 +1763,14 @@
   LOperand* global_object = UseFixed(instr->global_object(), r1);
   LOperand* value = UseFixed(instr->value(), r0);
   LStoreGlobalGeneric* result =
-      new(zone()) LStoreGlobalGeneric(global_object, value);
+      new LStoreGlobalGeneric(global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return DefineAsRegister(new LLoadContextSlot(context));
 }
 
 
@@ -1850,14 +1784,13 @@
     context = UseRegister(instr->context());
     value = UseRegister(instr->value());
   }
-  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return new LStoreContextSlot(context, value);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   return DefineAsRegister(
-      new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
+      new LLoadNamedField(UseRegisterAtStart(instr->object())));
 }
 
 
@@ -1866,13 +1799,11 @@
   ASSERT(instr->representation().IsTagged());
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), r0);
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
     return MarkAsCall(DefineFixed(result, r0), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1880,7 +1811,7 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), r0);
-  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0);
+  LInstruction* result = DefineFixed(new LLoadNamedGeneric(object), r0);
   return MarkAsCall(result, instr);
 }
 
@@ -1888,20 +1819,20 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+      new LLoadFunctionPrototype(UseRegister(instr->function()))));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadElements(input));
+  return DefineAsRegister(new LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new LLoadExternalArrayPointer(input));
 }
 
 
@@ -1911,9 +1842,8 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterAtStart(instr->key());
-  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
-  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
-  return DefineAsRegister(result);
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
@@ -1924,7 +1854,7 @@
   LOperand* elements = UseTempRegister(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+      new LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1932,18 +1862,19 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
   ASSERT(
-      (instr->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt; make sure it
   // has an environment.
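
Concretely, the overflow case is EXTERNAL_UNSIGNED_INT_ELEMENTS: the element is a uint32, but Lithium's integer representation is a signed int32, so any element above INT32_MAX cannot be materialized and the load must deoptimize:

    #include <cstdint>

    bool LoadUint32AsInt32(uint32_t element, int32_t* out) {
      if (element > static_cast<uint32_t>(INT32_MAX)) return false;  // deopt
      *out = static_cast<int32_t>(element);
      return true;
    }
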
@@ -1957,7 +1888,7 @@
   LOperand* key = UseFixed(instr->key(), r0);
 
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0);
+      DefineFixed(new LLoadKeyedGeneric(object, key), r0);
   return MarkAsCall(result, instr);
 }
 
@@ -1976,7 +1907,8 @@
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastElement(obj, key, val);
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
 }
 
 
@@ -1990,18 +1922,19 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
-      (instr->value()->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->value()->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2016,9 +1949,9 @@
       : UseRegister(instr->value());
   LOperand* key = UseRegisterOrConstant(instr->key());
 
-  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                        key,
-                                                        val);
+  return new LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                key,
+                                                val);
 }
 
 
@@ -2031,29 +1964,7 @@
   ASSERT(instr->key()->representation().IsTagged());
   ASSERT(instr->value()->representation().IsTagged());
 
-  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
-    HTransitionElementsKind* instr) {
-  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
-    LOperand* object = UseRegister(instr->object());
-    LOperand* new_map_reg = TempRegister();
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
-    return DefineSameAsFirst(result);
-  } else {
-    LOperand* object = UseFixed(instr->object(), r0);
-    LOperand* fixed_object_reg = FixedTemp(r2);
-    LOperand* new_map_reg = FixedTemp(r3);
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object,
-                                            new_map_reg,
-                                            fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, r0), instr);
-  }
+  return MarkAsCall(new LStoreKeyedGeneric(obj, key, val), instr);
 }
 
 
@@ -2068,7 +1979,7 @@
       ? UseTempRegister(instr->value())
       : UseRegister(instr->value());
 
-  return new(zone()) LStoreNamedField(obj, val);
+  return new LStoreNamedField(obj, val);
 }
 
 
@@ -2076,7 +1987,7 @@
   LOperand* obj = UseFixed(instr->object(), r1);
   LOperand* val = UseFixed(instr->value(), r0);
 
-  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+  LInstruction* result = new LStoreNamedGeneric(obj, val);
   return MarkAsCall(result, instr);
 }
 
@@ -2084,67 +1995,55 @@
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0),
-                    instr);
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+  LStringCharFromCode* result = new LStringCharFromCode(char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  LAllocateObject* result = new LAllocateObject(TempRegister(), TempRegister());
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
+  return DefineAsRegister(new LStringLength(string));
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new LArrayLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new LObjectLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr);
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
   LOperand* object = UseFixed(instr->object(), r0);
   LOperand* key = UseFixed(instr->key(), r1);
-  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
+  LDeleteProperty* result = new LDeleteProperty(object, key);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -2152,13 +2051,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new(zone()) LOsrEntry);
+  return AssignEnvironment(new LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  return DefineAsSpilled(new LParameter, spill_index);
 }
 
 
@@ -2168,13 +2067,13 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr);
+  return MarkAsCall(DefineFixed(new LCallStub, r0), instr);
 }
 
 
@@ -2191,33 +2090,32 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = UseRegister(instr->index());
-  LAccessArgumentsAt* result =
-      new(zone()) LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), r0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  LToFastProperties* result = new LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0));
+  LTypeof* result = new LTypeof(UseFixed(instr->value(), r0));
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new(zone()) LIsConstructCallAndBranch(TempRegister());
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2240,7 +2138,7 @@
   // If there is an instruction pending deoptimization environment, create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LInstruction* result = new(zone()) LLazyBailout;
+    LInstruction* result = new LLazyBailout;
     result = AssignEnvironment(result);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2254,10 +2152,10 @@
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
-    return MarkAsCall(new(zone()) LStackCheck, instr);
+    return MarkAsCall(new LStackCheck, instr);
   } else {
     ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
   }
 }
 
@@ -2266,11 +2164,9 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind(),
-                                               instr->is_construct());
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2278,8 +2174,7 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->
-      DiscardInlined(false);
+  HEnvironment* outer = current_block_->last_environment()->outer();
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
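
DoEnterInlined and DoLeaveInlined maintain a stack of environments threaded through outer(): entering an inlined call wraps the caller's environment, and leaving pops back one level, which is all the reverted outer() version does. A minimal sketch of that discipline (Env is an assumed stand-in for HEnvironment; real environments are zone-allocated, so nothing is freed here):

    struct Env {
      Env* outer;  // enclosing (caller) environment, null at the top
    };

    Env* EnterInlined(Env* caller) { return new Env{caller}; }  // CopyForInlining
    Env* LeaveInlined(Env* callee) { return callee->outer; }    // pop one frame
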
@@ -2288,37 +2183,9 @@
 LInstruction* LChunkBuilder::DoIn(HIn* instr) {
   LOperand* key = UseRegisterAtStart(instr->key());
   LOperand* object = UseRegisterAtStart(instr->object());
-  LIn* result = new(zone()) LIn(key, object);
+  LIn* result = new LIn(key, object);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
-  LOperand* object = UseFixed(instr->enumerable(), r0);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
-  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
-  LOperand* map = UseRegister(instr->map());
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* map = UseRegisterAtStart(instr->map());
-  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 62cde6e..8c18760 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,7 +49,6 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
-  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -88,13 +87,11 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
-  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
-  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -110,12 +107,10 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNilAndBranch)                             \
+  V(IsNullAndBranch)                            \
   V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
-  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -143,7 +138,6 @@
   V(Parameter)                                  \
   V(Power)                                      \
   V(PushArgument)                               \
-  V(Random)                                     \
   V(RegExpLiteral)                              \
   V(Return)                                     \
   V(ShiftI)                                     \
@@ -168,18 +162,11 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)
+  V(ValueOf)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -469,20 +456,6 @@
 };
 
 
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
-  LWrapReceiver(LOperand* receiver, LOperand* function) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-};
-
-
 class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
@@ -654,17 +627,16 @@
 };
 
 
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
+class LIsNullAndBranch: public LControlInstruction<1, 0> {
  public:
-  explicit LIsNilAndBranch(LOperand* value) {
+  explicit LIsNullAndBranch(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
-  EqualityKind kind() const { return hydrogen()->kind(); }
-  NilValue nil() const { return hydrogen()->nil(); }
+  bool is_strict() const { return hydrogen()->is_strict(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -684,20 +656,6 @@
 };
 
 
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
-  LIsStringAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -726,23 +684,6 @@
 };
 
 
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
-  LStringCompareAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
-                               "string-compare-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
-  Token::Value op() const { return hydrogen()->token(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -853,15 +794,18 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return hydrogen()->op(); }
+  Token::Value op() const { return op_; }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ private:
+  Token::Value op_;
 };
 
 
@@ -1005,41 +949,6 @@
 };
 
 
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
-  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
-    inputs_[0] = date;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-  Smi* index() const { return index_; }
-
- private:
-  Smi* index_;
-};
-
-
-class LSetDateField: public LTemplateInstruction<1, 2, 1> {
- public:
-  LSetDateField(LOperand* date, LOperand* value, LOperand* temp, int index)
-      : index_(index) {
-    inputs_[0] = date;
-    inputs_[1] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-set-field")
-  DECLARE_HYDROGEN_ACCESSOR(DateField)
-
-  int index() const { return index_; }
-
- private:
-  int index_;
-};
-
-
 class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
@@ -1084,17 +993,6 @@
 };
 
 
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LRandom(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
-  DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1311,8 +1209,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
-  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1330,7 +1226,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1363,6 +1259,7 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
+  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1379,9 +1276,7 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1403,13 +1298,6 @@
 };
 
 
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGlobalObject(LOperand* context) {
@@ -1491,17 +1379,12 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LCallFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  LOperand* function() { return inputs_[0]; }
-  int arity() const { return hydrogen()->argument_count() - 1; }
+  int arity() const { return hydrogen()->argument_count() - 2; }
 };
 
 
@@ -1677,6 +1560,7 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1696,7 +1580,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1758,7 +1642,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
@@ -1784,30 +1668,6 @@
 };
 
 
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* new_map_temp,
-                          LOperand* temp_reg) {
-    inputs_[0] = object;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp_reg;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_reg() { return temps_[0]; }
-  LOperand* temp_reg() { return temps_[1]; }
-  Handle<Map> original_map() { return hydrogen()->original_map(); }
-  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
-};
-
-
 class LStringAdd: public LTemplateInstruction<1, 2, 0> {
  public:
   LStringAdd(LOperand* left, LOperand* right) {
@@ -1871,8 +1731,6 @@
     inputs_[0] = value;
   }
 
-  LOperand* value() { return InputAt(0); }
-
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1973,25 +1831,6 @@
 };
 
 
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
- public:
-  LAllocateObject(LOperand* temp1, LOperand* temp2) {
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -2132,62 +1971,6 @@
 };
 
 
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInPrepareMap(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
-  }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadFieldByIndex(LOperand* object, LOperand* index) {
-    inputs_[0] = object;
-    inputs_[1] = index;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2255,7 +2038,6 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
-        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2285,7 +2067,6 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2295,6 +2076,7 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(DoubleRegister reg);
 
@@ -2345,6 +2127,8 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2375,12 +2159,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2390,7 +2174,6 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
-  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 82b80a2..4a201ab 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,19 +62,11 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
-
-  CodeStub::GenerateFPStubs();
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // NONE indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::NONE);
-
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -143,7 +135,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). r5 is zero for method calls and non-zero for
   // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
+  if (info_->is_strict_mode() || info_->is_native()) {
     Label ok;
     __ cmp(r5, Operand(0));
     __ b(eq, &ok);
@@ -198,11 +190,13 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        MemOperand target = ContextOperand(cp, var->index());
-        __ str(r0, target);
-        // Update the write barrier. This clobbers r3 and r0.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
+        __ mov(r1, Operand(Context::SlotOffset(var->index())));
+        __ str(r0, MemOperand(cp, r1));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(r2, Operand(cp));
+        __ RecordWrite(r2, Operand(r1), r3, r0);
       }
     }
     Comment(";;; End allocate local context");
@@ -244,9 +238,6 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -262,7 +253,7 @@
 
 bool LCodeGen::GenerateDeoptJumpTable() {
   // Check that the jump table is accessible from everywhere in the function
-  // code, i.e. that offsets to the table can be encoded in the 24bit signed
+  // code, ie that offsets to the table can be encoded in the 24bit signed
   // immediate of a branch instruction.
   // To simplify we consider the code size from the first instruction to the
   // end of the jump table. We also don't consider the pc load delta.
@@ -321,22 +312,7 @@
   if (op->IsRegister()) {
     return ToRegister(op->index());
   } else if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    Handle<Object> literal = chunk_->LookupLiteral(const_op);
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
-      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
-    } else if (r.IsDouble()) {
-      Abort("EmitLoadRegister: Unsupported double immediate.");
-    } else {
-      ASSERT(r.IsTagged());
-      if (literal->IsSmi()) {
-        __ mov(scratch, Operand(literal));
-      } else {
-       __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
-      }
-    }
+    __ mov(scratch, ToOperand(op));
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
     __ ldr(scratch, ToMemOperand(op));
@@ -385,18 +361,6 @@
 }
 
 
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
-  Handle<Object> literal = chunk_->LookupLiteral(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
-  return literal;
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
 int LCodeGen::ToInteger32(LConstantOperand* op) const {
   Handle<Object> value = chunk_->LookupLiteral(op);
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
@@ -406,12 +370,6 @@
 }
 
 
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  Handle<Object> value = chunk_->LookupLiteral(op);
-  return value->Number();
-}
-
-
 Operand LCodeGen::ToOperand(LOperand* op) {
   if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
@@ -479,19 +437,7 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -624,14 +570,10 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
-    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
-      if (e->frame_type() == JS_FUNCTION) {
-        ++jsframe_count;
-      }
     }
-    Translation translation(&translations_, frame_count, jsframe_count);
+    Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
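
For contrast with the reverted code: the newer version counted both the total frames and the subset of full JS frames while walking the environment chain, because the deoptimizer had learned to rebuild construct-stub and arguments-adaptor frames. With those gone, one count suffices. Side-by-side sketch (names assumed):

    enum class FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };

    struct Env {
      Env* outer;
      FrameType frame_type;
    };

    // 3.6 only needs frame_count; the reverted code also fed jsframe_count
    // into the Translation constructor.
    void CountFrames(const Env* env, int* frame_count, int* jsframe_count) {
      *frame_count = 0;
      *jsframe_count = 0;
      for (const Env* e = env; e != nullptr; e = e->outer) {
        ++*frame_count;
        if (e->frame_type == FrameType::JS_FUNCTION) ++*jsframe_count;
      }
    }
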
@@ -648,6 +590,7 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -680,6 +623,7 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
+  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -755,7 +699,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(expected_safepoint_kind_ == kind);
 
-  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -1042,7 +986,6 @@
     virtual void Generate() {
       codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
     }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LDivI* instr_;
   };
@@ -1378,13 +1321,8 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  Handle<Object> value = instr->value();
-  if (value->IsSmi()) {
-    __ mov(ToRegister(instr->result()), Operand(value));
-  } else {
-    __ LoadHeapObject(ToRegister(instr->result()),
-                      Handle<HeapObject>::cast(value));
-  }
+  ASSERT(instr->result()->IsRegister());
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
 }
 
 
@@ -1437,46 +1375,6 @@
 }
 
 
-void LCodeGen::DoDateField(LDateField* instr) {
-  Register object = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Smi* index = instr->index();
-  Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(r0));
-  ASSERT(!scratch.is(scratch0()));
-  ASSERT(!scratch.is(object));
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
-  __ Assert(eq, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch, Operand(stamp));
-      __ ldr(scratch, MemOperand(scratch));
-      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
-      __ cmp(scratch, scratch0());
-      __ b(ne, &runtime);
-      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
-                                             kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ mov(r1, Operand(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-}
-
-
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register result = ToRegister(instr->result());
@@ -1751,44 +1649,30 @@
 }
 
 
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  __ cmp(ToRegister(left), ToRegister(right));
+}
+
+
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Condition cond = TokenToCondition(instr->op(), false);
 
-  if (left->IsConstantOperand() && right->IsConstantOperand()) {
-    // We can statically evaluate the comparison.
-    double left_val = ToDouble(LConstantOperand::cast(left));
-    double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
-    EmitGoto(next_block);
+  if (instr->is_double()) {
+    // Compare left and right as doubles and load the
+    // resulting flags into the normal status register.
+    __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+    // If a NaN is involved, i.e. the result is unordered (V set),
+    // jump to false block label.
+    __ b(vs, chunk_->GetAssemblyLabel(false_block));
   } else {
-    if (instr->is_double()) {
-      // Compare left and right operands as doubles and load the
-      // resulting flags into the normal status register.
-      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
-      // If a NaN is involved, i.e. the result is unordered (V set),
-      // jump to false block label.
-      __ b(vs, chunk_->GetAssemblyLabel(false_block));
-    } else {
-      if (right->IsConstantOperand()) {
-        __ cmp(ToRegister(left),
-               Operand(ToInteger32(LConstantOperand::cast(right))));
-      } else if (left->IsConstantOperand()) {
-        __ cmp(ToRegister(right),
-               Operand(ToInteger32(LConstantOperand::cast(left))));
-        // We transposed the operands. Reverse the condition.
-        cond = ReverseCondition(cond);
-      } else {
-        __ cmp(ToRegister(left), ToRegister(right));
-      }
-    }
-    EmitBranch(true_block, false_block, cond);
+    EmitCmpI(left, right);
   }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
 }
 
 
@@ -1813,35 +1697,25 @@
 }
 
 
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
   Register scratch = scratch0();
   Register reg = ToRegister(instr->InputAt(0));
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be an undetectable object.
-  if (instr->hydrogen()->representation().IsSpecialization() ||
-      instr->hydrogen()->type().IsSmi()) {
-    EmitGoto(false_block);
-    return;
-  }
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
-  __ LoadRoot(ip, nil_value);
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
   __ cmp(reg, ip);
-  if (instr->kind() == kStrictEquality) {
+  if (instr->is_strict()) {
     EmitBranch(true_block, false_block, eq);
   } else {
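+    // For non-strict equality, undefined compares equal to null as well,
+    // so check for it next.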
-    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ b(eq, true_label);
-    __ LoadRoot(ip, other_nil_value);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     __ cmp(reg, ip);
     __ b(eq, true_label);
     __ JumpIfSmi(reg, false_label);
@@ -1898,31 +1772,6 @@
 }
 
 
-Condition LCodeGen::EmitIsString(Register input,
-                                 Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
-  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
-
-  return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp1 = ToRegister(instr->TempAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition true_cond =
-      EmitIsString(reg, temp1, false_label);
-
-  EmitBranch(true_block, false_block, true_cond);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1948,41 +1797,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
-
-  Condition condition = ComputeCompareCondition(op);
-
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -2048,7 +1862,7 @@
 
 
 // Branches to a label or falls through with the answer in flags.  Trashes
-// the temp registers, but not the input.
+// the temp registers, but not the input.  Only input and temp2 may alias.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                               Handle<String> class_name,
@@ -2056,40 +1870,30 @@
                                Register temp,
                                Register temp2) {
   ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
-
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
+  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+  __ b(lt, is_false);
 
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
-    __ b(lt, is_false);
-    __ b(eq, is_true);
-    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
-    __ b(eq, is_true);
+    __ b(ge, is_true);
   } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
-    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ b(gt, is_false);
+    __ b(ge, is_false);
   }
 
-  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
 
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -2166,8 +1970,9 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() { return instr_; }
+
     Label* map_check() { return &map_check_; }
+
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -2197,10 +2002,7 @@
   // We use Factory::the_hole_value() on purpose instead of loading from the
   // root array to force relocation to be able to later patch with
   // the cached map.
-  Handle<JSGlobalPropertyCell> cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ mov(ip, Operand(Handle<Object>(cell)));
-  __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+  __ mov(ip, Operand(factory()->the_hole_value()));
   __ cmp(map, Operand(ip));
   __ b(ne, &cache_miss);
   // We use Factory::the_hole_value() on purpose instead of loading from the
@@ -2255,7 +2057,7 @@
   // offset to the location of the map check.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(temp.is(r4));
-  __ LoadHeapObject(InstanceofStub::right(), instr->function());
+  __ mov(InstanceofStub::right(), Operand(instr->function()));
   static const int kAdditionalDelta = 4;
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
@@ -2276,6 +2078,26 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -2284,6 +2106,9 @@
   __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
 
   Condition condition = ComputeCompareCondition(op);
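+  // The generic compare stub implements GT and LTE with the operands swapped,
+  // so the condition must be reversed for those tokens.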
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
   __ LoadRoot(ToRegister(instr->result()),
               Heap::kTrueValueRootIndex,
               condition);
@@ -2312,7 +2137,7 @@
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
+  if (instr->hydrogen()->check_hole_value()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
     DeoptimizeIf(eq, instr->environment());
@@ -2333,27 +2158,27 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->value());
-  Register cell = scratch0();
+  Register value = ToRegister(instr->InputAt(0));
+  Register scratch = scratch0();
 
   // Load the cell.
-  __ mov(cell, Operand(instr->hydrogen()->cell()));
+  __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // We use a temp to check the payload (CompareRoot might clobber ip).
-    Register payload = ToRegister(instr->TempAt(0));
-    __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
-    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
+  if (instr->hydrogen()->check_hole_value()) {
+    Register scratch2 = ToRegister(instr->TempAt(0));
+    __ ldr(scratch2,
+           FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch2, ip);
     DeoptimizeIf(eq, instr->environment());
   }
 
   // Store the value.
-  __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
-  // Cells are always rescanned, so no write barrier here.
+  __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
 }
 
 
@@ -2362,7 +2187,7 @@
   ASSERT(ToRegister(instr->value()).is(r0));
 
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2373,53 +2198,17 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(context, instr->slot_index()));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(result, ip);
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment());
-    } else {
-      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
-    }
-  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  Register scratch = scratch0();
-  MemOperand target = ContextOperand(context, instr->slot_index());
-
-  Label skip_assignment;
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ ldr(scratch, target);
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(scratch, ip);
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment());
-    } else {
-      __ b(ne, &skip_assignment);
-    }
+  __ str(value, ContextOperand(context, instr->slot_index()));
+  if (instr->needs_write_barrier()) {
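+    // Record the pointer store so the GC rescans this context slot.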
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWrite(context, Operand(offset), value, scratch0());
   }
-
-  __ str(value, target);
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteContextSlot(context,
-                              target.offset(),
-                              value,
-                              scratch,
-                              kLRHasBeenSaved,
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
-  }
-
-  __ bind(&skip_assignment);
 }
 
 
@@ -2439,9 +2228,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsFound() &&
+  ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2457,7 +2246,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2668,9 +2457,13 @@
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  DeoptimizeIf(eq, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // TODO(danno): If no hole check is required, there is no need to allocate
+    // elements into a temporary register, instead scratch can be used.
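+    // The hole is encoded as a NaN with a distinguished upper word, so on
+    // little-endian ARM comparing the upper 32 bits is sufficient.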
+    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr->environment());
+  }
 
   __ vldr(result, elements, 0);
 }
@@ -2741,7 +2534,6 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -2799,10 +2591,15 @@
 }
 
 
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
   Register scratch = scratch0();
+  ASSERT(receiver.is(r0));  // Used for parameter count.
+  ASSERT(function.is(r1));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(r0));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2843,18 +2640,6 @@
   __ ldr(receiver,
          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  Register scratch = scratch0();
-  ASSERT(receiver.is(r0));  // Used for parameter count.
-  ASSERT(function.is(r1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(r0));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2889,7 +2674,7 @@
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
   // by InvokeFunction.
-  ParameterCount actual(receiver);
+  v8::internal::ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2909,7 +2694,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
@@ -2927,16 +2712,6 @@
 }
 
 
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  __ push(cp);  // The context is the first argument.
-  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
-  __ push(scratch0());
-  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
-  __ push(scratch0());
-  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
   __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -2954,41 +2729,31 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  // Change context if needed.
+  bool change_context =
+      (info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+  }
+
+  // Set r0 to arguments count if adaption is not needed. Assumes that r0
+  // is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(r0, Operand(arity));
+  }
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (can_invoke_directly) {
-    __ LoadHeapObject(r1, function);
-    // Change context if needed.
-    bool change_context =
-        (info()->closure()->context() != function->context()) ||
-        scope()->contains_with() ||
-        (scope()->num_heap_slots() > 0);
-    if (change_context) {
-      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    }
+  // Invoke function.
+  __ SetCallKind(r5, call_kind);
+  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Call(ip);
 
-    // Set r0 to arguments count if adaption is not needed. Assumes that r0
-    // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
-      __ mov(r0, Operand(arity));
-    }
-
-    // Invoke function.
-    __ SetCallKind(r5, call_kind);
-    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-    __ Call(ip);
-
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-  } else {
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
-  }
+  // Set up deoptimization.
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
   // Restore context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2997,6 +2762,7 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
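+  // CallKnownFunction expects the function object in r1.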
+  __ mov(r1, Operand(instr->function()));
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -3094,7 +2860,6 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -3225,131 +2990,68 @@
 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
   DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
-
-  // Note that according to ECMA-262 15.8.2.13:
-  // Math.pow(-Infinity, 0.5) == Infinity
-  // Math.sqrt(-Infinity) == NaN
-  Label done;
-  __ vmov(temp, -V8_INFINITY);
-  __ VFPCompareAndSetFlags(input, temp);
-  __ vneg(result, temp, eq);
-  __ b(&done, eq);
-
   // Add +0 to convert -0 to +0.
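+  // IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5) must be +0.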
   __ vadd(result, input, kDoubleRegZero);
   __ vsqrt(result, result);
-  __ bind(&done);
 }
 
 
 void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  Register scratch = scratch0();
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
-         ToDoubleRegister(instr->InputAt(1)).is(d2));
-  ASSERT(!instr->InputAt(1)->IsRegister() ||
-         ToRegister(instr->InputAt(1)).is(r2));
-  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
-  ASSERT(ToDoubleRegister(instr->result()).is(d3));
-
-  if (exponent_type.IsTagged()) {
-    Label no_deopt;
-    __ JumpIfSmi(r2, &no_deopt);
-    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r7, Operand(ip));
-    DeoptimizeIf(ne, instr->environment());
-    __ bind(&no_deopt);
-    MathPowStub stub(MathPowStub::TAGGED);
-    __ CallStub(&stub);
+  if (exponent_type.IsDouble()) {
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left),
+                               ToDoubleRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(MathPowStub::INTEGER);
-    __ CallStub(&stub);
+    ASSERT(ToRegister(right).is(r0));
+    // Prepare arguments and call C function.
+    __ PrepareCallCFunction(1, 1, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(isolate()), 1, 1);
   } else {
-    ASSERT(exponent_type.IsDouble());
-    MathPowStub stub(MathPowStub::DOUBLE);
-    __ CallStub(&stub);
+    ASSERT(exponent_type.IsTagged());
+    ASSERT(instr->hydrogen()->left()->representation().IsDouble());
+
+    Register right_reg = ToRegister(right);
+
+    // Check for smi on the right hand side.
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+
+    // Untag smi and convert it to a double.
+    __ SmiUntag(right_reg);
+    SwVfpRegister single_scratch = double_scratch0().low();
+    __ vmov(single_scratch, right_reg);
+    __ vcvt_f64_s32(result_reg, single_scratch);
+    __ jmp(&call);
+
+    // Heap number map check.
+    __ bind(&non_smi);
+    __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, Operand(ip));
+    DeoptimizeIf(ne, instr->environment());
+    int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
+    __ add(scratch, right_reg, Operand(value_offset));
+    __ vldr(result_reg, scratch, 0);
+
+    // Prepare arguments and call C function.
+    __ bind(&call);
+    __ PrepareCallCFunction(0, 2, scratch);
+    __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 0, 2);
   }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom: public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LRandom* instr_;
-  };
-
-  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
-
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(d7));
-  ASSERT(ToRegister(instr->InputAt(0)).is(r0));
-
-  static const int kSeedSize = sizeof(uint32_t);
-  STATIC_ASSERT(kPointerSize == kSeedSize);
-
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
-  static const int kRandomSeedOffset =
-      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
-  // r2: FixedArray of the global context's random seeds
-
-  // Load state[0].
-  __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
-  __ cmp(r1, Operand(0));
-  __ b(eq, deferred->entry());
-  // Load state[1].
-  __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
-  // r1: state[0].
-  // r0: state[1].
-
-  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  __ and_(r3, r1, Operand(0xFFFF));
-  __ mov(r4, Operand(18273));
-  __ mul(r3, r3, r4);
-  __ add(r1, r3, Operand(r1, LSR, 16));
-  // Save state[0].
-  __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
-
-  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ and_(r3, r0, Operand(0xFFFF));
-  __ mov(r4, Operand(36969));
-  __ mul(r3, r3, r4);
-  __ add(r0, r3, Operand(r0, LSR, 16));
-  // Save state[1].
-  __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
-
-  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ and_(r0, r0, Operand(0x3FFFF));
-  __ add(r0, r0, Operand(r1, LSL, 14));
-
-  __ bind(deferred->exit());
-  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
-  // Create this constant using mov/orr to avoid PC relative load.
-  __ mov(r1, Operand(0x41000000));
-  __ orr(r1, r1, Operand(0x300000));
-  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
-  __ vmov(d7, r0, r1);
-  // Move 0x4130000000000000 to VFP.
-  __ mov(r0, Operand(0, RelocInfo::NONE));
-  __ vmov(d8, r0, r1);
-  // Subtract and store the result in the heap number.
-  __ vsub(d7, d7, d8);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1, scratch0());
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-  // Return value is in r0.
+  // Store the result in the result register.
+  __ GetCFunctionDoubleResult(result_reg);
 }
 
 
@@ -3361,14 +3063,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(d2));
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3408,9 +3102,6 @@
     case kMathSin:
       DoMathSin(instr);
       break;
-    case kMathTan:
-      DoMathTan(instr);
-      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -3460,12 +3151,12 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
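+  // The stub does not remove the function from the stack; drop it here.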
+  __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
@@ -3485,6 +3176,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(r0));
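+  // CallKnownFunction expects the target function in r1.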
+  __ mov(r1, Operand(instr->target()));
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3493,9 +3185,9 @@
   ASSERT(ToRegister(instr->InputAt(0)).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
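+  // JSConstructCall expects the argument count in r0 and the constructor in r1.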
   __ mov(r0, Operand(instr->arity()));
-  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -3518,36 +3210,19 @@
   }
 
   // Do the store.
-  HType type = instr->hydrogen()->value()->type();
-  SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ str(value, FieldMemOperand(object, offset));
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       // Update the write barrier for the object for in-object properties.
-      __ RecordWriteField(object,
-                          offset,
-                          value,
-                          scratch,
-                          kLRHasBeenSaved,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(object, Operand(offset), value, scratch);
     }
   } else {
     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
     __ str(value, FieldMemOperand(scratch, offset));
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWriteField(scratch,
-                          offset,
-                          value,
-                          object,
-                          kLRHasBeenSaved,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(scratch, Operand(offset), value, object);
     }
   }
 }
@@ -3559,7 +3234,7 @@
 
   // Name is always in r2.
   __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3591,18 +3266,9 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   kLRHasBeenSaved,
-                   kSaveFPRegs,
-                   EMIT_REMEMBERED_SET,
-                   check_needed);
+    __ add(key, scratch, Operand(FixedArray::kHeaderSize));
+    __ RecordWrite(elements, key, value);
   }
 }
 
@@ -3703,7 +3369,6 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3718,55 +3383,13 @@
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
-  Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_reg());
-  Register scratch = scratch0();
-
-  Handle<Map> from_map = instr->original_map();
-  Handle<Map> to_map = instr->transitioned_map();
-  ElementsKind from_kind = from_map->elements_kind();
-  ElementsKind to_kind = to_map->elements_kind();
-
-  Label not_applicable;
-  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-  __ cmp(scratch, Operand(from_map));
-  __ b(ne, &not_applicable);
-  __ mov(new_map_reg, Operand(to_map));
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    // Write barrier.
-    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        scratch, kLRHasBeenSaved, kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      to_kind == FAST_DOUBLE_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(r2));
-    ASSERT(new_map_reg.is(r3));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(r2));
-    ASSERT(new_map_reg.is(r3));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
-  } else {
-    UNREACHABLE();
-  }
-  __ bind(&not_applicable);
-}
-
-
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
@@ -3781,19 +3404,87 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
+  Register string = ToRegister(instr->string());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  StringCharLoadGenerator::Generate(masm(),
-                                    ToRegister(instr->string()),
-                                    ToRegister(instr->index()),
-                                    ToRegister(instr->result()),
-                                    deferred->entry());
+  // Fetch the instance type of the receiver into result register.
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ tst(result, Operand(kIsIndirectStringMask));
+  __ b(eq, &check_sequential);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ tst(result, Operand(kSlicedNotConsMask));
+  __ b(eq, &cons_string);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ add(index, index, Operand(result, ASR, kSmiTagSize));
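+  // The slice offset is a smi; the ASR by kSmiTagSize untags it as part of
+  // the add.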
+  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle conses.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+  __ cmp(result, ip);
+  __ b(ne, deferred->entry());
+  // Get the first of the two strings and load its instance type.
+  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ tst(result, Operand(kStringRepresentationMask));
+  __ b(ne, deferred->entry());
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ tst(result, Operand(kStringEncodingMask));
+  __ b(ne, &ascii_string);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  __ add(result,
+         string,
+         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ ldrh(result, MemOperand(result, index, LSL, 1));
+  __ jmp(&done);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ add(result,
+         string,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ ldrb(result, MemOperand(result, index));
+
+  __ bind(&done);
   __ bind(deferred->exit());
 }
 
@@ -3836,7 +3527,6 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3908,16 +3598,16 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
 
-  Register src = ToRegister(instr->InputAt(0));
-  Register dst = ToRegister(instr->result());
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
 
   DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
-  __ SmiTag(dst, src, SetCC);
+  __ SmiTag(reg, SetCC);
   __ b(vs, deferred->entry());
   __ bind(deferred->exit());
 }
@@ -3925,8 +3615,7 @@
 
 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
   Label slow;
-  Register src = ToRegister(instr->InputAt(0));
-  Register dst = ToRegister(instr->result());
+  Register reg = ToRegister(instr->InputAt(0));
   DoubleRegister dbl_scratch = double_scratch0();
   SwVfpRegister flt_scratch = dbl_scratch.low();
 
@@ -3937,16 +3626,14 @@
   // disagree. Try to allocate a heap number in new space and store
   // the value in there. If that fails, call the runtime system.
   Label done;
-  if (dst.is(src)) {
-    __ SmiUntag(src, dst);
-    __ eor(src, src, Operand(0x80000000));
-  }
-  __ vmov(flt_scratch, src);
+  __ SmiUntag(reg);
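+  // SmiTag overflowed, so the sign bit is now the complement of the original
+  // one; the eor restores it, recovering the untagged integer.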
+  __ eor(reg, reg, Operand(0x80000000));
+  __ vmov(flt_scratch, reg);
   __ vcvt_f64_s32(dbl_scratch, flt_scratch);
   if (FLAG_inline_new) {
     __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
-    __ Move(dst, r5);
+    if (!reg.is(r5)) __ mov(reg, r5);
     __ b(&done);
   }
 
@@ -3957,16 +3644,16 @@
   // register is stored, as this register is in the pointer map, but contains an
   // integer value.
   __ mov(ip, Operand(0));
-  __ StoreToSafepointRegisterSlot(ip, dst);
+  __ StoreToSafepointRegisterSlot(ip, reg);
   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
-  __ Move(dst, r0);
+  if (!reg.is(r0)) __ mov(reg, r0);
 
   // Done. Put the value in dbl_scratch into the value of the allocated heap
   // number.
   __ bind(&done);
-  __ sub(ip, dst, Operand(kHeapObjectTag));
+  __ sub(ip, reg, Operand(kHeapObjectTag));
   __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
-  __ StoreToSafepointRegisterSlot(dst, dst);
+  __ StoreToSafepointRegisterSlot(reg, reg);
 }
 
 
@@ -3976,7 +3663,6 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4014,21 +3700,23 @@
 
 
 void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
-  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
+  __ SmiTag(ToRegister(input));
 }
 
 
 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
+  LOperand* input = instr->InputAt(0);
+  ASSERT(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
     STATIC_ASSERT(kHeapObjectTag == 1);
     // If the input is a HeapObject, SmiUntag will set the carry flag.
-    __ SmiUntag(result, input, SetCC);
+    __ SmiUntag(ToRegister(input), SetCC);
     DeoptimizeIf(cs, instr->environment());
   } else {
-    __ SmiUntag(result, input);
+    __ SmiUntag(ToRegister(input));
   }
 }
 
@@ -4036,7 +3724,6 @@
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 DoubleRegister result_reg,
                                 bool deoptimize_on_undefined,
-                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
@@ -4045,7 +3732,7 @@
   Label load_smi, heap_number, done;
 
   // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+  __ JumpIfSmi(input_reg, &load_smi);
 
   // Heap number map check.
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4072,25 +3759,28 @@
   // Heap number to double register conversion.
   __ sub(ip, input_reg, Operand(kHeapObjectTag));
   __ vldr(result_reg, ip, HeapNumber::kValueOffset);
-  if (deoptimize_on_minus_zero) {
-    __ vmov(ip, result_reg.low());
-    __ cmp(ip, Operand(0));
-    __ b(ne, &done);
-    __ vmov(ip, result_reg.high());
-    __ cmp(ip, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(eq, env);
-  }
   __ jmp(&done);
 
   // Smi to double register conversion
   __ bind(&load_smi);
-  // scratch: untagged value of input_reg
-  __ vmov(flt_scratch, scratch);
+  __ SmiUntag(input_reg);  // Untag smi before converting to float.
+  __ vmov(flt_scratch, input_reg);
   __ vcvt_f64_s32(result_reg, flt_scratch);
+  __ SmiTag(input_reg);  // Retag smi.
   __ bind(&done);
 }
 
 
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(instr->InputAt(0));
   Register scratch1 = scratch0();
@@ -4173,16 +3863,6 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI: public LDeferredCode {
-   public:
-    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LTaggedToI* instr_;
-  };
-
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -4212,7 +3892,6 @@
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment());
 }
 
@@ -4310,42 +3989,21 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  Register reg = ToRegister(instr->value());
-  Handle<JSFunction> target = instr->hydrogen()->target();
-  if (isolate()->heap()->InNewSpace(*target)) {
-    Register reg = ToRegister(instr->value());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(target);
-    __ mov(ip, Operand(Handle<Object>(cell)));
-    __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-    __ cmp(reg, ip);
-  } else {
-    __ cmp(reg, Operand(target));
-  }
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Register reg = ToRegister(instr->InputAt(0));
+  __ cmp(reg, Operand(instr->hydrogen()->target()));
   DeoptimizeIf(ne, instr->environment());
 }
 
 
-void LCodeGen::DoCheckMapCommon(Register reg,
-                                Register scratch,
-                                Handle<Map> map,
-                                CompareMapMode mode,
-                                LEnvironment* env) {
-  Label success;
-  __ CompareMap(reg, scratch, map, &success, mode);
-  DeoptimizeIf(ne, env);
-  __ bind(&success);
-}
-
-
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   Register scratch = scratch0();
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
-                   instr->environment());
+  __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(instr->hydrogen()->map()));
+  DeoptimizeIf(ne, instr->environment());
 }
 
 
@@ -4372,7 +4030,7 @@
   Label is_smi, done, heap_number;
 
   // Both smi and heap number cases are handled.
-  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+  __ JumpIfSmi(input_reg, &is_smi);
 
   // Check for heap number
   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4395,12 +4053,26 @@
 
   // smi
   __ bind(&is_smi);
+  __ SmiUntag(result_reg, input_reg);
   __ ClampUint8(result_reg, result_reg);
 
   __ bind(&done);
 }
 
 
+void LCodeGen::LoadHeapObject(Register result,
+                              Handle<HeapObject> object) {
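+  // New-space objects can move during GC, so reference them through a global
+  // property cell rather than embedding the pointer in the code.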
+  if (heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(object);
+    __ mov(result, Operand(cell));
+    __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+  } else {
+    __ mov(result, Operand(object));
+  }
+}
+
+
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register temp1 = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
@@ -4409,127 +4081,31 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  __ LoadHeapObject(temp1, current_prototype);
+  LoadHeapObject(temp1, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    DoCheckMapCommon(temp1, temp2,
-                     Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+    __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+    __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
+    DeoptimizeIf(ne, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    __ LoadHeapObject(temp1, current_prototype);
+    LoadHeapObject(temp1, current_prototype);
   }
 
   // Check the holder map.
-  DoCheckMapCommon(temp1, temp2,
-                   Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+  __ ldr(temp2, FieldMemOperand(temp1, HeapObject::kMapOffset));
+  __ cmp(temp2, Operand(Handle<Map>(current_prototype->map())));
   DeoptimizeIf(ne, instr->environment());
 }
 
 
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
-  class DeferredAllocateObject: public LDeferredCode {
-   public:
-    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LAllocateObject* instr_;
-  };
-
-  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Register scratch2 = ToRegister(instr->TempAt(1));
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
-  int instance_size = initial_map->instance_size();
-  ASSERT(initial_map->pre_allocated_property_fields() +
-         initial_map->unused_property_fields() -
-         initial_map->inobject_properties() == 0);
-
-  // Allocate memory for the object.  The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
-  __ AllocateInNewSpace(instance_size,
-                        result,
-                        scratch,
-                        scratch2,
-                        deferred->entry(),
-                        TAG_OBJECT);
-
-  // Load the initial map.
-  Register map = scratch;
-  __ LoadHeapObject(map, constructor);
-  __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Initialize map and fields of the newly allocated object.
-  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-  __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
-  __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
-  __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
-  if (initial_map->inobject_properties() != 0) {
-    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < initial_map->inobject_properties(); i++) {
-      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-      __ str(scratch, FieldMemOperand(result, property_offset));
-    }
-  }
-
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
-  Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, Operand(0));
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ LoadHeapObject(r0, constructor);
-  __ push(r0);
-  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
-  __ StoreToSafepointRegisterSlot(r0, result);
-}
-
-
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Heap* heap = isolate()->heap();
-  ElementsKind boilerplate_elements_kind =
-      instr->hydrogen()->boilerplate_elements_kind();
-
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
-    __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
-    // Load map into r2.
-    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-    // Load the map's "bit field 2".
-    __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
-    __ cmp(r2, Operand(boilerplate_elements_kind));
-    DeoptimizeIf(ne, instr->environment());
-  }
-
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  // Boilerplate already exists, constant elements are never accessed.
-  // Pass an empty fixed array.
-  __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
+  __ mov(r1, Operand(instr->hydrogen()->constant_elements()));
   __ Push(r3, r2, r1);
 
   // Pick the right runtime function or stub to call.
@@ -4546,164 +4122,26 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
-            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
-                            Register result,
-                            Register source,
-                            int* offset) {
-  ASSERT(!source.is(r2));
-  ASSERT(!result.is(r2));
-
-  // Only elements backing stores for non-COW arrays need to be copied.
-  Handle<FixedArrayBase> elements(object->elements());
-  bool has_elements = elements->length() > 0 &&
-      elements->map() != isolate()->heap()->fixed_cow_array_map();
-
-  // Increase the offset so that subsequent objects end up right after
-  // this object and its backing store.
-  int object_offset = *offset;
-  int object_size = object->map()->instance_size();
-  int elements_offset = *offset + object_size;
-  int elements_size = has_elements ? elements->Size() : 0;
-  *offset += object_size + elements_size;
-
-  // Copy object header.
-  ASSERT(object->properties()->length() == 0);
-  int inobject_properties = object->map()->inobject_properties();
-  int header_size = object_size - inobject_properties * kPointerSize;
-  for (int i = 0; i < header_size; i += kPointerSize) {
-    if (has_elements && i == JSObject::kElementsOffset) {
-      __ add(r2, result, Operand(elements_offset));
-    } else {
-      __ ldr(r2, FieldMemOperand(source, i));
-    }
-    __ str(r2, FieldMemOperand(result, object_offset + i));
-  }
-
-  // Copy in-object properties.
-  for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
-    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
-    if (value->IsJSObject()) {
-      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-      __ add(r2, result, Operand(*offset));
-      __ str(r2, FieldMemOperand(result, total_offset));
-      __ LoadHeapObject(source, value_object);
-      EmitDeepCopy(value_object, result, source, offset);
-    } else if (value->IsHeapObject()) {
-      __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
-      __ str(r2, FieldMemOperand(result, total_offset));
-    } else {
-      __ mov(r2, Operand(value));
-      __ str(r2, FieldMemOperand(result, total_offset));
-    }
-  }
-
-  if (has_elements) {
-    // Copy elements backing store header.
-    __ LoadHeapObject(source, elements);
-    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
-      __ ldr(r2, FieldMemOperand(source, i));
-      __ str(r2, FieldMemOperand(result, elements_offset + i));
-    }
-
-    // Copy elements backing store content.
-    int elements_length = has_elements ? elements->length() : 0;
-    if (elements->IsFixedDoubleArray()) {
-      Handle<FixedDoubleArray> double_array =
-          Handle<FixedDoubleArray>::cast(elements);
-      for (int i = 0; i < elements_length; i++) {
-        int64_t value = double_array->get_representation(i);
-        // We only support little endian mode...
-        int32_t value_low = value & 0xFFFFFFFF;
-        int32_t value_high = value >> 32;
-        int total_offset =
-            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
-        __ mov(r2, Operand(value_low));
-        __ str(r2, FieldMemOperand(result, total_offset));
-        __ mov(r2, Operand(value_high));
-        __ str(r2, FieldMemOperand(result, total_offset + 4));
-      }
-    } else if (elements->IsFixedArray()) {
-      for (int i = 0; i < elements_length; i++) {
-        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
-        if (value->IsJSObject()) {
-          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-          __ add(r2, result, Operand(*offset));
-          __ str(r2, FieldMemOperand(result, total_offset));
-          __ LoadHeapObject(source, value_object);
-          EmitDeepCopy(value_object, result, source, offset);
-        } else if (value->IsHeapObject()) {
-          __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
-          __ str(r2, FieldMemOperand(result, total_offset));
-        } else {
-          __ mov(r2, Operand(value));
-          __ str(r2, FieldMemOperand(result, total_offset));
-        }
-      }
-    } else {
-      UNREACHABLE();
-    }
-  }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
-  int size = instr->hydrogen()->total_size();
-
-  // Allocate all objects that are part of the literal in one big
-  // allocation. This avoids multiple limit checks.
-  Label allocated, runtime_allocate;
-  __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ mov(r0, Operand(Smi::FromInt(size)));
-  __ push(r0);
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
-  __ bind(&allocated);
-  int offset = 0;
-  __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
-  EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
-  ASSERT_EQ(size, offset);
-}
-
-
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
-  Handle<FixedArray> constant_properties =
-      instr->hydrogen()->constant_properties();
-
-  // Set up the parameters to the stub/runtime call.
-  __ LoadHeapObject(r4, literals);
+  __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r4, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
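+  // r4 now holds the literals array of the function in the current frame.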
   __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r2, Operand(constant_properties));
-  int flags = instr->hydrogen()->fast_elements()
-      ? ObjectLiteral::kFastElements
-      : ObjectLiteral::kNoFlags;
-  __ mov(r1, Operand(Smi::FromInt(flags)));
+  __ mov(r2, Operand(instr->hydrogen()->constant_properties()));
+  __ mov(r1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
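+  // r1: Smi-encoded flag, 1 if the boilerplate has only fast elements.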
   __ Push(r4, r3, r2, r1);
 
-  // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   }
 }
 
@@ -4776,7 +4214,8 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode());
+    FastNewClosureStub stub(
+        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ mov(r1, Operand(shared_info));
     __ push(r1);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -4809,9 +4248,8 @@
                                                   false_label,
                                                   input,
                                                   instr->type_literal());
-  if (final_branch_condition != kNoCondition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
-  }
+
+  EmitBranch(true_block, false_block, final_branch_condition);
 }
 
 
@@ -4857,12 +4295,10 @@
     final_branch_condition = ne;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
-    __ b(eq, true_label);
-    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
-    final_branch_condition = eq;
+    __ CompareObjectType(input, input, scratch,
+                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+    final_branch_condition = ge;
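+    // Callable types occupy the top of the instance-type range in 3.6, so a
+    // single ge comparison covers both functions and function proxies.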
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4881,7 +4317,9 @@
     final_branch_condition = eq;
 
   } else {
+    final_branch_condition = ne;
     __ b(false_label);
+    // A dead branch instruction will be generated after this point.
   }
 
   return final_branch_condition;
@@ -4992,7 +4430,6 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -5049,88 +4486,6 @@
 }
 
 
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
-  DeoptimizeIf(eq, instr->environment());
-
-  Register null_value = r5;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ cmp(r0, null_value);
-  DeoptimizeIf(eq, instr->environment());
-
-  __ tst(r0, Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment());
-
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr->environment());
-
-  Label use_cache, call_runtime;
-  __ CheckEnumCache(null_value, &call_runtime);
-
-  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ b(&use_cache);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(r0);
-  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
-  __ cmp(r1, ip);
-  DeoptimizeIf(ne, instr->environment());
-  __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
-  Register map = ToRegister(instr->map());
-  Register result = ToRegister(instr->result());
-  __ LoadInstanceDescriptors(map, result);
-  __ ldr(result,
-         FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
-  __ ldr(result,
-         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
-  __ cmp(result, Operand(0));
-  DeoptimizeIf(eq, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  Register map = ToRegister(instr->map());
-  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  __ cmp(map, scratch0());
-  DeoptimizeIf(ne, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  Register object = ToRegister(instr->object());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  Label out_of_object, done;
-  __ cmp(index, Operand(0));
-  __ b(lt, &out_of_object);
-
-  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
-  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
-  __ b(&done);
-
-  __ bind(&out_of_object);
-  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // Index is equal to negated out of object property index plus 1.
-  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ ldr(result, FieldMemOperand(scratch,
-                                 FixedArray::kHeaderSize - kPointerSize));
-  __ bind(&done);
-}
 
 
 #undef __
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index adb6e1b..0e34c9f 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -87,15 +87,11 @@
                                         SwVfpRegister flt_scratch,
                                         DoubleRegister dbl_scratch);
   int ToInteger32(LConstantOperand* op) const;
-  double ToDouble(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op);
   MemOperand ToMemOperand(LOperand* op) const;
   // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
   MemOperand ToHighMemOperand(LOperand* op) const;
 
-  bool IsInteger32(LConstantOperand* op) const;
-  Handle<Object> ToHandle(LConstantOperand* op) const;
-
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
@@ -114,16 +110,11 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
-  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
-  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
-
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
@@ -149,8 +140,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  StrictModeFlag strict_mode_flag() const {
-    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  int strict_mode_flag() const {
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -216,7 +207,7 @@
                                LInstruction* instr);
 
   // Generate a direct call to a known function.  Expects the function
-  // to be in r1.
+  // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
@@ -250,7 +241,6 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
-  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -272,19 +262,17 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         DoubleRegister result,
                         bool deoptimize_on_undefined,
-                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -294,13 +282,6 @@
                          Label* is_not_object,
                          Label* is_object);
 
-  // Emits optimized code for %_IsString(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsString(Register input,
-                         Register temp1,
-                         Label* is_not_string);
-
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
@@ -310,13 +291,6 @@
                                        Handle<Map> type,
                                        Handle<String> name);
 
-  // Emits optimized code to deep-copy the contents of statically known
-  // object graphs (e.g. object literal boilerplate).
-  void EmitDeepCopy(Handle<JSObject> object,
-                    Register result,
-                    Register source,
-                    int* offset);
-
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -404,20 +378,16 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
+      : codegen_(codegen), external_exit_(NULL) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
 
   void SetExit(Label* exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -428,7 +398,6 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
-  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index cefca47..1cfdc79 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -245,24 +245,13 @@
     }
 
   } else if (source->IsConstantOperand()) {
-    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    Operand source_operand = cgen_->ToOperand(source);
     if (destination->IsRegister()) {
-      Register dst = cgen_->ToRegister(destination);
-      if (cgen_->IsInteger32(constant_source)) {
-        __ mov(dst, Operand(cgen_->ToInteger32(constant_source)));
-      } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
-      }
+      __ mov(cgen_->ToRegister(destination), source_operand);
     } else {
       ASSERT(destination->IsStackSlot());
       ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      if (cgen_->IsInteger32(constant_source)) {
-        __ mov(kSavedValueRegister,
-               Operand(cgen_->ToInteger32(constant_source)));
-      } else {
-        __ LoadObject(kSavedValueRegister,
-                      cgen_->ToHandle(constant_source));
-      }
+      __ mov(kSavedValueRegister, source_operand);
       __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
     }
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 857c2bf..7a1f802 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,8 +42,7 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      has_frame_(false) {
+      allow_stub_calls_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -407,16 +406,29 @@
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    mov(result, Operand(cell));
-    ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
-  } else {
-    mov(result, Operand(object));
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register address,
+                                       Register scratch) {
+  if (emit_debug_code()) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, ne, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
   }
+
+  // Calculate page address.
+  Bfc(object, 0, kPageSizeBits);
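+  // (Bfc clears bits [0, kPageSizeBits), masking the address down to the
+  // start of its page.)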
+
+  // Calculate region number.
+  Ubfx(address, address, Page::kRegionSizeLog2,
+       kPageSizeBits - Page::kRegionSizeLog2);
+
+  // Mark region dirty.
+  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  mov(ip, Operand(1));
+  orr(scratch, scratch, Operand(ip, LSL, address));
+  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
 
@@ -431,52 +443,38 @@
 }
 
 
-void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    LinkRegisterStatus lr_status,
-    SaveFPRegsMode save_fp,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check) {
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
+// Will clobber 4 registers: object, scratch0, scratch1, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Operand offset,
+                                 Register scratch0,
+                                 Register scratch1) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
   Label done;
 
-  // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    JumpIfSmi(value, &done);
-  }
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch0, eq, &done);
 
-  // Although the object register is tagged, the offset is relative to the start
-  // of the object, so so offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  // Add offset into the object.
+  add(scratch0, object, offset);
 
-  add(dst, object, Operand(offset - kHeapObjectTag));
-  if (emit_debug_code()) {
-    Label ok;
-    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
-    b(eq, &ok);
-    stop("Unaligned cell in write barrier");
-    bind(&ok);
-  }
-
-  RecordWrite(object,
-              dst,
-              value,
-              lr_status,
-              save_fp,
-              remembered_set_action,
-              OMIT_SMI_CHECK);
+  // Record the actual write.
+  RecordWriteHelper(object, scratch0, scratch1);
 
   bind(&done);
 
-  // Clobber clobbered input registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    mov(object, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
   }
 }
 
@@ -486,100 +484,29 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value,
-                                 LinkRegisterStatus lr_status,
-                                 SaveFPRegsMode fp_mode,
-                                 RememberedSetAction remembered_set_action,
-                                 SmiCheck smi_check) {
+                                 Register scratch) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!address.is(cp) && !value.is(cp));
-
-  if (emit_debug_code()) {
-    ldr(ip, MemOperand(address));
-    cmp(ip, value);
-    Check(eq, "Wrong address or value passed to RecordWrite");
-  }
+  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
 
   Label done;
 
-  if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
-    tst(value, Operand(kSmiTagMask));
-    b(eq, &done);
-  }
-
-  CheckPageFlag(value,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersToHereAreInterestingMask,
-                eq,
-                &done);
-  CheckPageFlag(object,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersFromHereAreInterestingMask,
-                eq,
-                &done);
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch, eq, &done);
 
   // Record the actual write.
-  if (lr_status == kLRHasNotBeenSaved) {
-    push(lr);
-  }
-  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
-  CallStub(&stub);
-  if (lr_status == kLRHasNotBeenSaved) {
-    pop(lr);
-  }
+  RecordWriteHelper(object, address, scratch);
 
   bind(&done);
 
-  // Clobber clobbered registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
-  }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
-                                         Register address,
-                                         Register scratch,
-                                         SaveFPRegsMode fp_mode,
-                                         RememberedSetFinalAction and_then) {
-  Label done;
-  if (emit_debug_code()) {
-    Label ok;
-    JumpIfNotInNewSpace(object, scratch, &ok);
-    stop("Remembered set pointer is in new space");
-    bind(&ok);
-  }
-  // Load store buffer top.
-  ExternalReference store_buffer =
-      ExternalReference::store_buffer_top(isolate());
-  mov(ip, Operand(store_buffer));
-  ldr(scratch, MemOperand(ip));
-  // Store pointer to buffer and increment buffer top.
-  str(address, MemOperand(scratch, kPointerSize, PostIndex));
-  // Write back new top of buffer.
-  str(scratch, MemOperand(ip));
-  // Call stub on end of buffer.
-  // Check for end of buffer.
-  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
-  if (and_then == kFallThroughAtEnd) {
-    b(eq, &done);
-  } else {
-    ASSERT(and_then == kReturnAtEnd);
-    Ret(eq);
-  }
-  push(lr);
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(fp_mode);
-  CallStub(&store_buffer_overflow);
-  pop(lr);
-  bind(&done);
-  if (and_then == kReturnAtEnd) {
-    Ret();
+    mov(object, Operand(BitCast<int32_t>(kZapValue)));
+    mov(address, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
   }
 }
 
@@ -817,12 +744,12 @@
 
 
 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Set up the frame structure on the stack.
   ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
   ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   Push(lr, fp);
   mov(fp, Operand(sp));  // Set up new frame pointer.
   // Reserve room for saved entry sp and code object.
   sub(sp, sp, Operand(2 * kPointerSize));
   if (emit_debug_code()) {
@@ -957,12 +884,10 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
-                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
-  *definitely_mismatches = false;
   Label regular_invoke;
 
   // Check whether the expected and actual arguments count match. If not,
@@ -993,7 +918,6 @@
         // arguments.
         definitely_matches = true;
       } else {
-        *definitely_mismatches = true;
         mov(r2, Operand(expected.immediate()));
       }
     }
@@ -1021,9 +945,7 @@
       SetCallKind(r5, call_kind);
       Call(adaptor);
       call_wrapper.AfterCall();
-      if (!*definitely_mismatches) {
-        b(done);
-      }
+      b(done);
     } else {
       SetCallKind(r5, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -1039,30 +961,24 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, &definitely_mismatches, flag,
-                 call_wrapper, call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code));
-      SetCallKind(r5, call_kind);
-      Call(code);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(r5, call_kind);
-      Jump(code);
-    }
 
-    // Continue here if InvokePrologue does handle the invocation due to
-    // mismatched parameter counts.
-    bind(&done);
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+                 call_wrapper, call_kind);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(r5, call_kind);
+    Call(code);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(r5, call_kind);
+    Jump(code);
   }
+
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
 }
 
 
@@ -1072,27 +988,21 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, code, no_reg,
-                 &done, &definitely_mismatches, flag,
-                 NullCallWrapper(), call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      SetCallKind(r5, call_kind);
-      Call(code, rmode);
-    } else {
-      SetCallKind(r5, call_kind);
-      Jump(code, rmode);
-    }
 
-    // Continue here if InvokePrologue does handle the invocation due to
-    // mismatched parameter counts.
-    bind(&done);
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
+                 NullCallWrapper(), call_kind);
+  if (flag == CALL_FUNCTION) {
+    SetCallKind(r5, call_kind);
+    Call(code, rmode);
+  } else {
+    SetCallKind(r5, call_kind);
+    Jump(code, rmode);
   }
+
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
 }
 
 
@@ -1101,9 +1011,6 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -1124,24 +1031,28 @@
 }
 
 
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
-  LoadHeapObject(r1, function);
+  mov(r1, Operand(Handle<JSFunction>(function)));
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
+  }
 }
 
 
@@ -1179,49 +1090,56 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
   mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
-  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif
 
 
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
-                                    int handler_index) {
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
-  // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
-  // We will build up the handler from the bottom by pushing on the stack.
-  // Set up the code object (r5) and the state (r6) for pushing.
-  unsigned state =
-      StackHandler::IndexField::encode(handler_index) |
-      StackHandler::KindField::encode(kind);
-  mov(r5, Operand(CodeObject()));
-  mov(r6, Operand(state));
-
-  // Push the frame pointer, context, state, and code object.
-  if (kind == StackHandler::JS_ENTRY) {
-    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
-    mov(ip, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
-    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+  // The pc (return address) is passed in register lr.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      mov(r3, Operand(StackHandler::TRY_CATCH));
+    } else {
+      mov(r3, Operand(StackHandler::TRY_FINALLY));
+    }
+    stm(db_w, sp, r3.bit() | cp.bit() | fp.bit() | lr.bit());
+    // Save the current handler as the next handler.
+    mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+    ldr(r1, MemOperand(r3));
+    push(r1);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r3));
   } else {
-    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
+    // Must preserve r0-r4; r5-r7 are available.
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    mov(r5, Operand(StackHandler::ENTRY));  // State.
+    mov(r6, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(r7, Operand(0, RelocInfo::NONE));  // NULL frame pointer.
+    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | lr.bit());
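+    // stm pushes state, the no-context marker, the NULL frame pointer and
+    // the return address, matching the handler layout asserted above.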
+    // Save the current handler as the next handler.
+    mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+    ldr(r6, MemOperand(r7));
+    push(r6);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r7));
   }
-
-  // Link the current handler as the next handler.
-  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-  ldr(r5, MemOperand(r6));
-  push(r5);
-  // Set this new handler as the current one.
-  str(sp, MemOperand(r6));
 }
 
 
@@ -1234,90 +1152,111 @@
 }
 
 
-void MacroAssembler::JumpToHandlerEntry() {
-  // Compute the handler entry address and jump to it.  The handler table is
-  // a fixed array of (smi-tagged) code offsets.
-  // r0 = exception, r1 = code object, r2 = state.
-  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
-  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
-  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
-  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
-  add(pc, r1, Operand(r2, ASR, kSmiTagSize));  // Jump.
-}
-
-
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in r0.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // r0 is expected to hold the exception.
   if (!value.is(r0)) {
     mov(r0, value);
   }
-  // Drop the stack pointer to the top of the top handler.
+
+  // Drop the sp to the top of the handler.
   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
+
   // Restore the next handler.
   pop(r2);
   str(r2, MemOperand(r3));
 
-  // Get the code object (r1) and state (r2).  Restore the context and frame
-  // pointer.
-  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
+  // Restore context and frame pointer, discard state (r3).
+  ldm(ia_w, sp, r3.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
-  // or cp.
-  tst(cp, cp);
+  // (r3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
+  // of them.
+  cmp(r3, Operand(StackHandler::ENTRY));
   str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 
-  JumpToHandlerEntry();
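+  // The last word of the handler record is the saved pc; popping it into pc
+  // transfers control there.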
+#ifdef DEBUG
+  if (emit_debug_code()) {
+    mov(lr, Operand(pc));
+  }
+#endif
+  pop(pc);
 }
 
 
-void MacroAssembler::ThrowUncatchable(Register value) {
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in r0.
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // r0 is expected to hold the exception.
   if (!value.is(r0)) {
     mov(r0, value);
   }
-  // Drop the stack pointer to the top of the top stack handler.
+
+  // Drop sp to the top stack handler.
   mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   ldr(sp, MemOperand(r3));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label fetch_next, check_kind;
-  jmp(&check_kind);
-  bind(&fetch_next);
-  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  ldr(r2, MemOperand(sp, kStateOffset));
+  cmp(r2, Operand(StackHandler::ENTRY));
+  b(eq, &done);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  ldr(sp, MemOperand(sp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
 
-  bind(&check_kind);
-  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
-  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
-  tst(r2, Operand(StackHandler::KindField::kMask));
-  b(ne, &fetch_next);
-
-  // Set the top handler address to next handler past the top ENTRY handler.
+  // Set the top handler address to next handler past the current ENTRY handler.
   pop(r2);
   str(r2, MemOperand(r3));
-  // Get the code object (r1) and state (r2).  Clear the context and frame
-  // pointer (0 was saved in the handler).
-  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
 
-  JumpToHandlerEntry();
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+        Isolate::kExternalCaughtExceptionAddress, isolate());
+    mov(r0, Operand(false, RelocInfo::NONE));
+    mov(r2, Operand(external_caught));
+    str(r0, MemOperand(r2));
+
+    // Set pending exception and r0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate())));
+    str(r0, MemOperand(r2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         cp
+  //         fp
+  //         lr
+
+  // Restore context and frame pointer, discard state (r2).
+  ldm(ia_w, sp, r2.bit() | cp.bit() | fp.bit());
+#ifdef DEBUG
+  if (emit_debug_code()) {
+    mov(lr, Operand(pc));
+  }
+#endif
+  pop(pc);
 }
 
 
@@ -1419,9 +1358,8 @@
   // hash = hash ^ (hash >> 4);
   eor(t0, t0, Operand(t0, LSR, 4));
   // hash = hash * 2057;
-  mov(scratch, Operand(t0, LSL, 11));
-  add(t0, t0, Operand(t0, LSL, 3));
-  add(t0, t0, scratch);
+  mov(scratch, Operand(2057));
+  mul(t0, t0, scratch);
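+  // (2057 == 1 + 8 + 2048, which is why 3.7 strength-reduced this multiply
+  // to hash + (hash << 3) + (hash << 11).)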
   // hash = hash ^ (hash >> 16);
   eor(t0, t0, Operand(t0, LSR, 16));
 }
@@ -1610,7 +1548,6 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
-  ASSERT(!object_size.is(ip));
   ASSERT(!result.is(ip));
   ASSERT(!scratch1.is(ip));
   ASSERT(!scratch2.is(ip));
@@ -1868,170 +1805,25 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
 
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
-  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
-  b(ls, fail);
-  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
-  b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
-                                              Register scratch,
-                                              Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
-  b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
-                                                 Register key_reg,
-                                                 Register receiver_reg,
-                                                 Register elements_reg,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Register scratch3,
-                                                 Register scratch4,
-                                                 Label* fail) {
-  Label smi_value, maybe_nan, have_double_value, is_nan, done;
-  Register mantissa_reg = scratch2;
-  Register exponent_reg = scratch3;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg,
-           scratch1,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
-  ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  cmp(exponent_reg, scratch1);
-  b(ge, &maybe_nan);
-
-  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  bind(&have_double_value);
-  add(scratch1, elements_reg,
-      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  str(exponent_reg, FieldMemOperand(scratch1, offset));
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  b(gt, &is_nan);
-  ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  cmp(mantissa_reg, Operand(0));
-  b(eq, &have_double_value);
-  bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  jmp(&have_double_value);
-
-  bind(&smi_value);
-  add(scratch1, elements_reg,
-      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  add(scratch1, scratch1,
-      Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
-  // scratch1 is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(VFP3)) {
-    destination = FloatingPointHelper::kVFPRegisters;
-  } else {
-    destination = FloatingPointHelper::kCoreRegisters;
-  }
-
-  Register untagged_value = receiver_reg;
-  SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(this,
-                                          untagged_value,
-                                          destination,
-                                          d0,
-                                          mantissa_reg,
-                                          exponent_reg,
-                                          scratch4,
-                                          s2);
-  if (destination == FloatingPointHelper::kVFPRegisters) {
-    CpuFeatures::Scope scope(VFP3);
-    vstr(d0, scratch1, 0);
-  } else {
-    str(mantissa_reg, MemOperand(scratch1, 0));
-    str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
-  }
-  bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
-                                Register scratch,
-                                Handle<Map> map,
-                                Label* early_success,
-                                CompareMapMode mode) {
-  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  cmp(scratch, Operand(map));
-  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    Map* transitioned_fast_element_map(
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
-    ASSERT(transitioned_fast_element_map == NULL ||
-           map->elements_kind() != FAST_ELEMENTS);
-    if (transitioned_fast_element_map != NULL) {
-      b(eq, early_success);
-      cmp(scratch, Operand(Handle<Map>(transitioned_fast_element_map)));
-    }
-
-    Map* transitioned_double_map(
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
-    ASSERT(transitioned_double_map == NULL ||
-           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-    if (transitioned_double_map != NULL) {
-      b(eq, early_success);
-      cmp(scratch, Operand(Handle<Map>(transitioned_double_map)));
-    }
-  }
-}
-
-
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type,
-                              CompareMapMode mode) {
+                              SmiCheckType smi_check_type) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-
-  Label success;
-  CompareMap(obj, scratch, map, &success, mode);
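+  // 3.6 compares the map word inline; CompareMap and element-transition
+  // tolerance do not exist yet.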
+  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(ip, Operand(map));
+  cmp(scratch, ip);
   b(ne, fail);
-  bind(&success);
 }
 
 
@@ -2070,8 +1862,7 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss,
-                                             bool miss_on_bound_function) {
+                                             Label* miss) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -2079,16 +1870,6 @@
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
   b(ne, miss);
 
-  if (miss_on_bound_function) {
-    ldr(scratch,
-        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    ldr(scratch,
-        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
-    tst(scratch,
-        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
-    b(ne, miss);
-  }
-
   // Make sure that the function has an instance prototype.
   Label non_instance;
   ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -2126,24 +1907,47 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
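+  // The Try* variants propagate an allocation failure from TryGetCode as a
+  // Failure object instead of assuming code creation succeeds.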
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Handle<Code> code(Code::cast(result));
+  Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+  return result;
+}
+
+
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
-                                              int stack_space) {
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ExternalReference function, int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -2206,10 +2010,14 @@
   mov(pc, lr);
 
   bind(&promote_scheduled_exception);
-  TailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-      0,
-      1);
+  MaybeObject* result =
+      TryTailCallExternalReference(
+          ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+          0,
+          1);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -2221,12 +2029,8 @@
       ExternalReference::delete_handle_scope_extensions(isolate()), 1);
   mov(r0, r4);
   jmp(&leave_exit_frame);
-}
 
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
-  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
-  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+  return result;
 }
 
 
@@ -2374,7 +2178,7 @@
     b(gt, not_int32);
 
     // We know the exponent is smaller than 30 (biased).  If it is less than
     // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
     sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
@@ -2625,7 +2429,8 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1, kSaveFPRegs);
+  CEntryStub stub(1);
+  stub.SaveDoubles();
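+  // In 3.6 double-register saving is toggled on the stub instance rather
+  // than passed as a CEntryStub constructor argument.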
   CallStub(&stub);
 }
 
@@ -2652,6 +2457,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r0, Operand(num_arguments));
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -2672,12 +2488,21 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+#if defined(__thumb__)
+  // Thumb mode builtin.
+  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
+  mov(r1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   GetBuiltinEntry(r2, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(r2));
@@ -2809,20 +2634,14 @@
     RecordComment(msg);
   }
 #endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
 
   mov(r0, Operand(p0));
   push(r0);
   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   push(r0);
-  // Disable stub call restrictions to always allow calls to abort.
-  if (!has_frame_) {
-    // We don't actually want to generate a pile of code for this, so just
-    // claim there is a stack frame, without generating one.
-    FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 2);
-  } else {
-    CallRuntime(Runtime::kAbort, 2);
-  }
+  CallRuntime(Runtime::kAbort, 2);
   // will not return here
   if (is_const_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -2854,47 +2673,6 @@
 }
 
 
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
-
-  // Check that the function's map is the same as the expected cached map.
-  int expected_index =
-      Context::GetContextMapIndexFromElementsKind(expected_kind);
-  ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
-  cmp(map_in_out, ip);
-  b(ne, no_map_match);
-
-  // Use the transitioned cached map.
-  int trans_index =
-      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
-  ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch, Register map_out) {
-  ASSERT(!function_in.is(map_out));
-  Label done;
-  ldr(map_out, FieldMemOperand(function_in,
-                               JSFunction::kPrototypeOrInitialMapOffset));
-  if (!FLAG_smi_only_arrays) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                        FAST_ELEMENTS,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2955,22 +2733,6 @@
 }
 
 
-void MacroAssembler::UntagAndJumpIfSmi(
-    Register dst, Register src, Label* smi_case) {
-  STATIC_ASSERT(kSmiTag == 0);
-  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
-  b(cc, smi_case);  // Shifter carry is not set for a smi.
-}
-
-
-void MacroAssembler::UntagAndJumpIfNotSmi(
-    Register dst, Register src, Label* non_smi_case) {
-  STATIC_ASSERT(kSmiTag == 0);
-  mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
-  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
-}
-
-
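
The helpers removed here exploit the classic 32-bit smi encoding: the tag occupies bit 0 and is zero for smis, so one arithmetic shift right both untags the value and deposits the old tag bit in the shifter carry, making the branch free. A stand-alone C sketch of that encoding (constants are assumptions, not V8 API):

    #include <stdint.h>

    static const int kSmiTagSize = 1;  // assumed 32-bit smi layout

    bool IsSmi(int32_t tagged)       { return (tagged & 1) == 0; }
    int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }
    int32_t SmiTag(int32_t value)    { return value << kSmiTagSize; }
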
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
@@ -3180,19 +2942,6 @@
 }
 
 
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
-                                                Register end_offset,
-                                                Register filler) {
-  Label loop, entry;
-  b(&entry);
-  bind(&loop);
-  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
-  bind(&entry);
-  cmp(start_offset, end_offset);
-  b(lt, &loop);
-}
-
-
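
For reference, the removed helper is a plain word store loop; an equivalent C sketch (assuming word-aligned pointers, end exclusive):

    #include <stdint.h>

    void InitializeFieldsWithFiller(intptr_t* start, intptr_t* end,
                                    intptr_t filler) {
      // Mirrors the str/cmp/b(lt) shape: the bound is checked first, so an
      // empty range stores nothing; start walks forward until it hits end.
      for (intptr_t* p = start; p < end; ++p) *p = filler;
    }
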
 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
@@ -3204,10 +2953,8 @@
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
-  // Order of the next two lines is important: zeros register
-  // can be the same as source register.
-  Move(scratch, source);
   mov(zeros, Operand(0, RelocInfo::NONE));
+  Move(scratch, source);
   // Top 16.
   tst(scratch, Operand(0xffff0000));
   add(zeros, zeros, Operand(16), LeaveCC, eq);
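
The pre-ARMv5 fallback counts leading zeros by binary search over halves of the word (the emitted sequence continues past the "Top 16" step shown in this hunk), which is why the ordering comment mattered when zeros aliases source. The same cascade in C, as a sketch:

    #include <stdint.h>

    int CountLeadingZeros(uint32_t x) {
      int zeros = 0;
      if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
      if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
      if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
      if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
      if ((x & 0x80000000u) == 0) { zeros += 1; }             // top 1
      return zeros;  // note: this sketch yields 31 for x == 0
    }
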
@@ -3354,15 +3101,23 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_reg_arguments,
                                    int num_double_arguments) {
-  mov(ip, Operand(function));
-  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+  CallCFunctionHelper(no_reg,
+                      function,
+                      ip,
+                      num_reg_arguments,
+                      num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   int num_reg_arguments,
-                                   int num_double_arguments) {
-  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_reg_arguments,
+                      num_double_arguments);
 }
 
 
@@ -3373,15 +3128,17 @@
 
 
 void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
                                    int num_arguments) {
-  CallCFunction(function, num_arguments, 0);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
+                                         ExternalReference function_reference,
+                                         Register scratch,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
-  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -3405,6 +3162,10 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
+  if (function.is(no_reg)) {
+    mov(scratch, Operand(function_reference));
+    function = scratch;
+  }
   Call(function);
   int stack_passed_arguments = CalculateStackPassedWords(
       num_reg_arguments, num_double_arguments);
@@ -3436,185 +3197,6 @@
 }
 
 
-void MacroAssembler::CheckPageFlag(
-    Register object,
-    Register scratch,
-    int mask,
-    Condition cc,
-    Label* condition_met) {
-  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
-  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
-  tst(scratch, Operand(mask));
-  b(cc, condition_met);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
-                                 Register scratch0,
-                                 Register scratch1,
-                                 Label* on_black) {
-  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
-                              Register bitmap_scratch,
-                              Register mask_scratch,
-                              Label* has_color,
-                              int first_bit,
-                              int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
-
-  GetMarkBits(object, bitmap_scratch, mask_scratch);
-
-  Label other_color, word_boundary;
-  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  tst(ip, Operand(mask_scratch));
-  b(first_bit == 1 ? eq : ne, &other_color);
-  // Shift left 1 by adding.
-  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
-  b(eq, &word_boundary);
-  tst(ip, Operand(mask_scratch));
-  b(second_bit == 1 ? ne : eq, has_color);
-  jmp(&other_color);
-
-  bind(&word_boundary);
-  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
-  tst(ip, Operand(1));
-  b(second_bit == 1 ? ne : eq, has_color);
-  bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects.  This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
-                                      Register scratch,
-                                      Label* not_data_object) {
-  Label is_data_object;
-  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  b(eq, &is_data_object);
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
-  b(ne, not_data_object);
-  bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
-                                 Register bitmap_reg,
-                                 Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
-  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
-  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
-  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
-  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
-  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
-  mov(ip, Operand(1));
-  mov(mask_reg, Operand(ip, LSL, mask_reg));
-}
-
-
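
The removed GetMarkBits maps an object address to a (bitmap cell, bit mask) pair in the page-local mark bitmap: one mark bit per word, 32 bits per cell. The same arithmetic in C, with constants assumed to match the 32-bit 3.7 layout (1MB pages, 4-byte words); the real code also adds MemoryChunk::kHeaderSize when it loads the cell:

    #include <stdint.h>

    static const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed
    static const int kPointerSizeLog2 = 2;
    static const int kBitsPerCellLog2 = 5;

    void GetMarkBits(uintptr_t addr, uintptr_t* cell_addr, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      uintptr_t cell =
          (addr & kPageAlignmentMask) >> (kPointerSizeLog2 + kBitsPerCellLog2);
      *cell_addr = page + (cell << kPointerSizeLog2);  // 4 bytes per cell
      *mask = 1u << bit;
    }
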
-void MacroAssembler::EnsureNotWhite(
-    Register value,
-    Register bitmap_scratch,
-    Register mask_scratch,
-    Register load_scratch,
-    Label* value_is_white_and_not_data) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
-  GetMarkBits(value, bitmap_scratch, mask_scratch);
-
-  // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  Label done;
-
-  // Since both black and grey have a 1 in the first position and white does
-  // not have a 1 there we only need to check one bit.
-  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  tst(mask_scratch, load_scratch);
-  b(ne, &done);
-
-  if (emit_debug_code()) {
-    // Check for impossible bit pattern.
-    Label ok;
-    // LSL may overflow, making the check conservative.
-    tst(load_scratch, Operand(mask_scratch, LSL, 1));
-    b(eq, &ok);
-    stop("Impossible marking bit pattern");
-    bind(&ok);
-  }
-
-  // Value is white.  We check whether it is data that doesn't need scanning.
-  // Currently only checks for HeapNumber and non-cons strings.
-  Register map = load_scratch;  // Holds map while checking type.
-  Register length = load_scratch;  // Holds length of object after testing type.
-  Label is_data_object;
-
-  // Check for heap-number
-  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
-  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
-  b(eq, &is_data_object);
-
-  // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  Register instance_type = load_scratch;
-  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
-  b(ne, value_is_white_and_not_data);
-  // It's a non-indirect (non-cons and non-slice) string.
-  // If it's external, the length is just ExternalString::kSize.
-  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
-  // External strings are the only ones with the kExternalStringTag bit
-  // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
-  tst(instance_type, Operand(kExternalStringTag));
-  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
-  b(ne, &is_data_object);
-
-  // Sequential string, either ASCII or UC16.
-  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
-  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
-  // getting the length multiplied by 2.
-  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
-  tst(instance_type, Operand(kStringEncodingMask));
-  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
-  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
-  and_(length, length, Operand(~kObjectAlignmentMask));
-
-  bind(&is_data_object);
-  // Value is a data object, and it is white.  Mark it black.  Since we know
-  // that the object is white we can make it black by flipping one bit.
-  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  orr(ip, ip, Operand(mask_scratch));
-  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
-  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
-  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-  add(ip, ip, Operand(length));
-  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
-  bind(&done);
-}
-
-
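
For orientation on the removed color checks: the incremental marker stores an object's color in two adjacent mark bits, and the ASSERTed patterns (white=00, black=10, grey=11, 01 impossible, first bit written first) mean a single test of the first bit separates white from live objects. A sketch of the predicates, ignoring the word-boundary case the assembly handles separately:

    #include <stdint.h>

    // cell is the bitmap word; mask selects the object's first mark bit.
    bool IsWhite(uint32_t cell, uint32_t mask) {
      return (cell & mask) == 0;  // black (10) and grey (11) both set it
    }
    bool IsBlack(uint32_t cell, uint32_t mask) {
      return (cell & mask) != 0 && (cell & (mask << 1)) == 0;
    }
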
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   Usat(output_reg, 8, Operand(input_reg));
 }
@@ -3647,8 +3229,8 @@
   bind(&in_bounds);
   Vmov(temp_double_reg, 0.5);
   vadd(temp_double_reg, input_reg, temp_double_reg);
-  vcvt_u32_f64(temp_double_reg.low(), temp_double_reg);
-  vmov(result_reg, temp_double_reg.low());
+  vcvt_u32_f64(s0, temp_double_reg);
+  vmov(result_reg, s0);
   bind(&done);
 }
 
@@ -3664,63 +3246,6 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
-  Label next;
-  // Preload a couple of values used in the loop.
-  Register  empty_fixed_array_value = r6;
-  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = r7;
-  LoadRoot(empty_descriptor_array_value,
-           Heap::kEmptyDescriptorArrayRootIndex);
-  mov(r1, r0);
-  bind(&next);
-
-  // Check that there are no elements.  Register r1 contains the
-  // current JS object we've reached through the prototype chain.
-  ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
-  cmp(r2, empty_fixed_array_value);
-  b(ne, call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in r2 for the subsequent
-  // prototype load.
-  ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  ldr(r3, FieldMemOperand(r2, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(r3, call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (r3).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumerationIndexOffset));
-  JumpIfSmi(r3, call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  cmp(r1, r0);
-  b(eq, &check_prototype);
-  ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  cmp(r3, empty_fixed_array_value);
-  b(ne, call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  bind(&check_prototype);
-  ldr(r1, FieldMemOperand(r2, Map::kPrototypeOffset));
-  cmp(r1, null_value);
-  b(ne, &next);
-}
-
-
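
The removed CheckEnumCache is the for-in fast-path guard: it walks the prototype chain and falls back to the runtime unless every object has no elements and a usable enum cache, and every prototype's cache is empty. Its shape in C++ with stand-in types (not real V8 heap classes):

    struct Object;
    struct Map { Object* prototype; bool has_enum_cache; };
    struct Object { Map* map; int element_count; int enum_cache_length; };

    bool CanUseEnumCache(Object* receiver, Object* null_value) {
      for (Object* o = receiver; o != null_value; o = o->map->prototype) {
        if (o->element_count != 0) return false;    // elements: use runtime
        if (!o->map->has_enum_cache) return false;  // no cache to rely on
        if (o != receiver && o->enum_cache_length != 0) return false;
      }
      return true;  // enumeration can use the receiver's enum cache
    }
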
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
-}
-
-
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 47afa93..0546e6a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
-#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -39,12 +38,12 @@
 // Static helper functions
 
 // Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
+static inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }
 
 
-inline Operand SmiUntagOperand(Register object) {
+static inline Operand SmiUntagOperand(Register object) {
   return Operand(object, ASR, kSmiTagSize);
 }
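
Both helpers encode V8's pointer tagging: a heap-object reference is the object's address plus a low tag bit, so a field access folds the -1 into the addressing-mode offset rather than untagging first. In plain C (kHeapObjectTag assumed to be 1, as elsewhere in V8):

    #include <stdint.h>

    static const int kHeapObjectTag = 1;

    uintptr_t FieldAddress(uintptr_t tagged, int offset) {
      // tagged == object_start + 1, so this is object_start + offset.
      return tagged + offset - kHeapObjectTag;
    }
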
 
@@ -80,14 +79,6 @@
 };
 
 
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -166,136 +157,40 @@
                  Heap::RootListIndex index,
                  Condition cond = al);
 
-  void LoadHeapObject(Register dst, Handle<HeapObject> object);
 
-  void LoadObject(Register result, Handle<Object> object) {
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      Move(result, object);
-    }
-  }
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise
+                  Label* branch);
 
-  // ---------------------------------------------------------------------------
-  // GC Support
 
-  void IncrementalMarkingRecordWriteHelper(Register object,
-                                           Register value,
-                                           Register address);
+  // For the page containing |object| mark the region covering [address]
+  // dirty. The object address must be in the first 8K of an allocated page.
+  void RecordWriteHelper(Register object,
+                         Register address,
+                         Register scratch);
 
-  enum RememberedSetFinalAction {
-    kReturnAtEnd,
-    kFallThroughAtEnd
-  };
-
-  // Record in the remembered set the fact that we have a pointer to new space
-  // at the address pointed to by the addr register.  Only works if addr is not
-  // in new space.
-  void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
-                           SaveFPRegsMode save_fp,
-                           RememberedSetFinalAction and_then);
-
-  void CheckPageFlag(Register object,
-                     Register scratch,
-                     int mask,
-                     Condition cc,
-                     Label* condition_met);
-
-  // Check if object is in new space.  Jumps if the object is not in new space.
-  // The register scratch can be object itself, but scratch will be clobbered.
-  void JumpIfNotInNewSpace(Register object,
-                           Register scratch,
-                           Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
-  }
-
-  // Check if object is in new space.  Jumps if the object is in new space.
-  // The register scratch can be object itself, but it will be clobbered.
-  void JumpIfInNewSpace(Register object,
-                        Register scratch,
-                        Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
-  }
-
-  // Check if an object has a given incremental marking color.
-  void HasColor(Register object,
-                Register scratch0,
-                Register scratch1,
-                Label* has_color,
-                int first_bit,
-                int second_bit);
-
-  void JumpIfBlack(Register object,
+  // For the page containing |object| mark the region covering
+  // [object+offset] dirty. The object address must be in the first 8K
+  // of an allocated page.  The 'scratch' registers are used in the
+  // implementation and all 3 registers are clobbered by the
+  // operation, as well as the ip register. RecordWrite updates the
+  // write barrier even when storing smis.
+  void RecordWrite(Register object,
+                   Operand offset,
                    Register scratch0,
-                   Register scratch1,
-                   Label* on_black);
+                   Register scratch1);
 
-  // Checks the color of an object.  If the object is already grey or black
-  // then we just fall through, since it is already live.  If it is white and
-  // we can determine that it doesn't need to be scanned, then we just mark it
-  // black and fall through.  For the rest we jump to the label so the
-  // incremental marker can fix its assumptions.
-  void EnsureNotWhite(Register object,
-                      Register scratch1,
-                      Register scratch2,
-                      Register scratch3,
-                      Label* object_is_white_and_not_data);
-
-  // Detects conservatively whether an object is data-only, i.e. it does not
-  // need to be scanned by the garbage collector.
-  void JumpIfDataObject(Register value,
-                        Register scratch,
-                        Label* not_data_object);
-
-  // Notify the garbage collector that we wrote a pointer into an object.
-  // |object| is the object being stored into, |value| is the object being
-  // stored.  value and scratch registers are clobbered by the operation.
-  // The offset is the offset from the start of the object, not the offset from
-  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
-  void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
-      LinkRegisterStatus lr_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
-
-  // As above, but the offset has the tag presubtracted.  For use with
-  // MemOperand(reg, off).
-  inline void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
-      LinkRegisterStatus lr_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     lr_status,
-                     save_fp,
-                     remembered_set_action,
-                     smi_check);
-  }
-
-  // For a given |object| notify the garbage collector that the slot |address|
-  // has been written.  |value| is the object being stored. The value and
-  // address registers are clobbered by the operation.
-  void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
-      LinkRegisterStatus lr_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
+  // For the page containing |object| mark the region covering
+  // [address] dirty. The object address must be in the first 8K of an
+  // allocated page.  All 3 registers are clobbered by the operation,
+  // as well as the ip register. RecordWrite updates the write barrier
+  // even when storing smis.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register scratch);
 
   // Push a handle.
   void Push(Handle<Object> handle);
@@ -330,11 +225,8 @@
   }
 
   // Push four registers.  Pushes leftmost register first (to highest address).
-  void Push(Register src1,
-            Register src2,
-            Register src3,
-            Register src4,
-            Condition cond = al) {
+  void Push(Register src1, Register src2,
+            Register src3, Register src4, Condition cond = al) {
     ASSERT(!src1.is(src2));
     ASSERT(!src2.is(src3));
     ASSERT(!src1.is(src3));
@@ -373,57 +265,6 @@
     }
   }
 
-  // Pop three registers.  Pops rightmost register first (from lower address).
-  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
-    if (src1.code() > src2.code()) {
-      if (src2.code() > src3.code()) {
-        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
-      } else {
-        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
-        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
-      }
-    } else {
-      Pop(src2, src3, cond);
-      str(src1, MemOperand(sp, 4, PostIndex), cond);
-    }
-  }
-
-  // Pop four registers.  Pops rightmost register first (from lower address).
-  void Pop(Register src1,
-           Register src2,
-           Register src3,
-           Register src4,
-           Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
-    ASSERT(!src1.is(src4));
-    ASSERT(!src2.is(src4));
-    ASSERT(!src3.is(src4));
-    if (src1.code() > src2.code()) {
-      if (src2.code() > src3.code()) {
-        if (src3.code() > src4.code()) {
-          ldm(ia_w,
-              sp,
-              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
-              cond);
-        } else {
-          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
-          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
-        }
-      } else {
-        Pop(src3, src4, cond);
-        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
-      }
-    } else {
-      Pop(src2, src3, src4, cond);
-      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
-    }
-  }
-
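
The removed Pop overloads mirror the Push overloads kept above: ldm pairs lower-numbered registers with lower addresses, so a multi-register pop collapses to a single ldm only when the register codes are strictly descending across the argument list, and otherwise decomposes into smaller pops. A usage sketch (masm assumed to be a MacroAssembler*):

    // Pushed leftmost-first, popped rightmost-first; choosing r3 > r2 > r1
    // keeps both operations on the one-instruction stm/ldm path.
    masm->Push(r3, r2, r1);
    // ... code that clobbers r1..r3 ...
    masm->Pop(r3, r2, r1);
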
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -477,6 +318,16 @@
             const double imm,
             const Condition cond = al);
 
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -491,22 +342,6 @@
 
   void LoadContext(Register dst, int context_chain_length);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the global context if the map in register
-  // map_in_out is the cached Array map in the global context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
-  // Load the initial map for new Arrays from a JSFunction.
-  void LoadInitialArrayMap(Register function_in,
-                           Register scratch,
-                           Register map_out);
-
   void LoadGlobalFunction(int index, Register function);
 
   // Load the initial map from the global function. The registers
@@ -516,15 +351,15 @@
                                     Register scratch);
 
   void InitializeRootRegister() {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(isolate());
-    mov(kRootRegister, Operand(roots_array_start));
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    mov(kRootRegister, Operand(roots_address));
   }
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Set up call kind marking in ecx. The method takes ecx as an
+  // Setup call kind marking in ecx. The method takes ecx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -552,10 +387,9 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(Handle<JSFunction> function,
+  void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
   void IsObjectJSObjectType(Register heap_object,
@@ -582,18 +416,20 @@
   // Exception handling
 
   // Push a new try handler and link into try handler chain.
-  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+  // The return address must be passed in register lr.
+  // On exit, r0 contains TOS (code slot).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
   void PopTryHandler();
 
-  // Passes thrown value to the handler of top of the try handler chain.
+  // Passes thrown value (in r0) to the handler of top of the try handler chain.
   void Throw(Register value);
 
   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(Register value);
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -621,7 +457,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
+  // ie. check if it is a mov r<type>, r<type> (referenced as nop(type))
   // These instructions are generated to mark special location in the code,
   // like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -740,13 +576,6 @@
                  Register length,
                  Register scratch);
 
-  // Initialize fields with filler values.  Fields starting at |start_offset|
-  // not including |end_offset| are overwritten with the value in |filler|.  At
-  // the end of the loop, |start_offset| takes the value of |end_offset|.
-  void InitializeFieldsWithFiller(Register start_offset,
-                                  Register end_offset,
-                                  Register filler);
-
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -758,8 +587,7 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss,
-                               bool miss_on_bound_function = false);
+                               Label* miss);
 
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
@@ -787,52 +615,15 @@
                          Register scratch,
                          Label* fail);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Register scratch,
-                               Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiOnlyElements(Register map,
-                                Register scratch,
-                                Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail, in which
-  // case scratch2, scratch3 and scratch4 are unmodified.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register receiver_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   Register scratch4,
-                                   Label* fail);
-
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
-  // set with result of map compare. If multiple map compares are required, the
-  // compare sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Register scratch,
-                  Handle<Map> map,
-                  Label* early_success,
-                  CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-  // Check if the map of an object is equal to a specified map and branch to
-  // label if not. Skip the smi check if not required (object is known to be a
-  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specified map.
+  // Check if the map of an object is equal to a specified map (either
+  // given directly or as an index into the root list) and branch to
+  // label if not. Skip the smi check if not required (object is known
+  // to be a heap object)
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type,
-                CompareMapMode mode = REQUIRE_EXACT_MAP);
+                SmiCheckType smi_check_type);
 
 
   void CheckMap(Register obj,
@@ -924,7 +715,7 @@
   // Truncates a double using a specific rounding mode.
   // Clears the z flag (ne condition) if an overflow occurs.
   // If exact_conversion is true, the z flag is also cleared if the conversion
-  // was inexact, i.e. if the double value could not be converted exactly
+  // was inexact, ie. if the double value could not be converted exactly
   // to a 32bit integer.
   void EmitVFPTruncate(VFPRoundingMode rounding_mode,
                        SwVfpRegister result,
@@ -970,9 +761,20 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
+
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = al);
+
   // Call a runtime routine.
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -991,6 +793,12 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
@@ -1029,25 +837,28 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(Register function, Register scratch, int num_arguments);
   void CallCFunction(ExternalReference function,
                      int num_reg_arguments,
                      int num_double_arguments);
-  void CallCFunction(Register function,
+  void CallCFunction(Register function, Register scratch,
                      int num_reg_arguments,
                      int num_double_arguments);
 
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
-  // Calls an API function.  Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions.  Restores context.  stack_space
-  // - space to be unwound on exit (includes the call JS arguments space and
-  // the additional space allocated for the fast call).
-  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  // stack_space - space to be unwound on exit (includes the call js
+  // arguments space and the additional space allocated for the fast call).
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+                                           int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& builtin);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
@@ -1098,9 +909,6 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
-  void set_has_frame(bool value) { has_frame_ = value; }
-  bool has_frame() { return has_frame_; }
-  inline bool AllowThisStubCall(CodeStub* stub);
 
   // EABI variant for double arguments in use.
   bool use_eabi_hardfloat() {
@@ -1159,14 +967,6 @@
     mov(dst, Operand(src, ASR, kSmiTagSize), s);
   }
 
-  // Untag the source value into destination and jump if source is a smi.
-  // Source and destination can be the same register.
-  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
-  // Untag the source value into destination and jump if source is not a smi.
-  // Source and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label) {
     tst(value, Operand(kSmiTagMask));
@@ -1255,16 +1055,10 @@
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
-  // Expects object in r0 and returns map with validated enum cache
-  // in r0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
-
  private:
   void CallCFunctionHelper(Register function,
+                           ExternalReference function_reference,
+                           Register scratch,
                            int num_reg_arguments,
                            int num_double_arguments);
 
@@ -1276,34 +1070,20 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
-                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
-  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise.
-                  Label* branch);
-
-  // Helper for finding the mark bits for an address.  Afterwards, the
-  // bitmap register points at the word with the mark bits and the mask
-  // the position of the first bit.  Leaves addr_reg unchanged.
-  inline void GetMarkBits(Register addr_reg,
-                          Register bitmap_reg,
-                          Register mask_reg);
-
-  // Helper for throwing exceptions.  Compute a handler address and jump to
-  // it.  See the implementation for register usage.
-  void JumpToHandlerEntry();
-
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
@@ -1311,7 +1091,6 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
-  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1357,12 +1136,12 @@
 // -----------------------------------------------------------------------------
 // Static helper functions.
 
-inline MemOperand ContextOperand(Register context, int index) {
+static MemOperand ContextOperand(Register context, int index) {
   return MemOperand(context, Context::SlotOffset(index));
 }
 
 
-inline MemOperand GlobalObjectOperand()  {
+static inline MemOperand GlobalObjectOperand()  {
   return ContextOperand(cp, Context::GLOBAL_INDEX);
 }
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 10ff2dd..cd76edb 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -371,12 +371,9 @@
     // Isolate.
     __ mov(r3, Operand(ExternalReference::isolate_address()));
 
-    {
-      AllowExternalCallThatCantCauseGC scope(masm_);
-      ExternalReference function =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-      __ CallCFunction(function, argument_count);
-    }
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(function, argument_count);
 
     // Check if function returned non-zero for success or zero for failure.
     __ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -472,7 +469,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  ASSERT(minus < String::kMaxUC16CharCode);
   __ sub(r0, current_character(), Operand(minus));
   __ and_(r0, r0, Operand(mask));
   __ cmp(r0, Operand(c));
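
Spelled out in C, the sub/and/cmp triple above tests whether the current character fails to match after normalization: the regexp compiler chooses minus and mask so that several code units (typically an upper/lower-case pair) collapse to one canonical value. A sketch, not the engine's API:

    #include <stdint.h>

    bool NotEqualAfterMinusAnd(uint16_t current, uint16_t minus,
                               uint16_t mask, uint16_t expected) {
      return (uint16_t)((current - minus) & mask) != expected;  // branch if true
    }
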
@@ -571,7 +568,7 @@
     ExternalReference map = ExternalReference::re_word_character_map();
     __ mov(r0, Operand(map));
     __ ldrb(r0, MemOperand(r0, current_character()));
-    __ cmp(r0, Operand(0));
+    __ tst(r0, Operand(r0));
     BranchOrBacktrack(eq, on_no_match);
     return true;
   }
@@ -585,7 +582,7 @@
     ExternalReference map = ExternalReference::re_word_character_map();
     __ mov(r0, Operand(map));
     __ ldrb(r0, MemOperand(r0, current_character()));
-    __ cmp(r0, Operand(0));
+    __ tst(r0, Operand(r0));
     BranchOrBacktrack(ne, on_no_match);
     if (mode_ != ASCII) {
       __ bind(&done);
@@ -614,12 +611,6 @@
 
   // Entry code:
   __ bind(&entry_label_);
-
-  // Tell the system that we have a stack frame.  Because the type is MANUAL,
-  // no code is generated.
-  FrameScope scope(masm_, StackFrame::MANUAL);
-
-  // Actually emit code to start a new stack frame.
   // Push arguments
   // Save callee-save registers.
   // Start new stack frame.
@@ -681,7 +672,7 @@
 
   // Determine whether the start index is zero, that is at the start of the
   // string, and store that value in a local variable.
-  __ cmp(r1, Operand(0));
+  __ tst(r1, Operand(r1));
   __ mov(r1, Operand(1), LeaveCC, eq);
   __ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
   __ str(r1, MemOperand(frame_pointer(), kAtStart));
@@ -1055,7 +1046,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
     int delta = code_handle->address() - re_code->address();
@@ -1111,11 +1102,6 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
-  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
-    // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address but
-    // will change pointer inside the subject handle.
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 629c209..6af5355 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -53,7 +53,7 @@
 // code.
 class ArmDebugger {
  public:
-  explicit ArmDebugger(Simulator* sim) : sim_(sim) { }
+  explicit ArmDebugger(Simulator* sim);
   ~ArmDebugger();
 
   void Stop(Instruction* instr);
@@ -84,6 +84,11 @@
 };
 
 
+ArmDebugger::ArmDebugger(Simulator* sim) {
+  sim_ = sim;
+}
+
+
 ArmDebugger::~ArmDebugger() {
 }
 
@@ -291,13 +296,6 @@
     if (line == NULL) {
       break;
     } else {
-      char* last_input = sim_->last_debugger_input();
-      if (strcmp(line, "\n") == 0 && last_input != NULL) {
-        line = last_input;
-      } else {
-      // Ownership is transferred to sim_.
-        sim_->set_last_debugger_input(line);
-      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -613,6 +611,7 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
+    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
@@ -646,12 +645,6 @@
 }
 
 
-void Simulator::set_last_debugger_input(char* input) {
-  DeleteArray(last_debugger_input_);
-  last_debugger_input_ = input;
-}
-
-
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
@@ -741,7 +734,7 @@
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
-  // Set up simulator support first. Some of this information is needed to
+  // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
   stack_ = reinterpret_cast<char*>(malloc(stack_size));
@@ -750,7 +743,7 @@
   break_pc_ = NULL;
   break_instr_ = 0;
 
-  // Set up architecture state.
+  // Setup architecture state.
   // All registers are initialized to zero to start with.
   for (int i = 0; i < num_registers; i++) {
     registers_[i] = 0;
@@ -788,8 +781,6 @@
   registers_[pc] = bad_lr;
   registers_[lr] = bad_lr;
   InitializeCoverage();
-
-  last_debugger_input_ = NULL;
 }
 
 
@@ -1277,9 +1268,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 1024;
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
 }
 
 
@@ -1627,8 +1618,6 @@
   ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
 
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
-  // Catch null pointers a little earlier.
-  ASSERT(start_address > 8191 || start_address < 0);
   int reg = 0;
   while (rlist != 0) {
     if ((rlist & 1) != 0) {
@@ -3324,7 +3313,7 @@
 int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Set up arguments
+  // Setup arguments
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -3367,7 +3356,7 @@
   int32_t r10_val = get_register(r10);
   int32_t r11_val = get_register(r11);
 
-  // Set up the callee-saved registers with a known value. To be able to check
+  // Setup the callee-saved registers with a known value. To be able to check
   // that they are preserved properly across JS execution.
   int32_t callee_saved_value = icount_;
   set_register(r4, callee_saved_value);
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 585f1e0..391ef69 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -194,10 +194,6 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
-  // Debugger input.
-  void set_last_debugger_input(char* input);
-  char* last_debugger_input() { return last_debugger_input_; }
-
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -364,9 +360,6 @@
   bool pc_modified_;
   int icount_;
 
-  // Debugger input.
-  char* last_debugger_input_;
-
   // Icache simulation
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 06f8385..f856592 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,83 +43,47 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
-                       Register receiver,
                        Register name,
-                       // Number of the cache entry, not scaled.
                        Register offset,
                        Register scratch,
-                       Register scratch2,
-                       Register offset_scratch) {
+                       Register scratch2) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
   uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
 
   // Check the relative positions of the address fields.
   ASSERT(value_off_addr > key_off_addr);
   ASSERT((value_off_addr - key_off_addr) % 4 == 0);
   ASSERT((value_off_addr - key_off_addr) < (256 * 4));
-  ASSERT(map_off_addr > key_off_addr);
-  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
 
   Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+  Register offsets_base_addr = scratch;
 
   // Check that the key in the entry matches the name.
-  __ ldr(ip, MemOperand(base_addr, 0));
+  __ mov(offsets_base_addr, Operand(key_offset));
+  __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
   __ cmp(name, ip);
   __ b(ne, &miss);
 
-  // Check the map matches.
-  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ b(ne, &miss);
-
   // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+  __ add(offsets_base_addr, offsets_base_addr,
+         Operand(value_off_addr - key_off_addr));
+  __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
 
   // Check that the flags match what we're looking for.
-  Register flags_reg = base_addr;
-  base_addr = no_reg;
-  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-  // It's a nice optimization if this constant is encodable in the bic insn.
-
-  uint32_t mask = Code::kFlagsNotUsedInLookup;
-  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
-  __ bic(flags_reg, flags_reg, Operand(mask));
-  // Using cmn and the negative instead of cmp means we can use movw.
-  if (flags < 0) {
-    __ cmn(flags_reg, Operand(-flags));
-  } else {
-    __ cmp(flags_reg, Operand(flags));
-  }
+  __ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
+  __ cmp(scratch2, Operand(flags));
   __ b(ne, &miss);
 
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
+  // Re-load code entry from cache.
+  __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
 
   // Jump to the first instruction in the code stub.
-  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
 
   // Miss: fall through.
   __ bind(&miss);
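
The code being removed above used three words per stub-cache entry (key, code, map) and multiplied the hash by 3; the restored code packs two words per entry (key, code) and scales the probe offset with a single LSL 1. A minimal sketch of the restored probe, with plain integers standing in for V8's String*/Code* pointers; Entry here is an 8-byte stand-in on a 32-bit target, not the real StubCache::Entry:

    #include <cstdint>

    // Two words per entry, matching the restored ASSERT(sizeof(Entry) == 8).
    struct Entry { uintptr_t key; uintptr_t code; };

    // Compare the key word; on a hit the caller still re-checks the code
    // object's flags (Code::kFlagsOffset) before jumping into the code body.
    uintptr_t Probe(const Entry* table, uint32_t masked_hash, uintptr_t name) {
      const Entry& e = table[masked_hash];
      return e.key == name ? e.code : 0;  // 0 models the fall-through to &miss
    }
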
@@ -131,12 +95,13 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             Handle<String> name,
-                                             Register scratch0,
-                                             Register scratch1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -173,15 +138,20 @@
   __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
 
-  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                     miss_label,
-                                                     &done,
-                                                     receiver,
-                                                     properties,
-                                                     name,
-                                                     scratch1);
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
+
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
 }
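
GenerateDictionaryNegativeLookup shows the error-handling convention this file returns to: any helper that can allocate returns MaybeObject* and every caller must test IsFailure() and propagate before emitting more code. A self-contained sketch of that protocol; the types below are stand-ins, not V8's:

    // Stand-ins for V8's MaybeObject/Failure protocol; not the real types.
    struct MaybeObject {
      bool failed;
      bool IsFailure() const { return failed; }
    };

    static MaybeObject kOk = { false };
    static MaybeObject kRetryAfterGC = { true };

    MaybeObject* MayAllocate(bool heap_full) {
      return heap_full ? &kRetryAfterGC : &kOk;  // allocation can fail
    }

    MaybeObject* Caller(bool heap_full) {
      MaybeObject* result = MayAllocate(heap_full);
      if (result->IsFailure()) return result;  // propagate; stop emitting code
      // ...continue emitting code on success...
      return result;
    }
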
 
 
@@ -191,14 +161,13 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2,
-                              Register extra3) {
+                              Register extra2) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  ASSERT(sizeof(Entry) == 12);
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -218,11 +187,6 @@
   ASSERT(!scratch.is(no_reg));
   ASSERT(!extra.is(no_reg));
   ASSERT(!extra2.is(no_reg));
-  ASSERT(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
@@ -231,51 +195,27 @@
   __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
   __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ add(scratch, scratch, Operand(ip));
-  uint32_t mask = kPrimaryTableSize - 1;
-  // We shift out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.
-  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
-  // Mask down the eor argument to the minimum to keep the immediate
-  // ARM-encodable.
-  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
-  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
-  __ and_(scratch, scratch, Operand(mask));
+  __ eor(scratch, scratch, Operand(flags));
+  __ and_(scratch,
+          scratch,
+          Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the primary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kPrimary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
 
   // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
-  uint32_t mask2 = kSecondaryTableSize - 1;
-  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
-  __ and_(scratch, scratch, Operand(mask2));
+  __ sub(scratch, scratch, Operand(name));
+  __ add(scratch, scratch, Operand(flags));
+  __ and_(scratch,
+          scratch,
+          Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the secondary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kSecondary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
 }
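
The restored GenerateProbe hashes twice: a primary index from (name hash + receiver map) ^ flags, and, after a primary miss, a secondary index from (primary - name + flags), each masked to its table size. A sketch of both computations, assuming uint32_t words and placeholder table sizes (the real code also shifts the mask by kHeapObjectTagSize):

    #include <cstdint>

    const uint32_t kPrimaryTableSize = 2048;   // assumed sizes; the real
    const uint32_t kSecondaryTableSize = 512;  // constants live in StubCache.

    // (name hash + receiver map) ^ flags, masked to the table.
    uint32_t PrimaryIndex(uint32_t name_hash, uint32_t map_word, uint32_t flags) {
      return ((name_hash + map_word) ^ flags) & (kPrimaryTableSize - 1);
    }

    // Derived from the primary scratch value after a primary-table miss.
    uint32_t SecondaryIndex(uint32_t primary, uint32_t name_word, uint32_t flags) {
      return (primary - name_word + flags) & (kSecondaryTableSize - 1);
    }
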
 
 
@@ -298,10 +238,7 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -309,8 +246,8 @@
   __ cmp(prototype, ip);
   __ b(ne, miss);
   // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->global_context()->get(index)));
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -322,10 +259,8 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            Handle<JSObject> holder,
-                                            int index) {
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -432,9 +367,9 @@
 // may be clobbered.  Upon branch to miss_label, the receiver and name
 // registers have their original values.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Handle<JSObject> object,
+                                      JSObject* object,
                                       int index,
-                                      Handle<Map> transition,
+                                      Map* transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
@@ -442,9 +377,13 @@
   // r0 : value
   Label exit;
 
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label);
+
+  // Check that the map of the receiver hasn't changed.
+  __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(Handle<Map>(object->map())));
+  __ b(ne, miss_label);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -456,11 +395,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ mov(r2, Operand(transition));
+    __ mov(r2, Operand(Handle<Map>(transition)));
     __ Push(r2, r0);
     __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -470,10 +409,10 @@
     return;
   }
 
-  if (!transition.is_null()) {
+  if (transition != NULL) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ mov(ip, Operand(transition));
+    __ mov(ip, Operand(Handle<Map>(transition)));
     __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -492,13 +431,7 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ mov(name_reg, r0);
-    __ RecordWriteField(receiver_reg,
-                        offset,
-                        name_reg,
-                        scratch,
-                        kLRHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -511,13 +444,7 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ mov(name_reg, r0);
-    __ RecordWriteField(scratch,
-                        offset,
-                        name_reg,
-                        receiver_reg,
-                        kLRHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
   }
 
   // Return the value (register r0).
@@ -528,15 +455,20 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Handle<Code> code = (kind == Code::LOAD_IC)
-      ? masm->isolate()->builtins()->LoadIC_Miss()
-      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
-  __ Jump(code, RelocInfo::CODE_TARGET);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Handle<Object> object,
+                                 Object* object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -569,12 +501,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     Handle<JSObject> holder_obj) {
+                                     JSObject* holder_obj) {
   __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
   Register scratch = name;
-  __ mov(scratch, Operand(interceptor));
+  __ mov(scratch, Operand(Handle<Object>(interceptor)));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -583,12 +515,11 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -601,7 +532,6 @@
   __ CallStub(&stub);
 }
 
-
 static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
@@ -623,42 +553,44 @@
 }
 
 
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee JS function
+  //  -- sp[4]              : callee js function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last JS argument
+  //  -- sp[12]             : last js argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first JS argument
+  //  -- sp[(argc + 3) * 4] : first js argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  Handle<JSFunction> function = optimization.constant_function();
-  __ LoadHeapObject(r5, function);
+  JSFunction* function = optimization.constant_function();
+  __ mov(r5, Operand(Handle<JSFunction>(function)));
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data(api_call_info->data());
-  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
-    __ Move(r0, api_call_info);
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ Move(r0, api_call_info_handle);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
-    __ Move(r6, call_data);
+    __ Move(r6, Handle<Object>(call_data));
   }
-  // Store JS function and call data.
+  // Store js function and call data.
   __ stm(ib, sp, r5.bit() | r6.bit());
 
   // r2 points to call data as expected by Arguments
   // (refer to layout above).
   __ add(r2, sp, Operand(2 * kPointerSize));
 
-  const int kApiStackSpace = 4;
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
 
-  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  const int kApiStackSpace = 4;
   __ EnterExitFrame(false, kApiStackSpace);
 
   // r0 = v8::Arguments&
@@ -676,18 +608,17 @@
   __ mov(ip, Operand(0));
   __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  ApiFunction fun(function_address);
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
-  AllowExternalCallThatCantCauseGC scope(masm);
-
-  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
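
The state comment above pins down the frame both versions rely on. The same layout, written out as byte offsets from sp in a small sketch (argc is the JS argument count; the function names are illustrative):

    // Byte offsets from sp on entry to the fast API call sequence.
    int HolderOffset()           { return 0; }              // set by CheckPrototypes
    int CalleeOffset()           { return 4; }              // callee function
    int CallDataOffset()         { return 8; }              // call data
    int LastArgOffset()          { return 12; }             // last argument
    int FirstArgOffset(int argc) { return (argc + 3) * 4; } // first argument
    int ReceiverOffset(int argc) { return (argc + 4) * 4; } // receiver
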
 
-
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -699,63 +630,86 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  void Compile(MacroAssembler* masm,
-               Handle<JSObject> object,
-               Handle<JSObject> holder,
-               Handle<String> name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
+
     CallOptimization optimization(lookup);
+
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
-                       holder, lookup, name, optimization, miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
-      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
-                     name, holder, miss);
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                        Handle<JSObject> object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        Handle<JSObject> interceptor_holder,
-                        LookupResult* lookup,
-                        Handle<String> name,
-                        const CallOptimization& optimization,
-                        Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
+
     Counters* counters = masm->isolate()->counters();
+
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-        !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(
-          object, interceptor_holder);
-      if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(
-            interceptor_holder, Handle<JSObject>(lookup->holder()));
-      }
-      can_do_fast_api_call =
-          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
+      if (depth1 == kInvalidProtoDepth) {
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
+      }
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                        scratch1, scratch2);
+                        scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -768,9 +722,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver,
+                                        interceptor_holder, scratch1,
+                                        scratch2, scratch3, name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -783,11 +737,10 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      Handle<JSObject>(lookup->holder()),
-                                      scratch1, scratch2, scratch3,
-                                      name, depth2, miss);
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -798,13 +751,16 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
+                        JUMP_FUNCTION, call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -819,53 +775,64 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
+
+    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      Handle<JSObject> object,
+                      JSObject* object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      Handle<String> name,
-                      Handle<JSObject> interceptor_holder,
+                      String* name,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, miss_label);
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
 
     // Call a runtime function to load the interceptor property.
-    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
     // Save the name_ register across the call.
     __ push(name_);
-    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
+
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
                           masm->isolate()),
         5);
+
     // Restore the name_ register.
     __ pop(name_);
-    // Leave the internal frame.
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           Handle<JSObject> holder_obj,
+                           JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(holder, name_);
-      CompileCallLoadPropertyWithInterceptor(masm,
-                                             receiver,
-                                             holder,
-                                             name_,
-                                             holder_obj);
-      __ pop(name_);  // Restore the name.
-      __ pop(receiver);  // Restore the holder.
-    }
+    __ EnterInternalFrame();
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
+
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch);
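
The comparison against kNoInterceptorResultSentinelRootIndex encodes the contract of LoadWithInterceptor: an interceptor either produces a value or returns the sentinel, and only the sentinel falls through to the cached constant function. As a sketch, with an arbitrary integer standing in for the sentinel root:

    const int kNoInterceptorResult = -1;  // arbitrary stand-in for the heap root

    // Only the sentinel falls through to the cached constant function.
    int LoadProperty(int (*interceptor)(), int (*constant_function)()) {
      int result = interceptor();
      if (result != kNoInterceptorResult) return result;  // interceptor answered
      return constant_function();
    }
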
@@ -882,42 +849,52 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
-                                      Handle<GlobalObject> global,
-                                      Handle<String> name,
-                                      Register scratch,
-                                      Label* miss) {
-  Handle<JSGlobalPropertyCell> cell =
-      GlobalObject::EnsurePropertyCell(global, name);
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Operand(cell));
+  __ mov(scratch, Operand(Handle<Object>(cell)));
   __ ldr(scratch,
          FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   __ cmp(scratch, ip);
   __ b(ne, miss);
+  return cell;
 }
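
GenerateCheckPropertyCell guards negative results across global objects: the compiler obtains the property's cell, asserts it currently holds the hole, and emits a run-time re-check so the stub misses as soon as the property is actually added. A sketch of the run-time half, with a placeholder hole value:

    #include <cstdint>

    const uintptr_t kTheHoleValue = 0x2;  // stand-in for the heap's hole sentinel

    struct PropertyCell { uintptr_t value; };

    // True while the property is still absent; false corresponds to &miss.
    bool CellStillEmpty(const PropertyCell* cell) {
      return cell->value == kTheHoleValue;
    }
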
 
-
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
-                                       Handle<JSObject> object,
-                                       Handle<JSObject> holder,
-                                       Handle<String> name,
-                                       Register scratch,
-                                       Label* miss) {
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
     if (current->IsGlobalObject()) {
-      GenerateCheckPropertyCell(masm,
-                                Handle<GlobalObject>::cast(current),
-                                name,
-                                scratch,
-                                miss);
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
     }
-    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
   }
+  return NULL;
 }
 
 
@@ -1031,13 +1008,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
-                                       Handle<JSObject> holder,
+                                       JSObject* holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       Handle<String> name,
+                                       String* name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1055,51 +1032,83 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
-    ++depth;
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        name = factory()->LookupSymbol(name);
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
       }
-      ASSERT(current->property_dictionary()->FindEntry(*name) ==
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
 
       __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+
+      // Branch on the result of the map check.
+      __ b(ne, miss);
+
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        // Restore scratch register to be the map of the object.  In the
+        // new space case below, we load the prototype from the map in
+        // the scratch register.
+        __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      Handle<Map> current_map(current->map());
-      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
-                  ALLOW_ELEMENT_TRANSITION_MAPS);
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
+      // Check the map of the current object.
+      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ b(ne, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (heap()->InNewSpace(*prototype)) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, Operand(prototype));
-      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ mov(reg, Operand(Handle<JSObject>(prototype)));
     }
 
     if (save_at_depth == depth) {
@@ -1110,130 +1119,143 @@
     current = prototype;
   }
 
+  // Check the holder map.
+  __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ cmp(scratch1, Operand(Handle<Map>(current->map())));
+  __ b(ne, miss);
+
   // Log the check depth.
   LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  // Check the holder map.
-  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
-              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
+  }
 
-  // If we've skipped any global objects, it's not enough to verify that
-  // their maps haven't changed.  We also need to check that the property
-  // cell for the property is still empty.
-  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
   return reg;
 }
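
CheckPrototypes emits one map check per hop from the receiver to the holder, plus a final check of the holder's own map, loading each prototype from the map when it lives in new space and as an embedded constant otherwise. A high-level model of what the generated checks verify (assumes holder is on the chain; the types are illustrative):

    // Illustrative stand-in types; V8's real objects are tagged heap values.
    struct Obj { const void* map; Obj* prototype; };

    // True when every map from object up to and including holder still matches
    // what was recorded at compile time; false corresponds to a branch to miss.
    bool ChainUnchanged(Obj* object, Obj* holder, const void* const* expected_maps) {
      int i = 0;
      for (Obj* current = object; ; current = current->prototype, ++i) {
        if (current->map != expected_maps[i]) return false;  // map changed
        if (current == holder) return true;                  // holder checked too
      }
    }
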
 
 
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
-                                     Handle<JSObject> holder,
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     Handle<String> name,
+                                     String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
   GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<JSFunction> value,
-                                        Handle<String> name,
+                                        Object* value,
+                                        String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3, name,
+                  miss);
 
   // Return the constant value.
-  __ LoadHeapObject(r0, value);
+  __ mov(r0, Operand(Handle<Object>(value)));
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
-                                 scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
 
   // Build AccessorInfo::args_ list on the stack and push property name below
   // the exit frame to make GC aware of them and store pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  if (heap()->InNewSpace(callback->data())) {
-    __ Move(scratch3, callback);
+  Handle<AccessorInfo> callback_handle(callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ Move(scratch3, callback_handle);
     __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ Move(scratch3, Handle<Object>(callback->data()));
+    __ Move(scratch3, Handle<Object>(callback_handle->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(r0, sp);  // r0 = Handle<String>
 
-  const int kApiStackSpace = 1;
-  FrameScope frame_scope(masm(), StackFrame::MANUAL);
-  __ EnterExitFrame(false, kApiStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
 
+  const int kApiStackSpace = 1;
+  __ EnterExitFrame(false, kApiStackSpace);
   // Create AccessorInfo instance on the stack above the exit frame with
-  // scratch2 (internal::Object** args_) as the data.
+  // scratch2 (internal::Object **args_) as the data.
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
   __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
 
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
   const int kStackUnwindSpace = 4;
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
-                                           Handle<JSObject> interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           Handle<String> name,
+                                           String* name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1245,13 +1267,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      compile_followup_inline =
-          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
     }
   }
 
@@ -1266,45 +1288,48 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
-        __ Push(receiver, holder_reg, name_reg);
-      } else {
-        __ Push(holder_reg, name_reg);
-      }
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method.)
-      CompileCallLoadPropertyWithInterceptor(masm(),
-                                             receiver,
-                                             holder_reg,
-                                             name_reg,
-                                             interceptor_holder);
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-      __ cmp(r0, scratch1);
-      __ b(eq, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
+    __ EnterInternalFrame();
 
-      __ bind(&interceptor_failed);
-      __ pop(name_reg);
-      __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        __ pop(receiver);
-      }
-      // Leave the internal frame.
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
     }
+
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch1);
+    __ b(eq, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   Handle<JSObject>(lookup->holder()),
+                                   lookup->holder(),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1316,21 +1341,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), r0, holder_reg,
-                               Handle<JSObject>(lookup->holder()),
-                               lookup->GetFieldIndex());
+                               lookup->holder(), lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ Move(scratch2, callback);
+      __ Move(scratch2, Handle<AccessorInfo>(callback));
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1367,17 +1392,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(r2, Operand(name));
+    __ cmp(r2, Operand(Handle<String>(name)));
     __ b(ne, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<String> name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+                                                   JSObject* holder,
+                                                   String* name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1387,22 +1412,27 @@
   // Get the receiver from the stack.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(r0, miss);
+  }
+
   // Check that the maps haven't changed.
-  __ JumpIfSmi(r0, miss);
   CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    Label* miss) {
   // Get the value from the cell.
-  __ mov(r3, Operand(cell));
+  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(*function)) {
+  if (heap()->InNewSpace(function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1416,26 +1446,30 @@
     __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
     __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ cmp(r4, r3);
+    __ b(ne, miss);
   } else {
-    __ cmp(r1, Operand(function));
+    __ cmp(r1, Operand(Handle<JSFunction>(function)));
+    __ b(ne, miss);
   }
-  __ b(ne, miss);
 }
 
 
-void CallStubCompiler::GenerateMissBranch() {
-  Handle<Code> code =
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_state_);
-  __ Jump(code, RelocInfo::CODE_TARGET);
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1455,23 +1489,23 @@
   Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1481,12 +1515,14 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   Register receiver = r1;
+
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1495,8 +1531,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object), receiver,
+                  holder, r3, r0, r4, name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1506,21 +1542,21 @@
   } else {
     Label call_builtin;
 
+    Register elements = r3;
+    Register end_elements = r5;
+
+    // Get the elements array of the object.
+    __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                r0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label attempt_to_grow_elements;
-
-      Register elements = r6;
-      Register end_elements = r5;
-      // Get the elements array of the object.
-      __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
-      // Check that the elements are in fast mode and writable.
-      __ CheckMap(elements,
-                  r0,
-                  Heap::kFixedArrayMapRootIndex,
-                  &call_builtin,
-                  DONT_DO_SMI_CHECK);
-
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1528,22 +1564,18 @@
       STATIC_ASSERT(kSmiTag == 0);
       __ add(r0, r0, Operand(Smi::FromInt(argc)));
 
-      // Get the elements' length.
+      // Get the length of the elements array.
       __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmp(r0, r4);
       __ b(gt, &attempt_to_grow_elements);
 
-      // Check if value is a smi.
-      Label with_write_barrier;
-      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ JumpIfNotSmi(r4, &with_write_barrier);
-
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Store the value.
+      // Push the element.
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1553,51 +1585,14 @@
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
       // Check for a smi.
+      __ JumpIfNotSmi(r4, &with_write_barrier);
+      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-
-      __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-      if (FLAG_smi_only_arrays  && !FLAG_trace_elements_transitions) {
-        Label fast_object, not_fast_object;
-        __ CheckFastObjectElements(r3, r7, &not_fast_object);
-        __ jmp(&fast_object);
-        // In case of fast smi-only, convert to fast object, otherwise bail out.
-        __ bind(&not_fast_object);
-        __ CheckFastSmiOnlyElements(r3, r7, &call_builtin);
-        // edx: receiver
-        // r3: map
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                               FAST_ELEMENTS,
-                                               r3,
-                                               r7,
-                                               &call_builtin);
-        __ mov(r2, receiver);
-        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
-        __ bind(&fast_object);
-      } else {
-        __ CheckFastObjectElements(r3, r3, &call_builtin);
-      }
-
-      // Save new length.
-      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
-      // Store the value.
-      // We may need a register containing the address end_elements below,
-      // so write back the value in end_elements.
-      __ add(end_elements, elements,
-             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
-
-      __ RecordWrite(elements,
-                     end_elements,
-                     r4,
-                     kLRHasNotBeenSaved,
-                     kDontSaveFPRegs,
-                     EMIT_REMEMBERED_SET,
-                     OMIT_SMI_CHECK);
+      __ InNewSpace(elements, r4, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, r4);
       __ Drop(argc + 1);
       __ Ret();
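
The store above also reverts the write barrier: instead of 3.7's incremental-marking RecordWrite, the 3.6 generational scheme skips all bookkeeping when the elements array itself is in new space and otherwise lets RecordWriteHelper mark the containing page so the next scavenge rescans it for old-to-new pointers. A simplified sketch of that rule, with the heap boundary and page marking simulated:

    #include <cstdint>
    #include <cstdio>
    #include <set>

    // Simulated heap: one address range is "new space", everything else is old.
    const uintptr_t kNewSpaceStart = 0x1000, kNewSpaceEnd = 0x2000;
    std::set<uintptr_t> remembered_pages;  // pages the scavenger must re-scan

    bool InNewSpace(uintptr_t addr) {
      return addr >= kNewSpaceStart && addr < kNewSpaceEnd;
    }

    // Mirrors the reverted fast path: barrier only when an old-space object
    // may now hold a pointer into new space.
    void StoreWithBarrier(uintptr_t object, uintptr_t slot) {
      // ... the actual pointer store into *slot would happen here ...
      if (InNewSpace(object)) return;  // __ InNewSpace(elements, r4, eq, &exit)
      remembered_pages.insert(object & ~static_cast<uintptr_t>(0xFFF));
    }

    int main() {
      StoreWithBarrier(0x1800, 0x1808);  // new-space array: nothing recorded
      StoreWithBarrier(0x5800, 0x5808);  // old-space array: page remembered
      std::printf("remembered pages: %zu\n", remembered_pages.size());  // 1
    }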
 
@@ -1609,15 +1604,6 @@
         __ b(&call_builtin);
       }
 
-      __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
-      // Growing elements that are SMI-only requires special handling in case
-      // the new element is non-Smi. For now, delegate to the builtin.
-      Label no_fast_elements_check;
-      __ JumpIfSmi(r2, &no_fast_elements_check);
-      __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(r7, r7, &call_builtin);
-      __ bind(&no_fast_elements_check);
-
       Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,25 +1616,26 @@
              Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
       __ add(end_elements, end_elements, Operand(kEndElementsOffset));
       __ mov(r7, Operand(new_space_allocation_top));
-      __ ldr(r3, MemOperand(r7));
-      __ cmp(end_elements, r3);
+      __ ldr(r6, MemOperand(r7));
+      __ cmp(end_elements, r6);
       __ b(ne, &call_builtin);
 
       __ mov(r9, Operand(new_space_allocation_limit));
       __ ldr(r9, MemOperand(r9));
-      __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
-      __ cmp(r3, r9);
+      __ add(r6, r6, Operand(kAllocationDelta * kPointerSize));
+      __ cmp(r6, r9);
       __ b(hi, &call_builtin);
 
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ str(r3, MemOperand(r7));
+      __ str(r6, MemOperand(r7));
       // Push the argument.
-      __ str(r2, MemOperand(end_elements));
+      __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ str(r6, MemOperand(end_elements));
       // Fill the rest with holes.
-      __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
-        __ str(r3, MemOperand(end_elements, i * kPointerSize));
+        __ str(r6, MemOperand(end_elements, i * kPointerSize));
       }
 
       // Update elements' and array's sizes.
@@ -1669,19 +1656,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
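
The attempt_to_grow_elements path works because new space is a bump-pointer allocator: if the end of the elements array equals new_space_allocation_top, the array was the most recent allocation and can be grown in place by bumping the top, as long as the bumped top stays at or below new_space_allocation_limit. A standalone sketch of that check with a simulated allocator (kAllocationDelta matches the constant used above):

    #include <cstdio>

    // Simulated new-space bump allocator; the real top/limit are the external
    // references new_space_allocation_top/limit loaded into r7/r9 above.
    static char heap[4096];
    static char* top = heap;           // next free byte
    static char* limit = heap + 4096;  // end of new space
    const int kAllocationDelta = 4;    // extra slots to grow by, as in the stub

    // Grow an elements array in place: only legal when it is the most recent
    // allocation, i.e. its end coincides with the allocation top.
    bool TryGrowInPlace(char* elements_end, int delta_bytes) {
      if (elements_end != top) return false;        // something was allocated after us
      if (top + delta_bytes > limit) return false;  // would overflow new space
      top += delta_bytes;                           // bump: the array now owns the gap
      return true;
    }

    int main() {
      top += 64;  // pretend a 64-byte elements array was just allocated
      char* arr_end = top;
      std::printf("%d\n", TryGrowInPlace(arr_end, kAllocationDelta * 4));  // 1
      std::printf("%d\n", TryGrowInPlace(arr_end, 16));  // 0: no longer last allocation
    }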
 
 
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                                   JSObject* holder,
+                                                   JSGlobalPropertyCell* cell,
+                                                   JSFunction* function,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
@@ -1691,22 +1678,25 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss, return_undefined, call_builtin;
+
   Register receiver = r1;
   Register elements = r3;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
-                  r4, r0, name, &miss);
+  CheckPrototypes(JSObject::cast(object),
+                  receiver, holder, elements, r4, r0, name, &miss);
 
   // Get the elements array of the object.
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1755,19 +1745,20 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
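
Both array stubs lean on the 32-bit smi encoding: a small integer is stored shifted left by kSmiTagSize (one bit, tag bit zero), so shifting a smi length left by another kPointerSizeLog2 - kSmiTagSize bits yields its byte offset directly; that is what addressing operands like Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize) compute. The arithmetic, worked through:

    #include <cassert>
    #include <cstdio>

    // 32-bit V8 smi encoding: value pre-shifted left by one, tag bit 0 == 0.
    const int kSmiTagSize = 1;
    const int kPointerSizeLog2 = 2;  // 4-byte pointers

    int SmiFromInt(int value) { return value << kSmiTagSize; }

    int main() {
      int length = 7;
      int length_smi = SmiFromInt(length);  // 14
      // The smi is already shifted by one, so one more shift of
      // (kPointerSizeLog2 - kSmiTagSize) turns it into a byte offset.
      int byte_offset = length_smi << (kPointerSizeLog2 - kSmiTagSize);
      assert(byte_offset == length * 4);  // length * kPointerSize
      std::printf("offset for length %d: %d bytes\n", length, byte_offset);
    }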
 
 
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1777,19 +1768,21 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
 
   const int argc = arguments().immediate();
+
   Label miss;
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
+
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1797,92 +1790,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  r0, holder, r1, r3, r4, name, &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+                  r1, r3, r4, name, &miss);
 
   Register receiver = r1;
   Register index = r4;
-  Register result = r0;
-  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
-  if (argc > 0) {
-    __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
-  } else {
-    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
-  }
-
-  StringCharCodeAtGenerator generator(receiver,
-                                      index,
-                                      result,
-                                      &miss,  // When not a string.
-                                      &miss,  // When not a number.
-                                      index_out_of_range_label,
-                                      STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ LoadRoot(r0, Heap::kNanValueRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
-  }
-
-  __ bind(&miss);
-  // Restore function name in r2.
-  __ Move(r2, name);
-  __ bind(&name_miss);
-  GenerateMissBranch();
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
-  // ----------- S t a t e -------------
-  //  -- r2                     : function name
-  //  -- lr                     : return address
-  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- sp[argc * 4]           : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
-  const int argc = arguments().immediate();
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            r0,
-                                            &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  r0, holder, r1, r3, r4, name, &miss);
-
-  Register receiver = r0;
-  Register index = r4;
   Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1892,20 +1805,108 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator generator(receiver,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &miss,  // When not a string.
-                                  &miss,  // When not a number.
-                                  index_out_of_range_label,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(r0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in r2.
+  __ Move(r2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
+  // ----------- S t a t e -------------
+  //  -- r2                     : function name
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            r0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
+                  r1, r3, r4, name, &miss);
+
+  Register receiver = r0;
+  Register index = r4;
+  Register scratch1 = r1;
+  Register scratch2 = r3;
+  Register result = r0;
+  __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1916,21 +1917,22 @@
 
   __ bind(&miss);
   // Restore function name in r2.
-  __ Move(r2, name);
+  __ Move(r2, Handle<String>(name));
   __ bind(&name_miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
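
Both string stubs wire the same three exits into the generator: the fast in-line result, an index_out_of_range exit that materializes NaN (only under the default string stub), and a miss that falls back to the runtime. The control flow in miniature; the enum and helper are illustrative, not V8 API:

    #include <cstdio>
    #include <string>

    // Outcomes matching the labels wired into the generator above.
    enum Outcome { FAST, OUT_OF_RANGE, MISS };

    Outcome CharCodeAt(bool receiver_is_string, bool index_is_number,
                       int index, const std::string& s, int* code) {
      if (!receiver_is_string) return MISS;  // &miss: receiver not a string
      if (!index_is_number) return MISS;     // &miss: index not a number
      if (index < 0 || index >= static_cast<int>(s.size())) return OUT_OF_RANGE;
      *code = static_cast<unsigned char>(s[index]);  // fast path: load the char
      return FAST;
    }

    int main() {
      int code = 0;
      std::printf("%d\n", CharCodeAt(true, true, 1, "abc", &code));   // 0, code=98
      std::printf("%d\n", CharCodeAt(true, true, 9, "abc", &code));   // 1 -> NaN
      std::printf("%d\n", CharCodeAt(false, true, 0, "abc", &code));  // 2 -> runtime
    }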
 
 
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -1943,23 +1945,22 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
 
-    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
-                    name, &miss);
+    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1975,35 +1976,34 @@
   // Convert the smi code to uint16.
   __ and_(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator generator(code, r0);
-  generator.GenerateFast(masm());
+  StringCharFromCodeGenerator char_from_code_generator(code, r0);
+  char_from_code_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function,  arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2013,28 +2013,31 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(VFP3)) {
-    return Handle<Code>::null();
+    return heap()->undefined_value();
   }
 
   CpuFeatures::Scope scope_vfp3(VFP3);
+
   const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2066,7 +2069,7 @@
   __ vmrs(r3);
   // Set custom FPCSR:
   //  - Set rounding mode to "Round towards Minus Infinity"
-  //    (i.e. bits [23:22] = 0b10).
+  //    (i.e., bits [23:22] = 0b10).
   //  - Clear vfp cumulative exception flags (bits [3:0]).
  //  - Make sure Flush-to-zero mode control bit is unset (bit 24).
   __ bic(r9, r3,
@@ -2132,24 +2135,23 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
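
The floor stub works by temporarily switching the VFP rounding mode to round-toward-minus-infinity, converting, and then restoring the caller's FPSCR. The same idea expressed portably with the C99 floating-point environment, where fesetround stands in for the bic/orr/vmsr sequence above:

    #include <cfenv>
    #include <cmath>
    #include <cstdio>

    // Round toward minus infinity, convert, restore the previous mode.
    // (FE_DOWNWARD corresponds to FPSCR RMode bits [23:22] = 0b10 on ARM.)
    int FloorViaRoundingMode(double x) {
      int old_mode = std::fegetround();
      std::fesetround(FE_DOWNWARD);
      int result = static_cast<int>(std::nearbyint(x));  // honors current mode
      std::fesetround(old_mode);
      return result;
    }

    int main() {
      std::printf("%d %d\n", FloorViaRoundingMode(1.7),
                  FloorViaRoundingMode(-1.2));  // 1 -2
    }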
 
 
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                                  JSObject* holder,
+                                                  JSGlobalPropertyCell* cell,
+                                                  JSFunction* function,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- r2                     : function name
   //  -- lr                     : return address
@@ -2159,22 +2161,25 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
   GenerateNameCheck(name, &miss);
-  if (cell.is_null()) {
+
+  if (cell == NULL) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
+
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(r1, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2231,38 +2236,39 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // r2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileFastApiCall(
+MaybeObject* CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return Handle<Code>::null();
-  if (!cell.is_null()) return Handle<Code>::null();
-  if (!object->IsJSObject()) return Handle<Code>::null();
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-      Handle<JSObject>::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
+      JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
 
   Label miss, miss_before_stack_reserved;
+
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -2278,40 +2284,44 @@
   ReserveSpaceForFastApiCall(masm(), r0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
+  CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                   depth, &miss);
 
-  GenerateFastApiDirectCall(masm(), optimization, argc);
+  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+                                                   JSObject* holder,
+                                                   JSFunction* function,
+                                                   String* name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // An undefined result means bail out to the regular compiler code below.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
@@ -2326,14 +2336,16 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
                           1, r0, r3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
-                      name, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
+                      &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2344,25 +2356,28 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         // Check that the object is a two-byte string or a symbol.
         __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
         __ b(ge, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
+                        r1, r4, name, &miss);
       }
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(r1, &fast);
@@ -2372,18 +2387,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
+                        r1, r4, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      }
-      break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      } else {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -2396,92 +2411,112 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder, r3,
+                        r1, r4, name, &miss);
       }
       break;
+    }
+
+    default:
+      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
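
The STRING/NUMBER/BOOLEAN_CHECK arms all encode one rule: a builtin or strict-mode callee may be invoked with the raw primitive receiver, while a non-strict non-builtin would need the receiver boxed into a wrapper object, so the stub jumps to miss and lets the runtime do the boxing. The decision in miniature (strict_mode mirrors function_info->strict_mode()):

    #include <cstdio>

    // Can a stub call this function with an unboxed primitive receiver?
    bool CanCallWithPrimitiveReceiver(bool is_builtin, bool strict_mode) {
      // Builtins and strict-mode functions never observe a boxed receiver,
      // so the raw value may be passed through; everything else must miss.
      return is_builtin || strict_mode;
    }

    int main() {
      std::printf("%d\n", CanCallWithPrimitiveReceiver(false, false));  // 0: __ jmp(&miss)
      std::printf("%d\n", CanCallWithPrimitiveReceiver(true, false));   // 1: fast call
      std::printf("%d\n", CanCallWithPrimitiveReceiver(false, true));   // 1: fast call
    }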
 
 
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
+
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-  LookupResult lookup(isolate());
+
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
-  compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
-                   &miss);
+  CallInterceptorCompiler compiler(this, arguments(), r2, extra_ic_state_);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         r1,
+                                         r3,
+                                         r4,
+                                         r0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* name) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
+
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // An undefined result means bail out to the regular compiler code below.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
+
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
+
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -2491,37 +2526,45 @@
     __ str(r3, MemOperand(sp, argc * kPointerSize));
   }
 
-  // Set up the context (function already in r1).
+  // Set up the context (the function is already in r1).
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
-                NullCallWrapper(), call_kind);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
+  } else {
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
-                                                  Handle<Map> transition,
-                                                  Handle<String> name) {
+                                                  Map* transition,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2530,20 +2573,24 @@
   // -----------------------------------
   Label miss;
 
-  GenerateStoreField(masm(), object, index, transition, r1, r2, r3, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     r1, r2, r3,
+                     &miss);
   __ bind(&miss);
   Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<AccessorInfo> callback,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                     AccessorInfo* callback,
+                                                     String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2552,9 +2599,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(r1, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(r1, r3, Handle<Map>(object->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r3, Operand(Handle<Map>(object->map())));
+  __ b(ne, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2566,7 +2617,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(r1);  // receiver
-  __ mov(ip, Operand(callback));  // callback info
+  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
   __ Push(ip, r2, r0);
 
   // Do tail-call to the runtime system.
@@ -2585,9 +2636,8 @@
 }
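
Here and in the interceptor stub below, the revert replaces the CheckMap macro with the explicit 3.6 sequence: reject smis, then load the receiver's map (its hidden class) and compare it against the map the stub was specialized for; any shape change falls through to miss. A standalone sketch, with Map and HeapObject as simplified stand-ins:

    #include <cstdio>

    struct Map { int shape_id; };           // stands in for a hidden class
    struct HeapObject { const Map* map; };  // every heap object points at its map

    // Smi check plus map compare, as in the sequence above:
    //   __ JumpIfSmi(r1, &miss);
    //   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
    //   __ cmp(r3, Operand(expected_map)); __ b(ne, &miss);
    bool MapCheckPasses(const HeapObject* obj, bool is_smi, const Map* expected) {
      if (is_smi) return false;
      return obj->map == expected;
    }

    int main() {
      Map m1{1}, m2{2};
      HeapObject o{&m1};
      std::printf("%d %d\n", MapCheckPasses(&o, false, &m1),  // 1: stub applies
                  MapCheckPasses(&o, false, &m2));            // 0: shape changed
    }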
 
 
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> receiver,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2596,9 +2646,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(r1, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r3, Operand(Handle<Map>(receiver->map())));
+  __ b(ne, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2630,10 +2684,9 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
-    Handle<GlobalObject> object,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                                   JSGlobalPropertyCell* cell,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : receiver
@@ -2651,7 +2704,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ mov(r4, Operand(cell));
+  __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
   __ cmp(r5, r6);
@@ -2659,7 +2712,6 @@
 
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-  // Cells are always rescanned, so no write barrier here.
 
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2676,9 +2728,9 @@
 }
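
Global stores go through a property cell, and deleting the global writes the hole sentinel into its cell; the stub therefore stores only while the cell still holds a live value, and otherwise misses to the runtime, which must rebuild the property entry in the global object's dictionary. A sketch of the check, with kTheHole standing in for Heap::kTheHoleValueRootIndex:

    #include <cstdio>

    const int kTheHole = -1;  // unique "deleted" sentinel (stand-in)
    struct PropertyCell { int value; };

    // Mirrors the stub: compare the cell against the hole before storing.
    //   __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
    //   __ ldr(r6, FieldMemOperand(r4, kValueOffset)); __ cmp(r5, r6);
    bool TryStoreGlobal(PropertyCell* cell, int value) {
      if (cell->value == kTheHole) return false;  // deleted: go to runtime
      cell->value = value;                        // str r0, [r4, #kValueOffset]
      return true;
    }

    int main() {
      PropertyCell live{42}, deleted{kTheHole};
      std::printf("%d %d\n", TryStoreGlobal(&live, 7),
                  TryStoreGlobal(&deleted, 7));  // 1 0
    }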
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- lr    : return address
@@ -2694,8 +2746,15 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  r1,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2707,14 +2766,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, factory()->empty_string());
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2731,19 +2790,24 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   Label miss;
-  GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, callback, name,
-                       &miss);
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2752,10 +2816,10 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> value,
-                                                   Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2772,9 +2836,9 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2782,9 +2846,17 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          r0,
+                          r2,
+                          r3,
+                          r1,
+                          r4,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2794,12 +2866,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name,
-    bool is_dont_delete) {
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- r2    : name
@@ -2807,12 +2878,18 @@
   // -----------------------------------
   Label miss;
 
+  // If the object is the holder then we know that it's a global
+  // object, which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(r0, &miss);
+  }
+
   // Check that the map of the global has not changed.
-  __ JumpIfSmi(r0, &miss);
   CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
 
   // Get the value from the cell.
-  __ mov(r3, Operand(cell));
+  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2836,9 +2913,9 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
-                                                     Handle<JSObject> receiver,
-                                                     Handle<JSObject> holder,
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
@@ -2848,7 +2925,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
@@ -2859,11 +2936,11 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+    String* name,
+    JSObject* receiver,
+    JSObject* holder,
+    AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2872,11 +2949,16 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, callback, name,
-                       &miss);
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
+                                             r4, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2884,11 +2966,10 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<JSFunction> value) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                        JSObject* receiver,
+                                                        JSObject* holder,
+                                                        Object* value) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2897,7 +2978,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
@@ -2909,10 +2990,9 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                           JSObject* holder,
+                                                           String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2921,12 +3001,20 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          r1,
+                          r0,
+                          r2,
+                          r3,
+                          r4,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2935,8 +3023,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2945,7 +3032,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   GenerateLoadArrayLength(masm(), r1, r2, &miss);
@@ -2956,8 +3043,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2969,7 +3055,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
 
   // Check the key is the cached one.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
@@ -2982,8 +3068,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -2995,7 +3080,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
 
   // Check the name hasn't changed.
-  __ cmp(r0, Operand(name));
+  __ cmp(r0, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
@@ -3007,29 +3092,33 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
   //  -- r1    : receiver
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r1,
+                 r2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_ics) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- lr    : return address
   //  -- r0    : key
@@ -3041,9 +3130,11 @@
   int receiver_count = receiver_maps->length();
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    __ mov(ip, Operand(receiver_maps->at(current)));
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
     __ cmp(r2, ip);
-    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
   }
 
   __ bind(&miss);
@@ -3051,14 +3142,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
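CompileLoadMegamorphic above emits a purely linear dispatch: load the receiver's map, compare it against each cached map, and tail-jump to the matching handler, falling through to the miss stub when nothing matches. A conceptual C++ model of that sequence (Map and Code are opaque stand-ins here, not the V8 classes):

    #include <cstddef>
    #include <vector>

    struct Map {};   // stand-in for v8::internal::Map
    struct Code {};  // stand-in for v8::internal::Code

    // Mirrors the emitted cmp/b(eq) chain: the first matching map wins.
    const Code* Dispatch(const Map* receiver_map,
                         const std::vector<const Map*>& maps,
                         const std::vector<const Code*>& handlers,
                         const Code* miss_stub) {
      for (size_t i = 0; i < maps.size(); ++i) {
        if (receiver_map == maps[i]) return handlers[i];
      }
      return miss_stub;  // __ Jump(miss_ic, ..., al) in the stub
    }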
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
-                                                       Handle<Map> transition,
-                                                       Handle<String> name) {
+                                                       Map* transition,
+                                                       String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : name
@@ -3071,12 +3162,17 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4);
 
   // Check that the name has not changed.
-  __ cmp(r1, Operand(name));
+  __ cmp(r1, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
   // r3 is used as scratch register. r1 and r2 keep their values if a jump to
   // the miss label is generated.
-  GenerateStoreField(masm(), object, index, transition, r2, r1, r3, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     r2, r1, r3,
+                     &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4);
@@ -3084,12 +3180,11 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3097,25 +3192,29 @@
   //  -- lr    : return address
   //  -- r3    : scratch
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub =
-      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
-  __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(r2,
+                 r3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -3128,18 +3227,12 @@
 
   int receiver_count = receiver_maps->length();
   __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_count; ++i) {
-    __ mov(ip, Operand(receiver_maps->at(i)));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ mov(ip, Operand(map));
     __ cmp(r3, ip);
-    if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
-    } else {
-      Label next_map;
-      __ b(ne, &next_map);
-      __ mov(r3, Operand(transitioned_maps->at(i)));
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
-      __ bind(&next_map);
-    }
+    __ Jump(code, RelocInfo::CODE_TARGET, eq);
   }
 
   __ bind(&miss);
@@ -3147,12 +3240,11 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
-    Handle<JSFunction> function) {
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- r0    : argc
   //  -- r1    : constructor
@@ -3198,7 +3290,12 @@
   // r2: initial map
   // r7: undefined
   __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
+  __ AllocateInNewSpace(r3,
+                        r4,
+                        r5,
+                        r6,
+                        &generic_stub_call,
+                        SIZE_IN_WORDS);
 
   // Allocated the JSObject; now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3230,7 +3327,7 @@
   // r7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  Handle<SharedFunctionInfo> shared(function->shared());
+  SharedFunctionInfo* shared = function->shared();
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3357,7 +3454,6 @@
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3444,7 +3540,6 @@
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3689,9 +3784,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
 
 
@@ -3785,7 +3880,6 @@
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3849,7 +3943,6 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
-          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3989,7 +4082,6 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
-          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4065,9 +4157,9 @@
   __ Ret();
 
   __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
 
 
@@ -4142,11 +4234,8 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
-    MacroAssembler* masm,
-    bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4155,16 +4244,13 @@
   //  -- r3    : scratch
   //  -- r4    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, grow, slow;
-  Label finish_store, check_capacity;
+  Label miss_force_generic;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register scratch = r4;
-  Register elements_reg = r3;
-  Register length_reg = r5;
-  Register scratch2 = r6;
+  Register scratch = r3;
+  Register elements_reg = r4;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -4172,13 +4258,16 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(key_reg, &miss_force_generic);
 
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
-  }
-
-  // Check that the key is within bounds.
+  // Get the elements array and make sure it is a fast element array, not a
+  // copy-on-write ('cow') array.
   __ ldr(elements_reg,
          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+  __ CheckMap(elements_reg,
+              scratch,
+              Heap::kFixedArrayMapRootIndex,
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check that the key is within bounds.
   if (is_js_array) {
     __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
@@ -4186,46 +4275,17 @@
   }
   // Compare smis.
   __ cmp(key_reg, scratch);
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    __ b(hs, &grow);
-  } else {
-    __ b(hs, &miss_force_generic);
-  }
+  __ b(hs, &miss_force_generic);
 
-  // Make sure elements is a fast element array, not 'cow'.
-  __ CheckMap(elements_reg,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
+  __ add(scratch,
+         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ str(value_reg,
+         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ RecordWrite(scratch,
+                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
+                 receiver_reg , elements_reg);
 
-  __ bind(&finish_store);
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ add(scratch,
-           elements_reg,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-    __ str(value_reg, MemOperand(scratch));
-  } else {
-    ASSERT(elements_kind == FAST_ELEMENTS);
-    __ add(scratch,
-           elements_reg,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ add(scratch,
-           scratch,
-           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-    __ str(value_reg, MemOperand(scratch));
-    __ mov(receiver_reg, value_reg);
-    __ RecordWrite(elements_reg,  // Object.
-                   scratch,       // Address.
-                   receiver_reg,  // Value.
-                   kLRHasNotBeenSaved,
-                   kDontSaveFPRegs);
-  }
   // value_reg (r0) is preserved.
   // Done.
   __ Ret();
@@ -4234,84 +4294,12 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element, anything else
-    // must be handled by the runtime. Flags already set by previous compare.
-    __ b(ne, &miss_force_generic);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ ldr(length_reg,
-           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ ldr(elements_reg,
-           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
-    __ b(ne, &check_capacity);
-
-    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
-                          TAG_OBJECT);
-
-    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-    __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
-    __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-    __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
-      __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
-    }
-
-    // Store the element at index zero.
-    __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
-    // Install the new backing store in the JSArray.
-    __ str(elements_reg,
-           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
-                        scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ mov(length_reg, Operand(Smi::FromInt(1)));
-    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ Ret();
-
-    __ bind(&check_capacity);
-    // Check for cow elements, in general they are not handled by this stub
-    __ CheckMap(elements_reg,
-                scratch,
-                Heap::kFixedCOWArrayMapRootIndex,
-                &miss_force_generic,
-                DONT_DO_SMI_CHECK);
-
-    __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-    __ cmp(length_reg, scratch);
-    __ b(hs, &slow);
-
-    // Grow the array and finish the store.
-    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
-    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
 
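In the store sequence restored above, key_reg still holds a tagged smi, so the element address is formed with one shift: key << (kPointerSizeLog2 - kSmiTagSize) both untags the index and scales it to a byte offset. A standalone sketch of that arithmetic, assuming the 32-bit layout (one-bit smi tag, 4-byte pointers):

    #include <cassert>

    const int kSmiTagSize = 1;       // assumed 32-bit smi layout
    const int kPointerSizeLog2 = 2;  // 4-byte pointers on ARM

    // (key >> kSmiTagSize) << kPointerSizeLog2
    //   == key << (kPointerSizeLog2 - kSmiTagSize)
    int ElementByteOffset(int tagged_key) {
      return tagged_key << (kPointerSizeLog2 - kSmiTagSize);
    }

    int main() {
      int key = 3 << kSmiTagSize;            // smi-tagged index 3
      assert(ElementByteOffset(key) == 12);  // 3 elements * 4 bytes
      return 0;
    }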
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array,
-    KeyedAccessGrowMode grow_mode) {
+    bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4321,18 +4309,16 @@
   //  -- r4    : scratch
   //  -- r5    : scratch
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, grow, slow;
-  Label finish_store, check_capacity;
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
 
   Register value_reg = r0;
   Register key_reg = r1;
   Register receiver_reg = r2;
-  Register elements_reg = r3;
-  Register scratch1 = r4;
-  Register scratch2 = r5;
-  Register scratch3 = r6;
+  Register scratch = r3;
+  Register elements_reg = r4;
+  Register mantissa_reg = r5;
+  Register exponent_reg = r6;
   Register scratch4 = r7;
-  Register length_reg = r7;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -4343,30 +4329,90 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ ldr(scratch1,
+    __ ldr(scratch,
            FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  __ cmp(key_reg, scratch1);
-  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-    __ b(hs, &grow);
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
+  // in the exponent.
+  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ cmp(exponent_reg, scratch);
+  __ b(ge, &maybe_nan);
+
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ add(scratch, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ str(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
+  // otherwise it's an Infinity, and the non-NaN code path applies.
+  __ b(gt, &is_nan);
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ cmp(mantissa_reg, Operand(0));
+  __ b(eq, &have_double_value);
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ add(scratch, elements_reg,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch, scratch,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch now holds the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
   } else {
-    __ b(hs, &miss_force_generic);
+    destination = FloatingPointHelper::kCoreRegisters;
   }
 
-  __ bind(&finish_store);
-  __ StoreNumberToDoubleElements(value_reg,
-                                 key_reg,
-                                 receiver_reg,
-                                 elements_reg,
-                                 scratch1,
-                                 scratch2,
-                                 scratch3,
-                                 scratch4,
-                                 &transition_elements_kind);
+  Register untagged_value = receiver_reg;
+  __ SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm,
+      untagged_value,
+      destination,
+      d0,
+      mantissa_reg,
+      exponent_reg,
+      scratch4,
+      s2);
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0, scratch, 0);
+  } else {
+    __ str(mantissa_reg, MemOperand(scratch, 0));
+    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
   __ Ret();
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -4374,77 +4420,6 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element, anything else
-    // must be handled by the runtime. Flags already set by previous compare.
-    __ b(ne, &miss_force_generic);
-
-    // Transition on values that can't be stored in a FixedDoubleArray.
-    Label value_is_smi;
-    __ JumpIfSmi(value_reg, &value_is_smi);
-    __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
-    __ b(ne, &transition_elements_kind);
-    __ bind(&value_is_smi);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ ldr(length_reg,
-           FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ ldr(elements_reg,
-           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
-    __ b(ne, &check_capacity);
-
-    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
-                          TAG_OBJECT);
-
-    // Initialize the new FixedDoubleArray. Leave elements unitialized for
-    // efficiency, they are guaranteed to be initialized before use.
-    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
-    __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
-    __ mov(scratch1,
-           Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-    __ str(scratch1,
-           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
-    // Install the new backing store in the JSArray.
-    __ str(elements_reg,
-           FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
-                        scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ mov(length_reg, Operand(Smi::FromInt(1)));
-    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&check_capacity);
-    // Make sure that the backing store can hold additional elements.
-    __ ldr(scratch1,
-           FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-    __ cmp(length_reg, scratch1);
-    __ b(hs, &slow);
-
-    // Grow the array and finish the store.
-    __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
-    __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
 
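The heap-number path above canonicalizes NaNs on their way into the FixedDoubleArray: an exponent word at or above kNaNOrInfinityLowerBoundUpper32 with a non-zero fraction means NaN, and every NaN is rewritten to a single canonical bit pattern so the unboxed 'hole' NaN stays distinguishable. A host-side sketch of the same rule (the canonical pattern below is an assumption standing in for FixedDoubleArray::canonical_not_the_hole_nan_as_double()):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    uint64_t BitsForDoubleArray(double value) {
      if (std::isnan(value)) {
        // Assumed canonical quiet NaN; the real constant comes from
        // FixedDoubleArray::canonical_not_the_hole_nan_as_double().
        return 0x7ff8000000000000ULL;
      }
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return bits;  // ordinary doubles and infinities pass through untouched
    }
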
 
diff --git a/src/array.js b/src/array.js
index daa75d5..98fe3ac 100644
--- a/src/array.js
+++ b/src/array.js
@@ -27,7 +27,7 @@
 
 // This file relies on the fact that the following declarations have been made
 // in runtime.js:
-// var $Array = global.Array;
+// const $Array = global.Array;
 
 // -------------------------------------------------------------------
 
@@ -201,14 +201,17 @@
 
 
 function ConvertToLocaleString(e) {
-  if (IS_NULL_OR_UNDEFINED(e)) {
+  if (e == null) {
     return '';
   } else {
-    // According to ES5, section 15.4.4.3, the toLocaleString conversion
-    // must throw a TypeError if ToObject(e).toLocaleString isn't
-    // callable.
+    // e_obj's toLocaleString might be overwritten; check that it is a function.
+    // Call ToString if toLocaleString is not a function.
+    // See issue 877615.
     var e_obj = ToObject(e);
-    return %ToString(e_obj.toLocaleString());
+    if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
+      return ToString(e_obj.toLocaleString());
+    else
+      return ToString(e);
   }
 }
 
@@ -328,9 +331,8 @@
     // would be the appropriate test.  We follow KJS in consulting the
     // prototype.
     var current = array[index];
-    if (!IS_UNDEFINED(current) || index in array) {
+    if (!IS_UNDEFINED(current) || index in array)
       deleted_elements[i] = current;
-    }
   }
 }
 
@@ -379,31 +381,18 @@
 
 
 function ArrayToString() {
-  var array;
-  var func;
-  if (IS_ARRAY(this)) {
-    func = this.join;
-    if (func === ArrayJoin) {
-      return Join(this, this.length, ',', ConvertToString);
-    }
-    array = this;
-  } else {
-    array = ToObject(this);
-    func = array.join;
+  if (!IS_ARRAY(this)) {
+    throw new $TypeError('Array.prototype.toString is not generic');
   }
-  if (!IS_SPEC_FUNCTION(func)) {
-    return %_CallFunction(array, ObjectToString);
-  }
-  return %_CallFunction(array, func);
+  return Join(this, this.length, ',', ConvertToString);
 }
 
 
 function ArrayToLocaleString() {
-  var array = ToObject(this);
-  var arrayLen = array.length;
-  var len = TO_UINT32(arrayLen);
-  if (len === 0) return "";
-  return Join(array, len, ',', ConvertToLocaleString);
+  if (!IS_ARRAY(this)) {
+    throw new $TypeError('Array.prototype.toLocaleString is not generic');
+  }
+  return Join(this, this.length, ',', ConvertToLocaleString);
 }
 
 
@@ -496,12 +485,12 @@
 
     if (j_complement <= i) {
       high = j;
-      while (keys[--high_counter] == j) { }
+      while (keys[--high_counter] == j);
       low = j_complement;
     }
     if (j_complement >= i) {
       low = i;
-      while (keys[++low_counter] == i) { }
+      while (keys[++low_counter] == i);
       high = len - i - 1;
     }
 
@@ -577,11 +566,10 @@
 
   var first = this[0];
 
-  if (IS_ARRAY(this)) {
+  if (IS_ARRAY(this))
     SmartMove(this, 0, 1, len, 0);
-  } else {
+  else
     SimpleMove(this, 0, 1, len, 0);
-  }
 
   this.length = len - 1;
 
@@ -598,11 +586,10 @@
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
 
-  if (IS_ARRAY(this)) {
+  if (IS_ARRAY(this))
     SmartMove(this, 0, 0, len, num_arguments);
-  } else {
+  else
     SimpleMove(this, 0, 0, len, num_arguments);
-  }
 
   for (var i = 0; i < num_arguments; i++) {
     this[i] = %_Arguments(i);
@@ -757,7 +744,7 @@
   }
   var receiver = %GetDefaultReceiver(comparefn);
 
-  var InsertionSort = function InsertionSort(a, from, to) {
+  function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
@@ -771,9 +758,9 @@
       }
       a[j + 1] = element;
     }
-  };
+  }
 
-  var QuickSort = function QuickSort(a, from, to) {
+  function QuickSort(a, from, to) {
     // Insertion sort is faster for short arrays.
     if (to - from <= 10) {
       InsertionSort(a, from, to);
@@ -841,12 +828,12 @@
     }
     QuickSort(a, from, low_end);
     QuickSort(a, high_start, to);
-  };
+  }
 
   // Copy elements in the range 0..length from obj's prototype chain
   // to obj itself, if obj has holes. Return one more than the maximal index
   // of a prototype property.
-  var CopyFromPrototype = function CopyFromPrototype(obj, length) {
+  function CopyFromPrototype(obj, length) {
     var max = 0;
     for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
       var indices = %GetArrayKeys(proto, length);
@@ -873,12 +860,12 @@
       }
     }
     return max;
-  };
+  }
 
   // Set a value of "undefined" on all indices in the range from..to
   // where a prototype of obj has an element. I.e., shadow all prototype
   // elements in that range.
-  var ShadowPrototypeElements = function(obj, from, to) {
+  function ShadowPrototypeElements(obj, from, to) {
     for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
       var indices = %GetArrayKeys(proto, to);
       if (indices.length > 0) {
@@ -901,9 +888,9 @@
         }
       }
     }
-  };
+  }
 
-  var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
+  function SafeRemoveArrayHoles(obj) {
     // Copy defined elements from the end to fill in all holes and undefineds
     // in the beginning of the array.  Write undefineds and holes at the end
     // after the loop is finished.
@@ -958,7 +945,7 @@
 
     // Return the number of defined elements.
     return first_undefined;
-  };
+  }
 
   var length = TO_UINT32(this.length);
   if (length < 2) return this;
@@ -1006,32 +993,25 @@
                         ["Array.prototype.filter"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = ToUint32(array.length);
-
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
-  } else if (!IS_SPEC_OBJECT(receiver)) {
-    receiver = ToObject(receiver);
   }
-
-  var result = new $Array();
-  var accumulator = new InternalArray();
-  var accumulator_length = 0;
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = ToUint32(this.length);
+  var result = [];
+  var result_length = 0;
   for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) {
-        accumulator[accumulator_length++] = element;
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (%_CallFunction(receiver, current, i, this, f)) {
+        result[result_length++] = current;
       }
     }
   }
-  %MoveArrayContents(accumulator, result);
   return result;
 }
 
@@ -1042,24 +1022,19 @@
                         ["Array.prototype.forEach"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = TO_UINT32(array.length);
-
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
-  } else if (!IS_SPEC_OBJECT(receiver)) {
-    receiver = ToObject(receiver);
   }
-
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      %_CallFunction(receiver, element, i, array, f);
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      %_CallFunction(receiver, current, i, this, f);
     }
   }
 }
@@ -1073,24 +1048,19 @@
                         ["Array.prototype.some"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = TO_UINT32(array.length);
-
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
-  } else if (!IS_SPEC_OBJECT(receiver)) {
-    receiver = ToObject(receiver);
   }
-
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (%_CallFunction(receiver, element, i, array, f)) return true;
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (%_CallFunction(receiver, current, i, this, f)) return true;
     }
   }
   return false;
@@ -1103,24 +1073,19 @@
                         ["Array.prototype.every"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = TO_UINT32(array.length);
-
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
-  } else if (!IS_SPEC_OBJECT(receiver)) {
-    receiver = ToObject(receiver);
   }
-
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = TO_UINT32(this.length);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      if (!%_CallFunction(receiver, element, i, array, f)) return false;
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      if (!%_CallFunction(receiver, current, i, this, f)) return false;
     }
   }
   return true;
@@ -1132,26 +1097,21 @@
                         ["Array.prototype.map"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = TO_UINT32(array.length);
-
   if (!IS_SPEC_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   if (IS_NULL_OR_UNDEFINED(receiver)) {
     receiver = %GetDefaultReceiver(f) || receiver;
-  } else if (!IS_SPEC_OBJECT(receiver)) {
-    receiver = ToObject(receiver);
   }
-
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = TO_UINT32(this.length);
   var result = new $Array();
   var accumulator = new InternalArray(length);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      accumulator[i] = %_CallFunction(receiver, element, i, array, f);
+    var current = this[i];
+    if (!IS_UNDEFINED(current) || i in this) {
+      accumulator[i] = %_CallFunction(receiver, current, i, this, f);
     }
   }
   %MoveArrayContents(accumulator, result);
@@ -1285,20 +1245,19 @@
                         ["Array.prototype.reduce"]);
   }
 
-  // Pull out the length so that modifications to the length in the
-  // loop will not affect the looping and side effects are visible.
-  var array = ToObject(this);
-  var length = ToUint32(array.length);
-
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
 
+  // Pull out the length so that modifications to the length in the
+  // loop will not affect the looping.
+  var length = ToUint32(this.length);
   var i = 0;
+
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i < length; i++) {
-      current = array[i];
-      if (!IS_UNDEFINED(current) || i in array) {
+      current = this[i];
+      if (!IS_UNDEFINED(current) || i in this) {
         i++;
         break find_initial;
       }
@@ -1308,9 +1267,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i < length; i++) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+    var element = this[i];
+    if (!IS_UNDEFINED(element) || i in this) {
+      current = %_CallFunction(receiver, current, element, i, this, callback);
     }
   }
   return current;
@@ -1322,20 +1281,15 @@
                         ["Array.prototype.reduceRight"]);
   }
 
-  // Pull out the length so that side effects are visible before the
-  // callback function is checked.
-  var array = ToObject(this);
-  var length = ToUint32(array.length);
-
   if (!IS_SPEC_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
+  var i = ToUint32(this.length) - 1;
 
-  var i = length - 1;
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i >= 0; i--) {
-      current = array[i];
-      if (!IS_UNDEFINED(current) || i in array) {
+      current = this[i];
+      if (!IS_UNDEFINED(current) || i in this) {
         i--;
         break find_initial;
       }
@@ -1345,9 +1299,9 @@
 
   var receiver = %GetDefaultReceiver(callback);
   for (; i >= 0; i--) {
-    if (i in array) {
-      var element = array[i];
-      current = %_CallFunction(receiver, current, element, i, array, callback);
+    var element = this[i];
+    if (!IS_UNDEFINED(element) || i in this) {
+      current = %_CallFunction(receiver, current, element, i, this, callback);
     }
   }
   return current;
@@ -1373,7 +1327,7 @@
 
   var specialFunctions = %SpecialArrayFunctions({});
 
-  var getFunction = function(name, jsBuiltin, len) {
+  function getFunction(name, jsBuiltin, len) {
     var f = jsBuiltin;
     if (specialFunctions.hasOwnProperty(name)) {
       f = specialFunctions[name];
@@ -1382,13 +1336,13 @@
       %FunctionSetLength(f, len);
     }
     return f;
-  };
+  }
 
   // Set up non-enumerable functions of the Array.prototype object and
   // set their names.
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
-  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
     "toString", getFunction("toString", ArrayToString),
     "toLocaleString", getFunction("toLocaleString", ArrayToLocaleString),
     "join", getFunction("join", ArrayJoin),
diff --git a/src/assembler.cc b/src/assembler.cc
index 4944202..ad5f350 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// Copyright (c) 2011 Sun Microsystems Inc.
 // All Rights Reserved.
 //
 // Redistribution and use in source and binary forms, with or without
@@ -30,43 +30,23 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
-#include "assembler.h"
+#include "v8.h"
 
-#include <math.h>  // For cos, log, pow, sin, tan, etc.
-#include "api.h"
-#include "builtins.h"
-#include "counters.h"
-#include "cpu.h"
-#include "debug.h"
+#include "arguments.h"
 #include "deoptimizer.h"
 #include "execution.h"
-#include "ic.h"
-#include "isolate.h"
-#include "jsregexp.h"
-#include "lazy-instance.h"
-#include "platform.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
+#include "ic-inl.h"
+#include "factory.h"
 #include "runtime.h"
+#include "runtime-profiler.h"
 #include "serialize.h"
-#include "store-buffer-inl.h"
 #include "stub-cache.h"
-#include "token.h"
-
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips-inl.h"
-#else
-#error "Unknown architecture."
-#endif
-
+#include "regexp-stack.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+#include "platform.h"
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -85,36 +65,15 @@
 namespace v8 {
 namespace internal {
 
-// -----------------------------------------------------------------------------
-// Common double constants.
 
-struct DoubleConstant BASE_EMBEDDED {
-  double min_int;
-  double one_half;
-  double minus_zero;
-  double zero;
-  double uint8_max_value;
-  double negative_infinity;
-  double canonical_non_hole_nan;
-  double the_hole_nan;
-};
-
-struct InitializeDoubleConstants {
-  static void Construct(DoubleConstant* double_constants) {
-    double_constants->min_int = kMinInt;
-    double_constants->one_half = 0.5;
-    double_constants->minus_zero = -0.0;
-    double_constants->uint8_max_value = 255;
-    double_constants->zero = 0.0;
-    double_constants->canonical_non_hole_nan = OS::nan_value();
-    double_constants->the_hole_nan = BitCast<double>(kHoleNanInt64);
-    double_constants->negative_infinity = -V8_INFINITY;
-  }
-};
-
-static LazyInstance<DoubleConstant, InitializeDoubleConstants>::type
-    double_constants = LAZY_INSTANCE_INITIALIZER;
-
+const double DoubleConstant::min_int = kMinInt;
+const double DoubleConstant::one_half = 0.5;
+const double DoubleConstant::minus_zero = -0.0;
+const double DoubleConstant::uint8_max_value = 255;
+const double DoubleConstant::zero = 0.0;
+const double DoubleConstant::canonical_non_hole_nan = OS::nan_value();
+const double DoubleConstant::the_hole_nan = BitCast<double>(kHoleNanInt64);
+const double DoubleConstant::negative_infinity = -V8_INFINITY;
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
 // -----------------------------------------------------------------------------
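
The revert also trades the lazily constructed double_constants instance for plain static members of DoubleConstant. Either way the requirement is the same: each constant must sit at an address that is stable for the life of the process, because ExternalReference hands that raw address to generated code, which loads the value directly. A reduced model of the arrangement (names are illustrative, not the V8 API):

    #include <cstdio>

    // Static storage duration means a fixed address that generated code
    // can safely embed.
    struct Constants {
      static const double one_half;
    };
    const double Constants::one_half = 0.5;

    // Model of ExternalReference::address_of_one_half().
    const void* address_of_one_half() { return &Constants::one_half; }

    int main() {
      const double* p = static_cast<const double*>(address_of_one_half());
      std::printf("%p -> %g\n", static_cast<const void*>(p), *p);
      return 0;
    }
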
@@ -557,7 +516,6 @@
 
 
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
-  rinfo_.host_ = code;
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
   // Relocation info is read backwards.
@@ -778,38 +736,9 @@
   : address_(table_ref.address()) {}
 
 
-ExternalReference ExternalReference::
-    incremental_marking_record_write_function(Isolate* isolate) {
-  return ExternalReference(Redirect(
-      isolate,
-      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
-}
-
-
-ExternalReference ExternalReference::
-    incremental_evacuation_record_write_function(Isolate* isolate) {
-  return ExternalReference(Redirect(
-      isolate,
-      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
-}
-
-
-ExternalReference ExternalReference::
-    store_buffer_overflow_function(Isolate* isolate) {
-  return ExternalReference(Redirect(
-      isolate,
-      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
-}
-
-
-ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
-}
-
-
 ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
-  return
-      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
+  return ExternalReference(Redirect(isolate,
+                                    FUNCTION_ADDR(Runtime::PerformGC)));
 }
 
 
@@ -835,17 +764,6 @@
 }
 
 
-ExternalReference ExternalReference::get_date_field_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
-}
-
-
-ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
-  return ExternalReference(isolate->date_cache()->stamp_address());
-}
-
-
 ExternalReference ExternalReference::transcendental_cache_array_address(
     Isolate* isolate) {
   return ExternalReference(
@@ -867,6 +785,11 @@
 }
 
 
+ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->global_contexts_list_address());
+}
+
+
 ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
   return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
 }
@@ -879,8 +802,19 @@
 }
 
 
-ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->roots_array_start());
+ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
+  return ExternalReference(isolate->factory()->the_hole_value().location());
+}
+
+
+ExternalReference ExternalReference::arguments_marker_location(
+    Isolate* isolate) {
+  return ExternalReference(isolate->factory()->arguments_marker().location());
+}
+
+
+ExternalReference ExternalReference::roots_address(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->roots_address());
 }
 
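Note the difference in indirection here: roots_address points generated code at the heap's root table, while the_hole_value_location and arguments_marker_location expose a handle's location, i.e. the Object** cell the handle wraps. Code that embeds the cell address and re-reads it on each use stays correct even if the GC moves the underlying object and updates the cell. A tiny model of that pattern (pure illustration, not the V8 types):

    #include <cstdio>

    struct Object { int tag; };

    Object old_space = {1};
    Object new_space = {2};
    Object* hole_cell = &old_space;  // the handle's "location"

    int main() {
      Object** location = &hole_cell;         // what gets embedded
      std::printf("%d\n", (*location)->tag);  // 1
      hole_cell = &new_space;                 // GC "moved" the object
      std::printf("%d\n", (*location)->tag);  // 2, same embedded address
      return 0;
    }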
 
@@ -906,14 +840,9 @@
 }
 
 
-ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
-}
-
-
 ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
-  return ExternalReference(reinterpret_cast<Address>(
-      isolate->heap()->NewSpaceMask()));
+  Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
+  return ExternalReference(mask);
 }
 
 
@@ -959,49 +888,49 @@
 
 ExternalReference ExternalReference::address_of_min_int() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->min_int));
+      const_cast<double*>(&DoubleConstant::min_int)));
 }
 
 
 ExternalReference ExternalReference::address_of_one_half() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->one_half));
+      const_cast<double*>(&DoubleConstant::one_half)));
 }
 
 
 ExternalReference ExternalReference::address_of_minus_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->minus_zero));
+      const_cast<double*>(&DoubleConstant::minus_zero)));
 }
 
 
 ExternalReference ExternalReference::address_of_zero() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->zero));
+      const_cast<double*>(&DoubleConstant::zero)));
 }
 
 
 ExternalReference ExternalReference::address_of_uint8_max_value() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->uint8_max_value));
+      const_cast<double*>(&DoubleConstant::uint8_max_value)));
 }
 
 
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->negative_infinity));
+      const_cast<double*>(&DoubleConstant::negative_infinity)));
 }
 
 
 ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->canonical_non_hole_nan));
+      const_cast<double*>(&DoubleConstant::canonical_non_hole_nan)));
 }
 
 
 ExternalReference ExternalReference::address_of_the_hole_nan() {
   return ExternalReference(reinterpret_cast<void*>(
-      &double_constants.Pointer()->the_hole_nan));
+      const_cast<double*>(&DoubleConstant::the_hole_nan)));
 }
 
 
@@ -1096,11 +1025,6 @@
 }
 
 
-static double math_tan_double(double x) {
-  return tan(x);
-}
-
-
 static double math_log_double(double x) {
   return log(x);
 }
@@ -1122,14 +1046,6 @@
 }
 
 
-ExternalReference ExternalReference::math_tan_double_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(math_tan_double),
-                                    BUILTIN_FP_CALL));
-}
-
-
 ExternalReference ExternalReference::math_log_double_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
@@ -1158,9 +1074,17 @@
 
 
 double power_double_double(double x, double y) {
-  // The checks for special cases can be dropped in ia32 because it has already
-  // been done in generated code before bailing out here.
-  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
+  int y_int = static_cast<int>(y);
+  if (y == y_int) {
+    return power_double_int(x, y_int);  // Returns 1.0 for exponent 0.
+  }
+  if (!isinf(x)) {
+    if (y == 0.5) return sqrt(x + 0.0);  // -0 must be converted to +0.
+    if (y == -0.5) return 1.0 / sqrt(x + 0.0);
+  }
+  if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return OS::nan_value();
+  }
   return pow(x, y);
 }
 
@@ -1187,23 +1111,6 @@
 }
 
 
-bool EvalComparison(Token::Value op, double op1, double op2) {
-  ASSERT(Token::IsCompareOp(op));
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT: return (op1 == op2);
-    case Token::NE: return (op1 != op2);
-    case Token::LT: return (op1 < op2);
-    case Token::GT: return (op1 > op2);
-    case Token::LTE: return (op1 <= op2);
-    case Token::GTE: return (op1 >= op2);
-    default:
-      UNREACHABLE();
-      return false;
-  }
-}
-
-
 ExternalReference ExternalReference::double_fp_operation(
     Token::Value operation, Isolate* isolate) {
   typedef double BinaryFPOperation(double x, double y);
diff --git a/src/assembler.h b/src/assembler.h
index 918a2a6..d58034d 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,27 +30,19 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
 
-#include "v8.h"
-
 #include "allocation.h"
-#include "builtins.h"
 #include "gdb-jit.h"
-#include "isolate.h"
 #include "runtime.h"
 #include "token.h"
 
 namespace v8 {
-
-class ApiFunction;
-
 namespace internal {
 
-struct StatsCounter;
 const unsigned kNoASTId = -1;
 // -----------------------------------------------------------------------------
 // Platform independent assembler base class.
@@ -67,6 +59,21 @@
   int jit_cookie_;
 };
 
+// -----------------------------------------------------------------------------
+// Common double constants.
+
+class DoubleConstant: public AllStatic {
+ public:
+  static const double min_int;
+  static const double one_half;
+  static const double minus_zero;
+  static const double zero;
+  static const double uint8_max_value;
+  static const double negative_infinity;
+  static const double canonical_non_hole_nan;
+  static const double the_hole_nan;
+};
+
 
 // -----------------------------------------------------------------------------
 // Labels represent pc locations; they are typically jump or call targets.
@@ -136,9 +143,6 @@
 };
 
 
-enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
-
-
 // -----------------------------------------------------------------------------
 // Relocation information
 
@@ -212,9 +216,8 @@
 
 
   RelocInfo() {}
-
-  RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
-      : pc_(pc), rmode_(rmode), data_(data), host_(host) {
+  RelocInfo(byte* pc, Mode rmode, intptr_t data)
+      : pc_(pc), rmode_(rmode), data_(data) {
   }
 
   static inline bool IsConstructCall(Mode mode) {
@@ -223,9 +226,6 @@
   static inline bool IsCodeTarget(Mode mode) {
     return mode <= LAST_CODE_ENUM;
   }
-  static inline bool IsEmbeddedObject(Mode mode) {
-    return mode == EMBEDDED_OBJECT;
-  }
   // Is the relocation mode affected by GC?
   static inline bool IsGCRelocMode(Mode mode) {
     return mode <= LAST_GCED_ENUM;
@@ -258,13 +258,12 @@
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
-  Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
 
   // Is the pointer this relocation info refers to coded like a plain pointer
-  // or is it strange in some way (e.g. relative or patched into a series of
+  // or is it strange in some way (e.g. relative or patched into a series of
   // instructions).
   bool IsCodedSpecially();
 
@@ -272,17 +271,14 @@
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
   INLINE(Address target_address());
-  INLINE(void set_target_address(Address target,
-                                 WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+  INLINE(void set_target_address(Address target));
   INLINE(Object* target_object());
   INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
-  INLINE(void set_target_object(Object* target,
-                                WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+  INLINE(void set_target_object(Object* target));
   INLINE(JSGlobalPropertyCell* target_cell());
   INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
-  INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
-                              WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
+  INLINE(void set_target_cell(JSGlobalPropertyCell* cell));
 
 
   // Read the address of the word containing the target_address in an
@@ -357,14 +353,13 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
-  Code* host_;
 #ifdef V8_TARGET_ARCH_MIPS
   // Code and Embedded Object pointers in mips are stored split
   // across two consecutive 32-bit instructions. Heap management
   // routines expect to access these pointers indirectly. The following
   // location provides a place for these pointers to exist naturally
   // when accessed via the Iterator.
-  Object* reconstructed_obj_ptr_;
+  Object *reconstructed_obj_ptr_;
   // External-reference pointers are also split across instruction-pairs
   // in mips, but are accessed via indirect pointers. This location
   // provides a place for that pointer to exist naturally. Its address
@@ -566,13 +561,6 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
-  static ExternalReference incremental_marking_record_write_function(
-      Isolate* isolate);
-  static ExternalReference incremental_evacuation_record_write_function(
-      Isolate* isolate);
-  static ExternalReference store_buffer_overflow_function(
-      Isolate* isolate);
-  static ExternalReference flush_icache_function(Isolate* isolate);
   static ExternalReference perform_gc_function(Isolate* isolate);
   static ExternalReference fill_heap_number_with_random_function(
       Isolate* isolate);
@@ -580,19 +568,23 @@
   static ExternalReference transcendental_cache_array_address(Isolate* isolate);
   static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
 
-  static ExternalReference get_date_field_function(Isolate* isolate);
-  static ExternalReference date_cache_stamp(Isolate* isolate);
-
   // Deoptimization support.
   static ExternalReference new_deoptimizer_function(Isolate* isolate);
   static ExternalReference compute_output_frames_function(Isolate* isolate);
+  static ExternalReference global_contexts_list(Isolate* isolate);
 
   // Static data in the keyed lookup cache.
   static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
   static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
 
-  // Static variable Heap::roots_array_start()
-  static ExternalReference roots_array_start(Isolate* isolate);
+  // Static variable Factory::the_hole_value.location()
+  static ExternalReference the_hole_value_location(Isolate* isolate);
+
+  // Static variable Factory::arguments_marker.location()
+  static ExternalReference arguments_marker_location(Isolate* isolate);
+
+  // Static variable Heap::roots_address()
+  static ExternalReference roots_address(Isolate* isolate);
 
   // Static variable StackGuard::address_of_jslimit()
   static ExternalReference address_of_stack_limit(Isolate* isolate);
@@ -614,10 +606,6 @@
   static ExternalReference new_space_start(Isolate* isolate);
   static ExternalReference new_space_mask(Isolate* isolate);
   static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
-  static ExternalReference new_space_mark_bits(Isolate* isolate);
-
-  // Write barrier.
-  static ExternalReference store_buffer_top(Isolate* isolate);
 
   // Used for fast allocation in generated code.
   static ExternalReference new_space_allocation_top_address(Isolate* isolate);
@@ -647,7 +635,6 @@
 
   static ExternalReference math_sin_double_function(Isolate* isolate);
   static ExternalReference math_cos_double_function(Isolate* isolate);
-  static ExternalReference math_tan_double_function(Isolate* isolate);
   static ExternalReference math_log_double_function(Isolate* isolate);
 
   Address address() const {return reinterpret_cast<Address>(address_);}
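
For reference, the address() accessor just above is the whole trick behind ExternalReference: it is a thin wrapper around a raw VM-internal address that generated code can embed as data. A minimal standalone sketch of the idea (plain C++, toy names, not the V8 class):

    #include <cstdio>

    // Toy ExternalReference: wraps the address of some per-VM static so a
    // code generator can embed the address now and dereference it later.
    class ToyExternalReference {
     public:
      explicit ToyExternalReference(void* address) : address_(address) {}
      void* address() const { return address_; }
     private:
      void* address_;
    };

    static int stack_limit = 4096;  // stand-in for a per-isolate static

    int main() {
      ToyExternalReference ref(&stack_limit);
      std::printf("embed address %p\n", ref.address());
      return 0;
    }
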
@@ -812,33 +799,33 @@
 // -----------------------------------------------------------------------------
 // Utility functions
 
-inline bool is_intn(int x, int n)  {
+static inline bool is_intn(int x, int n)  {
   return -(1 << (n-1)) <= x && x < (1 << (n-1));
 }
 
-inline bool is_int8(int x)  { return is_intn(x, 8); }
-inline bool is_int16(int x)  { return is_intn(x, 16); }
-inline bool is_int18(int x)  { return is_intn(x, 18); }
-inline bool is_int24(int x)  { return is_intn(x, 24); }
+static inline bool is_int8(int x)  { return is_intn(x, 8); }
+static inline bool is_int16(int x)  { return is_intn(x, 16); }
+static inline bool is_int18(int x)  { return is_intn(x, 18); }
+static inline bool is_int24(int x)  { return is_intn(x, 24); }
 
-inline bool is_uintn(int x, int n) {
+static inline bool is_uintn(int x, int n) {
   return (x & -(1 << n)) == 0;
 }
 
-inline bool is_uint2(int x)  { return is_uintn(x, 2); }
-inline bool is_uint3(int x)  { return is_uintn(x, 3); }
-inline bool is_uint4(int x)  { return is_uintn(x, 4); }
-inline bool is_uint5(int x)  { return is_uintn(x, 5); }
-inline bool is_uint6(int x)  { return is_uintn(x, 6); }
-inline bool is_uint8(int x)  { return is_uintn(x, 8); }
-inline bool is_uint10(int x)  { return is_uintn(x, 10); }
-inline bool is_uint12(int x)  { return is_uintn(x, 12); }
-inline bool is_uint16(int x)  { return is_uintn(x, 16); }
-inline bool is_uint24(int x)  { return is_uintn(x, 24); }
-inline bool is_uint26(int x)  { return is_uintn(x, 26); }
-inline bool is_uint28(int x)  { return is_uintn(x, 28); }
+static inline bool is_uint2(int x)  { return is_uintn(x, 2); }
+static inline bool is_uint3(int x)  { return is_uintn(x, 3); }
+static inline bool is_uint4(int x)  { return is_uintn(x, 4); }
+static inline bool is_uint5(int x)  { return is_uintn(x, 5); }
+static inline bool is_uint6(int x)  { return is_uintn(x, 6); }
+static inline bool is_uint8(int x)  { return is_uintn(x, 8); }
+static inline bool is_uint10(int x)  { return is_uintn(x, 10); }
+static inline bool is_uint12(int x)  { return is_uintn(x, 12); }
+static inline bool is_uint16(int x)  { return is_uintn(x, 16); }
+static inline bool is_uint24(int x)  { return is_uintn(x, 24); }
+static inline bool is_uint26(int x)  { return is_uintn(x, 26); }
+static inline bool is_uint28(int x)  { return is_uintn(x, 28); }
 
-inline int NumberOfBitsSet(uint32_t x) {
+static inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
   for (num_bits_set = 0; x; x >>= 1) {
     num_bits_set += x & 1;
@@ -846,8 +833,6 @@
   return num_bits_set;
 }
 
-bool EvalComparison(Token::Value op, double op1, double op2);
-
 // Computes pow(x, y) with the special cases in the spec for Math.pow.
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);
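
The static qualifiers restored in the hunk above change linkage only; the helpers themselves are unchanged. As a reminder of what they compute, here is a minimal standalone exercise of the signed/unsigned field checks (plain C++ sketch, assuming a hypothetical 8-bit immediate field):

    #include <cassert>

    // x fits an n-bit two's-complement field: -(2^(n-1)) <= x < 2^(n-1).
    static inline bool is_intn(int x, int n) {
      return -(1 << (n - 1)) <= x && x < (1 << (n - 1));
    }

    // x fits an n-bit unsigned field: no bits at position n or above.
    static inline bool is_uintn(int x, int n) {
      return (x & -(1 << n)) == 0;
    }

    int main() {
      assert(is_intn(-128, 8) && is_intn(127, 8));    // in range
      assert(!is_intn(128, 8));                       // needs 9 bits
      assert(is_uintn(255, 8) && !is_uintn(256, 8));
      return 0;
    }
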
diff --git a/src/ast-inl.h b/src/ast-inl.h
new file mode 100644
index 0000000..731ad2f
--- /dev/null
+++ b/src/ast-inl.h
@@ -0,0 +1,121 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_AST_INL_H_
+#define V8_AST_INL_H_
+
+#include "v8.h"
+
+#include "ast.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+SwitchStatement::SwitchStatement(Isolate* isolate,
+                                 ZoneStringList* labels)
+    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+      tag_(NULL), cases_(NULL) {
+}
+
+
+Block::Block(Isolate* isolate,
+             ZoneStringList* labels,
+             int capacity,
+             bool is_initializer_block)
+    : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
+      statements_(capacity),
+      is_initializer_block_(is_initializer_block),
+      block_scope_(NULL) {
+}
+
+
+BreakableStatement::BreakableStatement(Isolate* isolate,
+                                       ZoneStringList* labels,
+                                       Type type)
+    : labels_(labels),
+      type_(type),
+      entry_id_(GetNextId(isolate)),
+      exit_id_(GetNextId(isolate)) {
+  ASSERT(labels == NULL || labels->length() > 0);
+}
+
+
+IterationStatement::IterationStatement(Isolate* isolate, ZoneStringList* labels)
+    : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
+      body_(NULL),
+      continue_target_(),
+      osr_entry_id_(GetNextId(isolate)) {
+}
+
+
+DoWhileStatement::DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
+      cond_(NULL),
+      condition_position_(-1),
+      continue_id_(GetNextId(isolate)),
+      back_edge_id_(GetNextId(isolate)) {
+}
+
+
+WhileStatement::WhileStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
+      cond_(NULL),
+      may_have_function_literal_(true),
+      body_id_(GetNextId(isolate)) {
+}
+
+
+ForStatement::ForStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
+      init_(NULL),
+      cond_(NULL),
+      next_(NULL),
+      may_have_function_literal_(true),
+      loop_variable_(NULL),
+      continue_id_(GetNextId(isolate)),
+      body_id_(GetNextId(isolate)) {
+}
+
+
+ForInStatement::ForInStatement(Isolate* isolate, ZoneStringList* labels)
+    : IterationStatement(isolate, labels),
+      each_(NULL),
+      enumerable_(NULL),
+      assignment_id_(GetNextId(isolate)) {
+}
+
+
+bool FunctionLiteral::strict_mode() const {
+  return scope()->is_strict_mode();
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_AST_INL_H_
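
Every constructor in this restored file pulls ids from GetNextId(isolate) at construction time, so each node gets stable, monotonically increasing bailout ids in construction order. A toy sketch of that pattern (not V8 code; the real counter lives on the Isolate):

    // Toy id source standing in for the isolate's ast_node_id counter.
    struct IdSource {
      int next_id;
      IdSource() : next_id(0) {}
      int GetNextId() { return next_id++; }
    };

    // Each node reserves its ids once, in construction order, so later
    // passes can key side tables (e.g. deopt points) by those ids.
    struct ToyStatement {
      int entry_id;
      int exit_id;
      explicit ToyStatement(IdSource* ids)
          : entry_id(ids->GetNextId()), exit_id(ids->GetNextId()) {}
    };
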
diff --git a/src/ast.cc b/src/ast.cc
index 4b6ae68..418cc43 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,15 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "ast.h"
+#include "v8.h"
 
-#include <math.h>  // For isfinite.
-#include "builtins.h"
-#include "conversions.h"
-#include "hashmap.h"
+#include "ast.h"
 #include "parser.h"
-#include "property-details.h"
-#include "property.h"
 #include "scopes.h"
 #include "string-stream.h"
 #include "type-info.h"
@@ -53,19 +48,16 @@
 // ----------------------------------------------------------------------------
 // Implementation of other node functionality.
 
-
-bool Expression::IsSmiLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsSmi();
+Assignment* ExpressionStatement::StatementAsSimpleAssignment() {
+  return (expression()->AsAssignment() != NULL &&
+          !expression()->AsAssignment()->is_compound())
+      ? expression()->AsAssignment()
+      : NULL;
 }
 
 
-bool Expression::IsStringLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsString();
-}
-
-
-bool Expression::IsNullLiteral() {
-  return AsLiteral() != NULL && AsLiteral()->handle()->IsNull();
+CountOperation* ExpressionStatement::StatementAsCountOperation() {
+  return expression()->AsCountOperation();
 }
 
 
@@ -74,10 +66,9 @@
       name_(var->name()),
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
+      inside_with_(false),
       is_trivial_(false),
-      is_lvalue_(false),
-      position_(RelocInfo::kNoPosition),
-      interface_(var->interface()) {
+      position_(RelocInfo::kNoPosition) {
   BindTo(var);
 }
 
@@ -85,16 +76,15 @@
 VariableProxy::VariableProxy(Isolate* isolate,
                              Handle<String> name,
                              bool is_this,
-                             int position,
-                             Interface* interface)
+                             bool inside_with,
+                             int position)
     : Expression(isolate),
       name_(name),
       var_(NULL),
       is_this_(is_this),
+      inside_with_(inside_with),
       is_trivial_(false),
-      is_lvalue_(false),
-      position_(position),
-      interface_(interface) {
+      position_(position) {
   // Names must be canonicalized for fast equality checks.
   ASSERT(name->IsSymbol());
 }
@@ -129,7 +119,18 @@
       assignment_id_(GetNextId(isolate)),
       block_start_(false),
       block_end_(false),
-      is_monomorphic_(false) { }
+      is_monomorphic_(false) {
+  ASSERT(Token::IsAssignmentOp(op));
+  if (is_compound()) {
+    binary_operation_ =
+        new(isolate->zone()) BinaryOperation(isolate,
+                                             binary_op(),
+                                             target,
+                                             value,
+                                             pos + 1);
+    compound_load_id_ = GetNextId(isolate);
+  }
+}
 
 
 Token::Value Assignment::binary_op() const {
@@ -156,30 +157,12 @@
 }
 
 
-int FunctionLiteral::start_position() const {
-  return scope()->start_position();
-}
-
-
-int FunctionLiteral::end_position() const {
-  return scope()->end_position();
-}
-
-
-LanguageMode FunctionLiteral::language_mode() const {
-  return scope()->language_mode();
-}
-
-
-ObjectLiteral::Property::Property(Literal* key,
-                                  Expression* value,
-                                  Isolate* isolate) {
+ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   emit_store_ = true;
   key_ = key;
   value_ = value;
   Object* k = *key->handle();
-  if (k->IsSymbol() &&
-      isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
+  if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
     kind_ = PROTOTYPE;
   } else if (value_->AsMaterializedLiteral() != NULL) {
     kind_ = MATERIALIZED_LITERAL;
@@ -192,7 +175,9 @@
 
 
 ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
+  Isolate* isolate = Isolate::Current();
   emit_store_ = true;
+  key_ = new(isolate->zone()) Literal(isolate, value->name());
   value_ = value;
   kind_ = is_getter ? GETTER : SETTER;
 }
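
A couple of hunks above, the restored Assignment constructor eagerly materializes the implicit binary operation of a compound assignment (an `x += y` node is recorded alongside an `x + y` node, at position pos + 1). A toy sketch of that desugaring (std::string stand-ins, not V8 types):

    #include <string>

    // Toy compound assignment: "x += y" carries the implicit "x + y".
    struct ToyAssignment {
      std::string op, target, value;
      std::string binary_operation;  // empty unless compound

      ToyAssignment(const std::string& op, const std::string& target,
                    const std::string& value)
          : op(op), target(target), value(value) {
        if (op != "=") {  // "+=", "-=", ...: drop '=' to get the binary op
          binary_operation =
              target + " " + op.substr(0, op.size() - 1) + " " + value;
        }
      }
    };

    // ToyAssignment a("+=", "x", "y");  // a.binary_operation == "x + y"
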
@@ -243,21 +228,55 @@
 
 
 void ObjectLiteral::CalculateEmitStore() {
-  ZoneHashMap table(Literal::Match);
-  for (int i = properties()->length() - 1; i >= 0; i--) {
-    ObjectLiteral::Property* property = properties()->at(i);
+  HashMap properties(&IsEqualString);
+  HashMap elements(&IsEqualNumber);
+  for (int i = this->properties()->length() - 1; i >= 0; i--) {
+    ObjectLiteral::Property* property = this->properties()->at(i);
     Literal* literal = property->key();
-    if (literal->handle()->IsNull()) continue;
-    uint32_t hash = literal->Hash();
+    Handle<Object> handle = literal->handle();
+
+    if (handle->IsNull()) {
+      continue;
+    }
+
+    uint32_t hash;
+    HashMap* table;
+    void* key;
+    Factory* factory = Isolate::Current()->factory();
+    if (handle->IsSymbol()) {
+      Handle<String> name(String::cast(*handle));
+      if (name->AsArrayIndex(&hash)) {
+        Handle<Object> key_handle = factory->NewNumberFromUint(hash);
+        key = key_handle.location();
+        table = &elements;
+      } else {
+        key = name.location();
+        hash = name->Hash();
+        table = &properties;
+      }
+    } else if (handle->ToArrayIndex(&hash)) {
+      key = handle.location();
+      table = &elements;
+    } else {
+      ASSERT(handle->IsNumber());
+      double num = handle->Number();
+      char arr[100];
+      Vector<char> buffer(arr, ARRAY_SIZE(arr));
+      const char* str = DoubleToCString(num, buffer);
+      Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
+      key = name.location();
+      hash = name->Hash();
+      table = &properties;
+    }
     // If the key of a computed property is in the table, do not emit
     // a store for the property later.
-    if (property->kind() == ObjectLiteral::Property::COMPUTED &&
-        table.Lookup(literal, hash, false) != NULL) {
-      property->set_emit_store(false);
-    } else {
-      // Add key to the table.
-      table.Lookup(literal, hash, true);
+    if (property->kind() == ObjectLiteral::Property::COMPUTED) {
+      if (table->Lookup(key, hash, false) != NULL) {
+        property->set_emit_store(false);
+      }
     }
+    // Add key to the table.
+    table->Lookup(key, hash, true);
   }
 }
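
The restored CalculateEmitStore above walks the literal's properties back to front, normalizes each key into either a string table or an element-index table, and suppresses earlier computed stores whose key reappears later, so only the last store for a key is emitted. A condensed sketch of the core dedup (std:: containers instead of V8's HashMap, string keys only):

    #include <string>
    #include <unordered_set>
    #include <vector>

    struct ToyProperty {
      std::string key;
      bool is_computed;
      bool emit_store;
    };

    // Back-to-front walk: only the last store for a key must be emitted,
    // so a computed property whose key was already seen is skipped.
    void CalculateEmitStore(std::vector<ToyProperty>* props) {
      std::unordered_set<std::string> seen;
      for (int i = static_cast<int>(props->size()) - 1; i >= 0; --i) {
        ToyProperty& p = (*props)[i];
        if (p.is_computed && seen.count(p.key) != 0) p.emit_store = false;
        seen.insert(p.key);
      }
    }

The restored version additionally normalizes keys first: symbol keys that parse as array indices go into the element table, and plain number keys are stringified into the property table, mirroring how the runtime coerces literal keys.
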
 
@@ -308,100 +327,292 @@
 }
 
 
-static bool IsTypeof(Expression* expr) {
-  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
-  return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
-}
-
-
-// Check for the pattern: typeof <expression> equals <string literal>.
-static bool MatchLiteralCompareTypeof(Expression* left,
-                                      Token::Value op,
-                                      Expression* right,
-                                      Expression** expr,
-                                      Handle<String>* check) {
-  if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
-    *expr = left->AsUnaryOperation()->expression();
-    *check = Handle<String>::cast(right->AsLiteral()->handle());
-    return true;
-  }
-  return false;
-}
-
-
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
                                               Handle<String>* check) {
-  return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
-      MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
-}
+  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
 
+  UnaryOperation* left_unary = left_->AsUnaryOperation();
+  UnaryOperation* right_unary = right_->AsUnaryOperation();
+  Literal* left_literal = left_->AsLiteral();
+  Literal* right_literal = right_->AsLiteral();
 
-static bool IsVoidOfLiteral(Expression* expr) {
-  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
-  return maybe_unary != NULL &&
-      maybe_unary->op() == Token::VOID &&
-      maybe_unary->expression()->AsLiteral() != NULL;
-}
-
-
-// Check for the pattern: void <literal> equals <expression>
-static bool MatchLiteralCompareUndefined(Expression* left,
-                                         Token::Value op,
-                                         Expression* right,
-                                         Expression** expr) {
-  if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
-    *expr = right;
+  // Check for the pattern: typeof <expression> == <string literal>.
+  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
+      right_literal != NULL && right_literal->handle()->IsString()) {
+    *expr = left_unary->expression();
+    *check = Handle<String>::cast(right_literal->handle());
     return true;
   }
+
+  // Check for the pattern: <string literal> == typeof <expression>.
+  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
+      left_literal != NULL && left_literal->handle()->IsString()) {
+    *expr = right_unary->expression();
+    *check = Handle<String>::cast(left_literal->handle());
+    return true;
+  }
+
   return false;
 }
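
The restored IsLiteralCompareTypeof above spells out both operand orders by hand, for Token::EQ and Token::EQ_STRICT; the reverted-away version folded them into a single matcher tried left-then-right. A toy version of the symmetric match (plain C++, toy AST, not V8 types):

    #include <utility>

    struct ToyExpr {
      enum Kind { kTypeof, kStringLiteral, kOther } kind;
      ToyExpr* operand;        // set when kind == kTypeof
      const char* value;       // set when kind == kStringLiteral
    };

    // Match `typeof <expr> == <string>` with operands in either order.
    bool MatchTypeofCompare(ToyExpr* left, ToyExpr* right,
                            ToyExpr** expr, const char** check) {
      for (int attempt = 0; attempt < 2; ++attempt) {
        if (left->kind == ToyExpr::kTypeof &&
            right->kind == ToyExpr::kStringLiteral) {
          *expr = left->operand;
          *check = right->value;
          return true;
        }
        std::swap(left, right);  // second attempt: swapped operands
      }
      return false;
    }
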
 
 
 bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
-      MatchLiteralCompareUndefined(right_, op_, left_, expr);
-}
+  if (op_ != Token::EQ_STRICT) return false;
 
+  UnaryOperation* left_unary = left_->AsUnaryOperation();
+  UnaryOperation* right_unary = right_->AsUnaryOperation();
 
-// Check for the pattern: null equals <expression>
-static bool MatchLiteralCompareNull(Expression* left,
-                                    Token::Value op,
-                                    Expression* right,
-                                    Expression** expr) {
-  if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
-    *expr = right;
+  // Check for the pattern: <expression> === void <literal>.
+  if (right_unary != NULL && right_unary->op() == Token::VOID &&
+      right_unary->expression()->AsLiteral() != NULL) {
+    *expr = left_;
     return true;
   }
+
+  // Check for the pattern: void <literal> === <expression>.
+  if (left_unary != NULL && left_unary->op() == Token::VOID &&
+      left_unary->expression()->AsLiteral() != NULL) {
+    *expr = right_;
+    return true;
+  }
+
   return false;
 }
 
 
-bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
-  return MatchLiteralCompareNull(left_, op_, right_, expr) ||
-      MatchLiteralCompareNull(right_, op_, left_, expr);
-}
-
-
 // ----------------------------------------------------------------------------
 // Inlining support
 
 bool Declaration::IsInlineable() const {
-  return proxy()->var()->IsStackAllocated();
+  return proxy()->var()->IsStackAllocated() && fun() == NULL;
 }
 
-bool FunctionDeclaration::IsInlineable() const {
+
+bool TargetCollector::IsInlineable() const {
+  UNREACHABLE();
   return false;
 }
 
 
+bool ForInStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool WithStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool SwitchStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryCatchStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool TryFinallyStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool DebuggerStatement::IsInlineable() const {
+  return false;
+}
+
+
+bool Throw::IsInlineable() const {
+  return exception()->IsInlineable();
+}
+
+
+bool MaterializedLiteral::IsInlineable() const {
+  // TODO(1322): Allow materialized literals.
+  return false;
+}
+
+
+bool FunctionLiteral::IsInlineable() const {
+  // TODO(1322): Allow materialized literals.
+  return false;
+}
+
+
+bool ThisFunction::IsInlineable() const {
+  return false;
+}
+
+
+bool SharedFunctionInfoLiteral::IsInlineable() const {
+  return false;
+}
+
+
+bool ForStatement::IsInlineable() const {
+  return (init() == NULL || init()->IsInlineable())
+      && (cond() == NULL || cond()->IsInlineable())
+      && (next() == NULL || next()->IsInlineable())
+      && body()->IsInlineable();
+}
+
+
+bool WhileStatement::IsInlineable() const {
+  return cond()->IsInlineable()
+      && body()->IsInlineable();
+}
+
+
+bool DoWhileStatement::IsInlineable() const {
+  return cond()->IsInlineable()
+      && body()->IsInlineable();
+}
+
+
+bool ContinueStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool BreakStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool EmptyStatement::IsInlineable() const {
+  return true;
+}
+
+
+bool Literal::IsInlineable() const {
+  return true;
+}
+
+
+bool Block::IsInlineable() const {
+  const int count = statements_.length();
+  for (int i = 0; i < count; ++i) {
+    if (!statements_[i]->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool ExpressionStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool IfStatement::IsInlineable() const {
+  return condition()->IsInlineable()
+      && then_statement()->IsInlineable()
+      && else_statement()->IsInlineable();
+}
+
+
+bool ReturnStatement::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool Conditional::IsInlineable() const {
+  return condition()->IsInlineable() && then_expression()->IsInlineable() &&
+      else_expression()->IsInlineable();
+}
+
+
+bool VariableProxy::IsInlineable() const {
+  return var()->IsUnallocated() || var()->IsStackAllocated();
+}
+
+
+bool Assignment::IsInlineable() const {
+  return target()->IsInlineable() && value()->IsInlineable();
+}
+
+
+bool Property::IsInlineable() const {
+  return obj()->IsInlineable() && key()->IsInlineable();
+}
+
+
+bool Call::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallNew::IsInlineable() const {
+  if (!expression()->IsInlineable()) return false;
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool CallRuntime::IsInlineable() const {
+  // Don't try to inline JS runtime calls because we don't (currently) even
+  // optimize them.
+  if (is_jsruntime()) return false;
+  // Don't inline the %_ArgumentsLength or %_Arguments because their
+  // implementation will not work.  There is no stack frame to get them
+  // from.
+  if (function()->intrinsic_type == Runtime::INLINE &&
+      (name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
+       name()->IsEqualTo(CStrVector("_Arguments")))) {
+    return false;
+  }
+  const int count = arguments()->length();
+  for (int i = 0; i < count; ++i) {
+    if (!arguments()->at(i)->IsInlineable()) return false;
+  }
+  return true;
+}
+
+
+bool UnaryOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool BinaryOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareOperation::IsInlineable() const {
+  return left()->IsInlineable() && right()->IsInlineable();
+}
+
+
+bool CompareToNull::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
+bool CountOperation::IsInlineable() const {
+  return expression()->IsInlineable();
+}
+
+
 // ----------------------------------------------------------------------------
 // Recording of type feedback
 
 void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   // Record type feedback from the oracle in the AST.
-  is_uninitialized_ = oracle->LoadIsUninitialized(this);
-  if (is_uninitialized_) return;
-
   is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
   receiver_types_.Clear();
   if (key()->IsPropertyName()) {
@@ -466,10 +677,6 @@
   TypeInfo info = oracle->SwitchType(this);
   if (info.IsSmi()) {
     compare_type_ = SMI_ONLY;
-  } else if (info.IsSymbol()) {
-    compare_type_ = SYMBOL_ONLY;
-  } else if (info.IsNonSymbol()) {
-    compare_type_ = STRING_ONLY;
   } else if (info.IsNonPrimitive()) {
     compare_type_ = OBJECT_ONLY;
   } else {
@@ -478,47 +685,39 @@
 }
 
 
-bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
-  // If there is an interceptor, we can't compute the target for a direct call.
-  if (type->has_named_interceptor()) return false;
+static bool CanCallWithoutIC(Handle<JSFunction> target, int arity) {
+  SharedFunctionInfo* info = target->shared();
+  // If the number of formal parameters of the target function does
+  // not match the number of arguments we're passing, we don't want to
+  // deal with it. Otherwise, we can call it directly.
+  return !target->NeedsArgumentsAdaption() ||
+      info->formal_parameter_count() == arity;
+}
 
+
+bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
   if (check_type_ == RECEIVER_MAP_CHECK) {
-    // For primitive checks the holder is set up to point to the corresponding
-    // prototype object, i.e. one step of the algorithm below has been already
-    // performed. For non-primitive checks we clear it to allow computing
-    // targets for polymorphic calls.
+    // For primitive checks the holder is set up to point to the
+    // corresponding prototype object, i.e. one step of the algorithm
+    // below has been already performed.
+    // For non-primitive checks we clear it to allow computing targets
+    // for polymorphic calls.
     holder_ = Handle<JSObject>::null();
   }
-  LookupResult lookup(type->GetIsolate());
   while (true) {
+    LookupResult lookup;
     type->LookupInDescriptors(NULL, *name, &lookup);
-    if (lookup.IsFound()) {
-      switch (lookup.type()) {
-        case CONSTANT_FUNCTION:
-          // We surely know the target for a constant function.
-          target_ =
-              Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
-          return true;
-        case NORMAL:
-        case FIELD:
-        case CALLBACKS:
-        case HANDLER:
-        case INTERCEPTOR:
-          // We don't know the target.
-          return false;
-        case MAP_TRANSITION:
-        case ELEMENTS_TRANSITION:
-        case CONSTANT_TRANSITION:
-        case NULL_DESCRIPTOR:
-          // Perhaps something interesting is up in the prototype chain...
-          break;
-      }
+    // If the function wasn't found directly in the map, we start
+    // looking upwards through the prototype chain.
+    if (!lookup.IsFound() && type->prototype()->IsJSObject()) {
+      holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      type = Handle<Map>(holder()->map());
+    } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
+      target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
+      return CanCallWithoutIC(target_, arguments()->length());
+    } else {
+      return false;
     }
-    // If we reach the end of the prototype chain, we don't know the target.
-    if (!type->prototype()->IsJSObject()) return false;
-    // Go up the prototype chain, recording where we are currently.
-    holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
-    type = Handle<Map>(holder()->map());
   }
 }
 
@@ -527,7 +726,7 @@
                                LookupResult* lookup) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  ASSERT(lookup->IsFound() &&
+  ASSERT(lookup->IsProperty() &&
          lookup->type() == NORMAL &&
          lookup->holder() == *global);
   cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
@@ -535,7 +734,8 @@
     Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
     // If the function is in new space we assume it's more likely to
     // change and thus prefer the general IC code.
-    if (!HEAP->InNewSpace(*candidate)) {
+    if (!HEAP->InNewSpace(*candidate) &&
+        CanCallWithoutIC(candidate, arguments()->length())) {
       target_ = candidate;
       return true;
     }
@@ -546,49 +746,37 @@
 
 void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
                               CallKind call_kind) {
-  is_monomorphic_ = oracle->CallIsMonomorphic(this);
   Property* property = expression()->AsProperty();
-  if (property == NULL) {
-    // Function call.  Specialize for monomorphic calls.
-    if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
-  } else {
-    // Method call.  Specialize for the receiver types seen at runtime.
-    Literal* key = property->key()->AsLiteral();
-    ASSERT(key != NULL && key->handle()->IsString());
-    Handle<String> name = Handle<String>::cast(key->handle());
-    receiver_types_.Clear();
-    oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+  ASSERT(property != NULL);
+  // Specialize for the receiver types seen at runtime.
+  Literal* key = property->key()->AsLiteral();
+  ASSERT(key != NULL && key->handle()->IsString());
+  Handle<String> name = Handle<String>::cast(key->handle());
+  receiver_types_.Clear();
+  oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
 #ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      int length = receiver_types_.length();
-      for (int i = 0; i < length; i++) {
-        Handle<Map> map = receiver_types_.at(i);
-        ASSERT(!map.is_null() && *map != NULL);
-      }
-    }
-#endif
-    check_type_ = oracle->GetCallCheckType(this);
-    if (is_monomorphic_) {
-      Handle<Map> map;
-      if (receiver_types_.length() > 0) {
-        ASSERT(check_type_ == RECEIVER_MAP_CHECK);
-        map = receiver_types_.at(0);
-      } else {
-        ASSERT(check_type_ != RECEIVER_MAP_CHECK);
-        holder_ = Handle<JSObject>(
-            oracle->GetPrototypeForPrimitiveCheck(check_type_));
-        map = Handle<Map>(holder_->map());
-      }
-      is_monomorphic_ = ComputeTarget(map, name);
+  if (FLAG_enable_slow_asserts) {
+    int length = receiver_types_.length();
+    for (int i = 0; i < length; i++) {
+      Handle<Map> map = receiver_types_.at(i);
+      ASSERT(!map.is_null() && *map != NULL);
     }
   }
-}
-
-
-void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
+#endif
+  is_monomorphic_ = oracle->CallIsMonomorphic(this);
+  check_type_ = oracle->GetCallCheckType(this);
   if (is_monomorphic_) {
-    target_ = oracle->GetCallNewTarget(this);
+    Handle<Map> map;
+    if (receiver_types_.length() > 0) {
+      ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+      map = receiver_types_.at(0);
+    } else {
+      ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+      holder_ = Handle<JSObject>(
+          oracle->GetPrototypeForPrimitiveCheck(check_type_));
+      map = Handle<Map>(holder_->map());
+    }
+    is_monomorphic_ = ComputeTarget(map, name);
   }
 }
 
@@ -605,13 +793,6 @@
 }
 
 
-void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
-      ? oracle->GetObjectLiteralStoreMap(this)
-      : Handle<Map>::null();
-}
-
-
 // ----------------------------------------------------------------------------
 // Implementation of AstVisitor
 
@@ -675,6 +856,8 @@
 FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
 #undef MAKE_TYPE_CASE
 
+RegExpEmpty RegExpEmpty::kInstance;
+
 
 static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
   Interval result = Interval::Empty();
@@ -992,172 +1175,4 @@
       entry_id_(AstNode::GetNextId(isolate)) {
 }
 
-
-#define INCREASE_NODE_COUNT(NodeType) \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-  }
-
-INCREASE_NODE_COUNT(VariableDeclaration)
-INCREASE_NODE_COUNT(FunctionDeclaration)
-INCREASE_NODE_COUNT(ModuleDeclaration)
-INCREASE_NODE_COUNT(ImportDeclaration)
-INCREASE_NODE_COUNT(ExportDeclaration)
-INCREASE_NODE_COUNT(ModuleLiteral)
-INCREASE_NODE_COUNT(ModuleVariable)
-INCREASE_NODE_COUNT(ModulePath)
-INCREASE_NODE_COUNT(ModuleUrl)
-INCREASE_NODE_COUNT(Block)
-INCREASE_NODE_COUNT(ExpressionStatement)
-INCREASE_NODE_COUNT(EmptyStatement)
-INCREASE_NODE_COUNT(IfStatement)
-INCREASE_NODE_COUNT(ContinueStatement)
-INCREASE_NODE_COUNT(BreakStatement)
-INCREASE_NODE_COUNT(ReturnStatement)
-INCREASE_NODE_COUNT(Conditional)
-INCREASE_NODE_COUNT(Literal)
-INCREASE_NODE_COUNT(ObjectLiteral)
-INCREASE_NODE_COUNT(Assignment)
-INCREASE_NODE_COUNT(Throw)
-INCREASE_NODE_COUNT(Property)
-INCREASE_NODE_COUNT(UnaryOperation)
-INCREASE_NODE_COUNT(CountOperation)
-INCREASE_NODE_COUNT(BinaryOperation)
-INCREASE_NODE_COUNT(CompareOperation)
-INCREASE_NODE_COUNT(ThisFunction)
-INCREASE_NODE_COUNT(Call)
-INCREASE_NODE_COUNT(CallNew)
-
-#undef INCREASE_NODE_COUNT
-
-
-void AstConstructionVisitor::VisitWithStatement(WithStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitSwitchStatement(SwitchStatement* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitWhileStatement(WhileStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitForStatement(ForStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitForInStatement(ForInStatement* node) {
-  increase_node_count();
-  add_flag(kDontSelfOptimize);
-}
-
-
-void AstConstructionVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitTryFinallyStatement(
-    TryFinallyStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* node) {
-  increase_node_count();
-  add_flag(kDontOptimize);
-  add_flag(kDontInline);
-}
-
-
-void AstConstructionVisitor::VisitVariableProxy(VariableProxy* node) {
-  increase_node_count();
-  // In theory, we'd have to add:
-  // if(node->var()->IsLookupSlot()) { add_flag(kDontInline); }
-  // However, node->var() is usually not bound yet at VariableProxy creation
-  // time, and LOOKUP variables only result from constructs that cannot
-  // be inlined anyway.
-}
-
-
-void AstConstructionVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
-
-void AstConstructionVisitor::VisitArrayLiteral(ArrayLiteral* node) {
-  increase_node_count();
-  add_flag(kDontInline);  // TODO(1322): Allow materialized literals.
-}
-
-
-void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
-  increase_node_count();
-  if (node->is_jsruntime()) {
-    // Don't try to inline JS runtime calls because we don't (currently) even
-    // optimize them.
-    add_flag(kDontInline);
-  } else if (node->function()->intrinsic_type == Runtime::INLINE &&
-      (node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
-       node->name()->IsEqualTo(CStrVector("_Arguments")))) {
-    // Don't inline the %_ArgumentsLength or %_Arguments because their
-    // implementation will not work.  There is no stack frame to get them
-    // from.
-    add_flag(kDontInline);
-  }
-}
-
-
-Handle<String> Literal::ToString() {
-  if (handle_->IsString()) return Handle<String>::cast(handle_);
-  ASSERT(handle_->IsNumber());
-  char arr[100];
-  Vector<char> buffer(arr, ARRAY_SIZE(arr));
-  const char* str;
-  if (handle_->IsSmi()) {
-    // Optimization only, the heap number case would subsume this.
-    OS::SNPrintF(buffer, "%d", Smi::cast(*handle_)->value());
-    str = arr;
-  } else {
-    str = DoubleToCString(handle_->Number(), buffer);
-  }
-  return FACTORY->NewStringFromAscii(CStrVector(str));
-}
-
-
 } }  // namespace v8::internal
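
The large block deleted at the end of ast.cc (INCREASE_NODE_COUNT plus the AstConstructionVisitor overrides) was an X-macro: the node list is written once and expanded repeatedly to stamp out near-identical members. A minimal sketch of the idiom (toy node list, not the V8 one):

    // The list is defined once...
    #define TOY_NODE_LIST(V) \
      V(Block)               \
      V(IfStatement)         \
      V(Literal)

    // ...expanded once for forward declarations...
    #define TOY_FORWARD_DECLARE(type) struct type;
    TOY_NODE_LIST(TOY_FORWARD_DECLARE)
    #undef TOY_FORWARD_DECLARE

    // ...and once more for one counting visitor method per node type.
    struct ToyCountingVisitor {
      int node_count;
      ToyCountingVisitor() : node_count(0) {}
    #define TOY_VISIT(type) void Visit##type(type*) { ++node_count; }
      TOY_NODE_LIST(TOY_VISIT)
    #undef TOY_VISIT
    };
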
diff --git a/src/ast.h b/src/ast.h
index b827302..b56205f 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,21 +28,14 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
-#include "v8.h"
-
-#include "assembler.h"
+#include "allocation.h"
+#include "execution.h"
 #include "factory.h"
-#include "isolate.h"
 #include "jsregexp.h"
-#include "list-inl.h"
 #include "runtime.h"
 #include "small-pointer-list.h"
-#include "smart-array-pointer.h"
 #include "token.h"
-#include "utils.h"
 #include "variables.h"
-#include "interface.h"
-#include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -60,19 +53,6 @@
 // Nodes of the abstract syntax tree. Only concrete classes are
 // enumerated here.
 
-#define DECLARATION_NODE_LIST(V)                \
-  V(VariableDeclaration)                        \
-  V(FunctionDeclaration)                        \
-  V(ModuleDeclaration)                          \
-  V(ImportDeclaration)                          \
-  V(ExportDeclaration)                          \
-
-#define MODULE_NODE_LIST(V)                     \
-  V(ModuleLiteral)                              \
-  V(ModuleVariable)                             \
-  V(ModulePath)                                 \
-  V(ModuleUrl)
-
 #define STATEMENT_NODE_LIST(V)                  \
   V(Block)                                      \
   V(ExpressionStatement)                        \
@@ -110,41 +90,21 @@
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
+  V(CompareToNull)                              \
   V(ThisFunction)
 
 #define AST_NODE_LIST(V)                        \
-  DECLARATION_NODE_LIST(V)                      \
-  MODULE_NODE_LIST(V)                           \
+  V(Declaration)                                \
   STATEMENT_NODE_LIST(V)                        \
   EXPRESSION_NODE_LIST(V)
 
 // Forward declarations
-class AstConstructionVisitor;
-template<class> class AstNodeFactory;
-class AstVisitor;
-class Declaration;
-class Module;
-class BreakableStatement;
-class Expression;
-class IterationStatement;
+class BitVector;
+class DefinitionInfo;
 class MaterializedLiteral;
-class Statement;
 class TargetCollector;
 class TypeFeedbackOracle;
 
-class RegExpAlternative;
-class RegExpAssertion;
-class RegExpAtom;
-class RegExpBackReference;
-class RegExpCapture;
-class RegExpCharacterClass;
-class RegExpCompiler;
-class RegExpDisjunction;
-class RegExpEmpty;
-class RegExpLookahead;
-class RegExpQuantifier;
-class RegExpText;
-
 #define DEF_FORWARD_DECLARATION(type) class type;
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
 #undef DEF_FORWARD_DECLARATION
@@ -158,31 +118,8 @@
 
 #define DECLARE_NODE_TYPE(type)                                         \
   virtual void Accept(AstVisitor* v);                                   \
-  virtual AstNode::Type node_type() const { return AstNode::k##type; }
-
-
-enum AstPropertiesFlag {
-  kDontInline,
-  kDontOptimize,
-  kDontSelfOptimize,
-  kDontSoftInline
-};
-
-
-class AstProperties BASE_EMBEDDED {
- public:
-  class Flags : public EnumSet<AstPropertiesFlag, int> {};
-
-  AstProperties() : node_count_(0) { }
-
-  Flags* flags() { return &flags_; }
-  int node_count() { return node_count_; }
-  void add_node_count(int count) { node_count_ += count; }
-
- private:
-  Flags flags_;
-  int node_count_;
-};
+  virtual AstNode::Type node_type() const { return AstNode::k##type; }  \
+  virtual type* As##type() { return this; }
 
 
 class AstNode: public ZoneObject {
@@ -201,11 +138,14 @@
   // that emit code (function declarations).
   static const int kDeclarationsId = 3;
 
+  // Override ZoneObject's new to count allocated AST nodes.
   void* operator new(size_t size, Zone* zone) {
+    Isolate* isolate = zone->isolate();
+    isolate->set_ast_node_count(isolate->ast_node_count() + 1);
     return zone->New(static_cast<int>(size));
   }
 
-  AstNode() { }
+  AstNode() {}
 
   virtual ~AstNode() { }
 
@@ -214,12 +154,10 @@
 
   // Type testing & conversion functions overridden by concrete subclasses.
 #define DECLARE_NODE_FUNCTIONS(type)                  \
-  bool Is##type() { return node_type() == AstNode::k##type; }          \
-  type* As##type() { return Is##type() ? reinterpret_cast<type*>(this) : NULL; }
+  virtual type* As##type() { return NULL; }
   AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
 #undef DECLARE_NODE_FUNCTIONS
 
-  virtual Declaration* AsDeclaration() { return NULL; }
   virtual Statement* AsStatement() { return NULL; }
   virtual Expression* AsExpression() { return NULL; }
   virtual TargetCollector* AsTargetCollector() { return NULL; }
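
This hunk swaps the downcast style: the reverted-away version compared node_type() against a tag and then cast (V8 uses reinterpret_cast there; static_cast is shown in the toy below), while the restored one gives every concrete class its own virtual As##type() override. Both avoid RTTI; a side-by-side toy sketch:

    struct ToyBlock;

    struct ToyNode {
      enum Type { kBlock, kLiteral };
      virtual ~ToyNode() {}
      virtual Type node_type() const = 0;

      // Style restored here: one virtual per type, overridden below.
      virtual ToyBlock* AsBlock() { return 0; }

      // Style reverted away: tag check plus a cast, no virtual dispatch.
      ToyBlock* AsBlockTagged();
    };

    struct ToyBlock : ToyNode {
      virtual Type node_type() const { return kBlock; }
      virtual ToyBlock* AsBlock() { return this; }
    };

    inline ToyBlock* ToyNode::AsBlockTagged() {
      return node_type() == kBlock ? static_cast<ToyBlock*>(this) : 0;
    }
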
@@ -227,13 +165,19 @@
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
 
+  // True if the node is simple enough for us to inline calls containing it.
+  virtual bool IsInlineable() const = 0;
+
+  static int Count() { return Isolate::Current()->ast_node_count(); }
+  static void ResetIds() { Isolate::Current()->set_ast_node_id(0); }
+
  protected:
-  static int GetNextId(Isolate* isolate) {
+  static unsigned GetNextId(Isolate* isolate) {
     return ReserveIdRange(isolate, 1);
   }
 
-  static int ReserveIdRange(Isolate* isolate, int n) {
-    int tmp = isolate->ast_node_id();
+  static unsigned ReserveIdRange(Isolate* isolate, int n) {
+    unsigned tmp = isolate->ast_node_id();
     isolate->set_ast_node_id(tmp + n);
     return tmp;
   }
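
ReserveIdRange above hands out a contiguous block of ids by bumping the isolate's counter once; GetNextId is just the n == 1 case. A standalone sketch of the allocator (toy struct in place of the Isolate):

    // Contiguous id reservation: the caller owns [first, first + n).
    struct ToyIdAllocator {
      unsigned next;
      ToyIdAllocator() : next(0) {}

      unsigned ReserveIdRange(int n) {
        unsigned first = next;
        next += n;
        return first;
      }

      unsigned GetNextId() { return ReserveIdRange(1); }
    };
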
@@ -253,6 +197,9 @@
 
   virtual Statement* AsStatement()  { return this; }
 
+  virtual Assignment* StatementAsSimpleAssignment() { return NULL; }
+  virtual CountOperation* StatementAsCountOperation() { return NULL; }
+
   bool IsEmpty() { return AsEmptyStatement() != NULL; }
 
   void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; }
@@ -307,6 +254,10 @@
     kTest
   };
 
+  explicit Expression(Isolate* isolate)
+      : id_(GetNextId(isolate)),
+        test_id_(GetNextId(isolate)) {}
+
   virtual int position() const {
     UNREACHABLE();
     return 0;
@@ -314,6 +265,7 @@
 
   virtual Expression* AsExpression()  { return this; }
 
+  virtual bool IsTrivial() { return false; }
   virtual bool IsValidLeftHandSide() { return false; }
 
   // Helpers for ToBoolean conversion.
@@ -325,24 +277,27 @@
   // names because [] for string objects is handled only by keyed ICs.
   virtual bool IsPropertyName() { return false; }
 
+  // Mark the expression as being compiled as an expression
+  // statement. This is used to transform postfix increments to
+  // (faster) prefix increments.
+  virtual void MarkAsStatement() { /* do nothing */ }
+
   // True iff the result can be safely overwritten (to avoid allocation).
   // False for operations that can return one of their operands.
   virtual bool ResultOverwriteAllowed() { return false; }
 
   // True iff the expression is a literal represented as a smi.
-  bool IsSmiLiteral();
-
-  // True iff the expression is a string literal.
-  bool IsStringLiteral();
-
-  // True iff the expression is the null literal.
-  bool IsNullLiteral();
+  virtual bool IsSmiLiteral() { return false; }
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
     return false;
   }
+  virtual bool IsArrayLength() {
+    UNREACHABLE();
+    return false;
+  }
   virtual SmallMapList* GetReceiverTypes() {
     UNREACHABLE();
     return NULL;
@@ -357,14 +312,9 @@
   unsigned id() const { return id_; }
   unsigned test_id() const { return test_id_; }
 
- protected:
-  explicit Expression(Isolate* isolate)
-      : id_(GetNextId(isolate)),
-        test_id_(GetNextId(isolate)) {}
-
  private:
-  int id_;
-  int test_id_;
+  unsigned id_;
+  unsigned test_id_;
 };
 
 
@@ -393,14 +343,7 @@
   int ExitId() const { return exit_id_; }
 
  protected:
-  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type)
-      : labels_(labels),
-        type_(type),
-        entry_id_(GetNextId(isolate)),
-        exit_id_(GetNextId(isolate)) {
-    ASSERT(labels == NULL || labels->length() > 0);
-  }
-
+  BreakableStatement(Isolate* isolate, ZoneStringList* labels, Type type);
 
  private:
   ZoneStringList* labels_;
@@ -413,8 +356,25 @@
 
 class Block: public BreakableStatement {
  public:
+  inline Block(Isolate* isolate,
+               ZoneStringList* labels,
+               int capacity,
+               bool is_initializer_block);
+
   DECLARE_NODE_TYPE(Block)
 
+  virtual Assignment* StatementAsSimpleAssignment() {
+    if (statements_.length() != 1) return NULL;
+    return statements_[0]->StatementAsSimpleAssignment();
+  }
+
+  virtual CountOperation* StatementAsCountOperation() {
+    if (statements_.length() != 1) return NULL;
+    return statements_[0]->StatementAsCountOperation();
+  }
+
+  virtual bool IsInlineable() const;
+
   void AddStatement(Statement* statement) { statements_.Add(statement); }
 
   ZoneList<Statement*>* statements() { return &statements_; }
@@ -423,19 +383,6 @@
   Scope* block_scope() const { return block_scope_; }
   void set_block_scope(Scope* block_scope) { block_scope_ = block_scope; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Block(Isolate* isolate,
-        ZoneStringList* labels,
-        int capacity,
-        bool is_initializer_block)
-      : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY),
-        statements_(capacity),
-        is_initializer_block_(is_initializer_block),
-        block_scope_(NULL) {
-  }
-
  private:
   ZoneList<Statement*> statements_;
   bool is_initializer_block_;
@@ -445,236 +392,39 @@
 
 class Declaration: public AstNode {
  public:
-  VariableProxy* proxy() const { return proxy_; }
-  VariableMode mode() const { return mode_; }
-  Scope* scope() const { return scope_; }
-  virtual InitializationFlag initialization() const = 0;
-  virtual bool IsInlineable() const;
-
-  virtual Declaration* AsDeclaration() { return this; }
-
- protected:
   Declaration(VariableProxy* proxy,
-              VariableMode mode,
+              Variable::Mode mode,
+              FunctionLiteral* fun,
               Scope* scope)
       : proxy_(proxy),
         mode_(mode),
+        fun_(fun),
         scope_(scope) {
-    ASSERT(mode == VAR ||
-           mode == CONST ||
-           mode == CONST_HARMONY ||
-           mode == LET);
+    ASSERT(mode == Variable::VAR ||
+           mode == Variable::CONST ||
+           mode == Variable::LET);
+    // At the moment there are no "const functions" in JavaScript...
+    ASSERT(fun == NULL || mode == Variable::VAR || mode == Variable::LET);
   }
 
+  DECLARE_NODE_TYPE(Declaration)
+
+  VariableProxy* proxy() const { return proxy_; }
+  Variable::Mode mode() const { return mode_; }
+  FunctionLiteral* fun() const { return fun_; }  // may be NULL
+  virtual bool IsInlineable() const;
+  Scope* scope() const { return scope_; }
+
  private:
   VariableProxy* proxy_;
-  VariableMode mode_;
+  Variable::Mode mode_;
+  FunctionLiteral* fun_;
 
   // Nested scope from which the declaration originated.
   Scope* scope_;
 };
 
 
-class VariableDeclaration: public Declaration {
- public:
-  DECLARE_NODE_TYPE(VariableDeclaration)
-
-  virtual InitializationFlag initialization() const {
-    return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  VariableDeclaration(VariableProxy* proxy,
-                      VariableMode mode,
-                      Scope* scope)
-      : Declaration(proxy, mode, scope) {
-  }
-};
-
-
-class FunctionDeclaration: public Declaration {
- public:
-  DECLARE_NODE_TYPE(FunctionDeclaration)
-
-  FunctionLiteral* fun() const { return fun_; }
-  virtual InitializationFlag initialization() const {
-    return kCreatedInitialized;
-  }
-  virtual bool IsInlineable() const;
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  FunctionDeclaration(VariableProxy* proxy,
-                      VariableMode mode,
-                      FunctionLiteral* fun,
-                      Scope* scope)
-      : Declaration(proxy, mode, scope),
-        fun_(fun) {
-    // At the moment there are no "const functions" in JavaScript...
-    ASSERT(mode == VAR || mode == LET);
-    ASSERT(fun != NULL);
-  }
-
- private:
-  FunctionLiteral* fun_;
-};
-
-
-class ModuleDeclaration: public Declaration {
- public:
-  DECLARE_NODE_TYPE(ModuleDeclaration)
-
-  Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const {
-    return kCreatedInitialized;
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ModuleDeclaration(VariableProxy* proxy,
-                    Module* module,
-                    Scope* scope)
-      : Declaration(proxy, LET, scope),
-        module_(module) {
-  }
-
- private:
-  Module* module_;
-};
-
-
-class ImportDeclaration: public Declaration {
- public:
-  DECLARE_NODE_TYPE(ImportDeclaration)
-
-  Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const {
-    return kCreatedInitialized;
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ImportDeclaration(VariableProxy* proxy,
-                    Module* module,
-                    Scope* scope)
-      : Declaration(proxy, LET, scope),
-        module_(module) {
-  }
-
- private:
-  Module* module_;
-};
-
-
-class ExportDeclaration: public Declaration {
- public:
-  DECLARE_NODE_TYPE(ExportDeclaration)
-
-  virtual InitializationFlag initialization() const {
-    return kCreatedInitialized;
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ExportDeclaration(VariableProxy* proxy,
-                    Scope* scope)
-      : Declaration(proxy, LET, scope) {
-  }
-};
-
-
-class Module: public AstNode {
- public:
-  Interface* interface() const { return interface_; }
-
- protected:
-  Module() : interface_(Interface::NewModule()) {}
-  explicit Module(Interface* interface) : interface_(interface) {}
-
- private:
-  Interface* interface_;
-};
-
-
-class ModuleLiteral: public Module {
- public:
-  DECLARE_NODE_TYPE(ModuleLiteral)
-
-  Block* body() const { return body_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ModuleLiteral(Block* body, Interface* interface)
-      : Module(interface),
-        body_(body) {
-  }
-
- private:
-  Block* body_;
-};
-
-
-class ModuleVariable: public Module {
- public:
-  DECLARE_NODE_TYPE(ModuleVariable)
-
-  VariableProxy* proxy() const { return proxy_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  inline explicit ModuleVariable(VariableProxy* proxy);
-
- private:
-  VariableProxy* proxy_;
-};
-
-
-class ModulePath: public Module {
- public:
-  DECLARE_NODE_TYPE(ModulePath)
-
-  Module* module() const { return module_; }
-  Handle<String> name() const { return name_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ModulePath(Module* module, Handle<String> name)
-      : module_(module),
-        name_(name) {
-  }
-
- private:
-  Module* module_;
-  Handle<String> name_;
-};
-
-
-class ModuleUrl: public Module {
- public:
-  DECLARE_NODE_TYPE(ModuleUrl)
-
-  Handle<String> url() const { return url_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit ModuleUrl(Handle<String> url) : url_(url) {
-  }
-
- private:
-  Handle<String> url_;
-};
-
-
 class IterationStatement: public BreakableStatement {
  public:
   // Type testing & conversion.
@@ -691,11 +441,7 @@
   Label* continue_target()  { return &continue_target_; }
 
  protected:
-  IterationStatement(Isolate* isolate, ZoneStringList* labels)
-      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-        body_(NULL),
-        osr_entry_id_(GetNextId(isolate)) {
-  }
+  inline IterationStatement(Isolate* isolate, ZoneStringList* labels);
 
   void Initialize(Statement* body) {
     body_ = body;
@@ -710,6 +456,8 @@
 
 class DoWhileStatement: public IterationStatement {
  public:
+  inline DoWhileStatement(Isolate* isolate, ZoneStringList* labels);
+
   DECLARE_NODE_TYPE(DoWhileStatement)
 
   void Initialize(Expression* cond, Statement* body) {
@@ -729,16 +477,7 @@
   virtual int StackCheckId() const { return back_edge_id_; }
   int BackEdgeId() const { return back_edge_id_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  DoWhileStatement(Isolate* isolate, ZoneStringList* labels)
-      : IterationStatement(isolate, labels),
-        cond_(NULL),
-        condition_position_(-1),
-        continue_id_(GetNextId(isolate)),
-        back_edge_id_(GetNextId(isolate)) {
-  }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* cond_;
@@ -750,6 +489,8 @@
 
 class WhileStatement: public IterationStatement {
  public:
+  inline WhileStatement(Isolate* isolate, ZoneStringList* labels);
+
   DECLARE_NODE_TYPE(WhileStatement)
 
   void Initialize(Expression* cond, Statement* body) {
@@ -764,22 +505,13 @@
   void set_may_have_function_literal(bool value) {
     may_have_function_literal_ = value;
   }
+  virtual bool IsInlineable() const;
 
   // Bailout support.
   virtual int ContinueId() const { return EntryId(); }
   virtual int StackCheckId() const { return body_id_; }
   int BodyId() const { return body_id_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  WhileStatement(Isolate* isolate, ZoneStringList* labels)
-      : IterationStatement(isolate, labels),
-        cond_(NULL),
-        may_have_function_literal_(true),
-        body_id_(GetNextId(isolate)) {
-  }
-
  private:
   Expression* cond_;
   // True if there is a function literal subexpression in the condition.
@@ -790,6 +522,8 @@
 
 class ForStatement: public IterationStatement {
  public:
+  inline ForStatement(Isolate* isolate, ZoneStringList* labels);
+
   DECLARE_NODE_TYPE(ForStatement)
 
   void Initialize(Statement* init,
@@ -821,20 +555,7 @@
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
   Variable* loop_variable() { return loop_variable_; }
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ForStatement(Isolate* isolate, ZoneStringList* labels)
-      : IterationStatement(isolate, labels),
-        init_(NULL),
-        cond_(NULL),
-        next_(NULL),
-        may_have_function_literal_(true),
-        loop_variable_(NULL),
-        continue_id_(GetNextId(isolate)),
-        body_id_(GetNextId(isolate)) {
-  }
+  virtual bool IsInlineable() const;
 
  private:
   Statement* init_;
@@ -850,6 +571,8 @@
 
 class ForInStatement: public IterationStatement {
  public:
+  inline ForInStatement(Isolate* isolate, ZoneStringList* labels);
+
   DECLARE_NODE_TYPE(ForInStatement)
 
   void Initialize(Expression* each, Expression* enumerable, Statement* body) {
@@ -860,44 +583,35 @@
 
   Expression* each() const { return each_; }
   Expression* enumerable() const { return enumerable_; }
+  virtual bool IsInlineable() const;
 
+  // Bailout support.
+  int AssignmentId() const { return assignment_id_; }
   virtual int ContinueId() const { return EntryId(); }
-  virtual int StackCheckId() const { return body_id_; }
-  int BodyId() const { return body_id_; }
-  int PrepareId() const { return prepare_id_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ForInStatement(Isolate* isolate, ZoneStringList* labels)
-      : IterationStatement(isolate, labels),
-        each_(NULL),
-        enumerable_(NULL),
-        body_id_(GetNextId(isolate)),
-        prepare_id_(GetNextId(isolate)) {
-  }
+  virtual int StackCheckId() const { return EntryId(); }
 
  private:
   Expression* each_;
   Expression* enumerable_;
-  int body_id_;
-  int prepare_id_;
+  int assignment_id_;
 };
 
 
 class ExpressionStatement: public Statement {
  public:
+  explicit ExpressionStatement(Expression* expression)
+      : expression_(expression) { }
+
   DECLARE_NODE_TYPE(ExpressionStatement)
 
+  virtual bool IsInlineable() const;
+
+  virtual Assignment* StatementAsSimpleAssignment();
+  virtual CountOperation* StatementAsCountOperation();
+
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit ExpressionStatement(Expression* expression)
-      : expression_(expression) { }
-
  private:
   Expression* expression_;
 };
@@ -905,15 +619,13 @@
 
 class ContinueStatement: public Statement {
  public:
+  explicit ContinueStatement(IterationStatement* target)
+      : target_(target) { }
+
   DECLARE_NODE_TYPE(ContinueStatement)
 
   IterationStatement* target() const { return target_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit ContinueStatement(IterationStatement* target)
-      : target_(target) { }
+  virtual bool IsInlineable() const;
 
  private:
   IterationStatement* target_;
@@ -922,15 +634,13 @@
 
 class BreakStatement: public Statement {
  public:
+  explicit BreakStatement(BreakableStatement* target)
+      : target_(target) { }
+
   DECLARE_NODE_TYPE(BreakStatement)
 
   BreakableStatement* target() const { return target_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit BreakStatement(BreakableStatement* target)
-      : target_(target) { }
+  virtual bool IsInlineable() const;
 
  private:
   BreakableStatement* target_;
@@ -939,15 +649,13 @@
 
 class ReturnStatement: public Statement {
  public:
+  explicit ReturnStatement(Expression* expression)
+      : expression_(expression) { }
+
   DECLARE_NODE_TYPE(ReturnStatement)
 
   Expression* expression() const { return expression_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit ReturnStatement(Expression* expression)
-      : expression_(expression) { }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
@@ -956,17 +664,15 @@
 
 class WithStatement: public Statement {
  public:
+  WithStatement(Expression* expression, Statement* statement)
+      : expression_(expression), statement_(statement) { }
+
   DECLARE_NODE_TYPE(WithStatement)
 
   Expression* expression() const { return expression_; }
   Statement* statement() const { return statement_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  WithStatement(Expression* expression, Statement* statement)
-      : expression_(expression),
-        statement_(statement) { }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* expression_;
@@ -998,8 +704,6 @@
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
-  bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
-  bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
   bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
 
  private:
@@ -1007,13 +711,7 @@
   Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
-  enum CompareTypeFeedback {
-    NONE,
-    SMI_ONLY,
-    SYMBOL_ONLY,
-    STRING_ONLY,
-    OBJECT_ONLY
-  };
+  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
   CompareTypeFeedback compare_type_;
   int compare_id_;
   int entry_id_;
@@ -1022,6 +720,8 @@
 
 class SwitchStatement: public BreakableStatement {
  public:
+  inline SwitchStatement(Isolate* isolate, ZoneStringList* labels);
+
   DECLARE_NODE_TYPE(SwitchStatement)
 
   void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
@@ -1031,14 +731,7 @@
 
   Expression* tag() const { return tag_; }
   ZoneList<CaseClause*>* cases() const { return cases_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  SwitchStatement(Isolate* isolate, ZoneStringList* labels)
-      : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS),
-        tag_(NULL),
-        cases_(NULL) { }
+  virtual bool IsInlineable() const;
 
  private:
   Expression* tag_;
@@ -1053,22 +746,6 @@
 // given if-statement has a then- or an else-part containing code.
 class IfStatement: public Statement {
  public:
-  DECLARE_NODE_TYPE(IfStatement)
-
-  bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
-  bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
-
-  Expression* condition() const { return condition_; }
-  Statement* then_statement() const { return then_statement_; }
-  Statement* else_statement() const { return else_statement_; }
-
-  int IfId() const { return if_id_; }
-  int ThenId() const { return then_id_; }
-  int ElseId() const { return else_id_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   IfStatement(Isolate* isolate,
               Expression* condition,
               Statement* then_statement,
@@ -1081,6 +758,21 @@
         else_id_(GetNextId(isolate)) {
   }
 
+  DECLARE_NODE_TYPE(IfStatement)
+
+  virtual bool IsInlineable() const;
+
+  bool HasThenStatement() const { return !then_statement()->IsEmpty(); }
+  bool HasElseStatement() const { return !else_statement()->IsEmpty(); }
+
+  Expression* condition() const { return condition_; }
+  Statement* then_statement() const { return then_statement_; }
+  Statement* else_statement() const { return else_statement_; }
+
+  int IfId() const { return if_id_; }
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
+
  private:
   Expression* condition_;
   Statement* then_statement_;
@@ -1095,7 +787,7 @@
 // stack in the compiler; this should probably be reworked.
 class TargetCollector: public AstNode {
  public:
-  TargetCollector() : targets_(0) { }
+  TargetCollector(): targets_(0) { }
 
   // Adds a jump target to the collector. The collector stores a pointer not
   // a copy of the target to make binding work, so make sure not to pass in
@@ -1107,6 +799,7 @@
   virtual TargetCollector* AsTargetCollector() { return this; }
 
   ZoneList<Label*>* targets() { return &targets_; }
+  virtual bool IsInlineable() const;
 
  private:
   ZoneList<Label*> targets_;
@@ -1115,24 +808,18 @@
 
 class TryStatement: public Statement {
  public:
+  explicit TryStatement(Block* try_block)
+      : try_block_(try_block), escaping_targets_(NULL) { }
+
   void set_escaping_targets(ZoneList<Label*>* targets) {
     escaping_targets_ = targets;
   }
 
-  int index() const { return index_; }
   Block* try_block() const { return try_block_; }
   ZoneList<Label*>* escaping_targets() const { return escaping_targets_; }
-
- protected:
-  TryStatement(int index, Block* try_block)
-      : index_(index),
-        try_block_(try_block),
-        escaping_targets_(NULL) { }
+  virtual bool IsInlineable() const;
 
  private:
-  // Unique (per-function) index of this handler.  This is not an AST ID.
-  int index_;
-
   Block* try_block_;
   ZoneList<Label*>* escaping_targets_;
 };
@@ -1140,25 +827,22 @@
 
 class TryCatchStatement: public TryStatement {
  public:
+  TryCatchStatement(Block* try_block,
+                    Scope* scope,
+                    Variable* variable,
+                    Block* catch_block)
+      : TryStatement(try_block),
+        scope_(scope),
+        variable_(variable),
+        catch_block_(catch_block) {
+  }
+
   DECLARE_NODE_TYPE(TryCatchStatement)
 
   Scope* scope() { return scope_; }
   Variable* variable() { return variable_; }
   Block* catch_block() const { return catch_block_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  TryCatchStatement(int index,
-                    Block* try_block,
-                    Scope* scope,
-                    Variable* variable,
-                    Block* catch_block)
-      : TryStatement(index, try_block),
-        scope_(scope),
-        variable_(variable),
-        catch_block_(catch_block) {
-  }
+  virtual bool IsInlineable() const;
 
  private:
   Scope* scope_;
@@ -1169,16 +853,14 @@
 
 class TryFinallyStatement: public TryStatement {
  public:
+  TryFinallyStatement(Block* try_block, Block* finally_block)
+      : TryStatement(try_block),
+        finally_block_(finally_block) { }
+
   DECLARE_NODE_TYPE(TryFinallyStatement)
 
   Block* finally_block() const { return finally_block_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  TryFinallyStatement(int index, Block* try_block, Block* finally_block)
-      : TryStatement(index, try_block),
-        finally_block_(finally_block) { }
+  virtual bool IsInlineable() const;
 
  private:
   Block* finally_block_;
@@ -1188,11 +870,7 @@
 class DebuggerStatement: public Statement {
  public:
   DECLARE_NODE_TYPE(DebuggerStatement)
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  DebuggerStatement() {}
+  virtual bool IsInlineable() const;
 };
 
 
@@ -1200,17 +878,25 @@
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  EmptyStatement() {}
+  virtual bool IsInlineable() const;
 };
 
 
 class Literal: public Expression {
  public:
+  Literal(Isolate* isolate, Handle<Object> handle)
+      : Expression(isolate), handle_(handle) { }
+
   DECLARE_NODE_TYPE(Literal)
 
+  virtual bool IsTrivial() { return true; }
+  virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
+
+  // Check if this literal is identical to the other literal.
+  bool IsIdenticalTo(const Literal* other) const {
+    return handle_.is_identical_to(other->handle_);
+  }
+
   virtual bool IsPropertyName() {
     if (handle_->IsSymbol()) {
       uint32_t ignored;
@@ -1242,27 +928,9 @@
   }
 
   Handle<Object> handle() const { return handle_; }
-
-  // Support for using Literal as a HashMap key. NOTE: Currently, this works
-  // only for string and number literals!
-  uint32_t Hash() { return ToString()->Hash(); }
-
-  static bool Match(void* literal1, void* literal2) {
-    Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
-    Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
-    return s1->Equals(*s2);
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Literal(Isolate* isolate, Handle<Object> handle)
-      : Expression(isolate),
-        handle_(handle) { }
+  virtual bool IsInlineable() const;
 
  private:
-  Handle<String> ToString();
-
   Handle<Object> handle_;
 };
 
@@ -1270,6 +938,15 @@
 // Base class for literals that needs space in the corresponding JSFunction.
 class MaterializedLiteral: public Expression {
  public:
+  MaterializedLiteral(Isolate* isolate,
+                      int literal_index,
+                      bool is_simple,
+                      int depth)
+      : Expression(isolate),
+        literal_index_(literal_index),
+        is_simple_(is_simple),
+        depth_(depth) {}
+
   virtual MaterializedLiteral* AsMaterializedLiteral() { return this; }
 
   int literal_index() { return literal_index_; }
@@ -1279,16 +956,7 @@
   bool is_simple() const { return is_simple_; }
 
   int depth() const { return depth_; }
-
- protected:
-  MaterializedLiteral(Isolate* isolate,
-                      int literal_index,
-                      bool is_simple,
-                      int depth)
-      : Expression(isolate),
-        literal_index_(literal_index),
-        is_simple_(is_simple),
-        depth_(depth) {}
+  virtual bool IsInlineable() const;
 
  private:
   int literal_index_;
@@ -1314,36 +982,39 @@
       PROTOTYPE              // Property is __proto__.
     };
 
-    Property(Literal* key, Expression* value, Isolate* isolate);
+    Property(Literal* key, Expression* value);
+    Property(bool is_getter, FunctionLiteral* value);
 
     Literal* key() { return key_; }
     Expression* value() { return value_; }
     Kind kind() { return kind_; }
 
-    // Type feedback information.
-    void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-    bool IsMonomorphic() { return !receiver_type_.is_null(); }
-    Handle<Map> GetReceiverType() { return receiver_type_; }
-
     bool IsCompileTimeValue();
 
     void set_emit_store(bool emit_store);
     bool emit_store();
 
-   protected:
-    template<class> friend class AstNodeFactory;
-
-    Property(bool is_getter, FunctionLiteral* value);
-    void set_key(Literal* key) { key_ = key; }
-
    private:
     Literal* key_;
     Expression* value_;
     Kind kind_;
     bool emit_store_;
-    Handle<Map> receiver_type_;
   };
 
+  ObjectLiteral(Isolate* isolate,
+                Handle<FixedArray> constant_properties,
+                ZoneList<Property*>* properties,
+                int literal_index,
+                bool is_simple,
+                bool fast_elements,
+                int depth,
+                bool has_function)
+      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
+        constant_properties_(constant_properties),
+        properties_(properties),
+        fast_elements_(fast_elements),
+        has_function_(has_function) {}
+
   DECLARE_NODE_TYPE(ObjectLiteral)
 
   Handle<FixedArray> constant_properties() const {
@@ -1366,29 +1037,6 @@
     kHasFunction = 1 << 1
   };
 
-  struct Accessors: public ZoneObject {
-    Accessors() : getter(NULL), setter(NULL) { }
-    Expression* getter;
-    Expression* setter;
-  };
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  ObjectLiteral(Isolate* isolate,
-                Handle<FixedArray> constant_properties,
-                ZoneList<Property*>* properties,
-                int literal_index,
-                bool is_simple,
-                bool fast_elements,
-                int depth,
-                bool has_function)
-      : MaterializedLiteral(isolate, literal_index, is_simple, depth),
-        constant_properties_(constant_properties),
-        properties_(properties),
-        fast_elements_(fast_elements),
-        has_function_(has_function) {}
-
  private:
   Handle<FixedArray> constant_properties_;
   ZoneList<Property*>* properties_;
@@ -1400,14 +1048,6 @@
 // Node for capturing a regexp literal.
 class RegExpLiteral: public MaterializedLiteral {
  public:
-  DECLARE_NODE_TYPE(RegExpLiteral)
-
-  Handle<String> pattern() const { return pattern_; }
-  Handle<String> flags() const { return flags_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   RegExpLiteral(Isolate* isolate,
                 Handle<String> pattern,
                 Handle<String> flags,
@@ -1416,6 +1056,11 @@
         pattern_(pattern),
         flags_(flags) {}
 
+  DECLARE_NODE_TYPE(RegExpLiteral)
+
+  Handle<String> pattern() const { return pattern_; }
+  Handle<String> flags() const { return flags_; }
+
  private:
   Handle<String> pattern_;
   Handle<String> flags_;
@@ -1425,17 +1070,6 @@
 // for minimizing the work when constructing it at runtime.
 class ArrayLiteral: public MaterializedLiteral {
  public:
-  DECLARE_NODE_TYPE(ArrayLiteral)
-
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
-  ZoneList<Expression*>* values() const { return values_; }
-
-  // Return an AST id for an element that is used in simulate instructions.
-  int GetIdForElement(int i) { return first_element_id_ + i; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   ArrayLiteral(Isolate* isolate,
                Handle<FixedArray> constant_elements,
                ZoneList<Expression*>* values,
@@ -1447,6 +1081,14 @@
         values_(values),
         first_element_id_(ReserveIdRange(isolate, values->length())) {}
 
+  DECLARE_NODE_TYPE(ArrayLiteral)
+
+  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  ZoneList<Expression*>* values() const { return values_; }
+
+  // Return an AST id for an element that is used in simulate instructions.
+  int GetIdForElement(int i) { return first_element_id_ + i; }
+
  private:
   Handle<FixedArray> constant_elements_;
   ZoneList<Expression*>* values_;
@@ -1456,63 +1098,77 @@
 
 class VariableProxy: public Expression {
  public:
+  VariableProxy(Isolate* isolate, Variable* var);
+
   DECLARE_NODE_TYPE(VariableProxy)
 
   virtual bool IsValidLeftHandSide() {
     return var_ == NULL ? true : var_->IsValidLeftHandSide();
   }
 
+  virtual bool IsTrivial() {
+    // Reading from a mutable variable is a side effect, but the
+    // variable for 'this' is immutable.
+    return is_this_ || is_trivial_;
+  }
+
+  virtual bool IsInlineable() const;
+
   bool IsVariable(Handle<String> n) {
     return !is_this() && name().is_identical_to(n);
   }
 
   bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
 
-  bool IsLValue() {
-    return is_lvalue_;
-  }
-
   Handle<String> name() const { return name_; }
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
+  bool inside_with() const { return inside_with_; }
   int position() const { return position_; }
-  Interface* interface() const { return interface_; }
-
 
   void MarkAsTrivial() { is_trivial_ = true; }
-  void MarkAsLValue() { is_lvalue_ = true; }
 
   // Bind this proxy to the variable var.
   void BindTo(Variable* var);
 
  protected:
-  template<class> friend class AstNodeFactory;
-
-  VariableProxy(Isolate* isolate, Variable* var);
+  Handle<String> name_;
+  Variable* var_;  // resolved variable, or NULL
+  bool is_this_;
+  bool inside_with_;
+  bool is_trivial_;
+  int position_;
 
   VariableProxy(Isolate* isolate,
                 Handle<String> name,
                 bool is_this,
-                int position,
-                Interface* interface);
+                bool inside_with,
+                int position = RelocInfo::kNoPosition);
 
-  Handle<String> name_;
-  Variable* var_;  // resolved variable, or NULL
-  bool is_this_;
-  bool is_trivial_;
-  // True if this variable proxy is being used in an assignment
-  // or with a increment/decrement operator.
-  bool is_lvalue_;
-  int position_;
-  Interface* interface_;
+  friend class Scope;
 };
 
 
 class Property: public Expression {
  public:
+  Property(Isolate* isolate,
+           Expression* obj,
+           Expression* key,
+           int pos)
+      : Expression(isolate),
+        obj_(obj),
+        key_(key),
+        pos_(pos),
+        is_monomorphic_(false),
+        is_array_length_(false),
+        is_string_length_(false),
+        is_string_access_(false),
+        is_function_prototype_(false) { }
+
   DECLARE_NODE_TYPE(Property)
 
   virtual bool IsValidLeftHandSide() { return true; }
+  virtual bool IsInlineable() const;
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
@@ -1526,26 +1182,7 @@
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
-  bool IsArrayLength() { return is_array_length_; }
-  bool IsUninitialized() { return is_uninitialized_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Property(Isolate* isolate,
-           Expression* obj,
-           Expression* key,
-           int pos)
-      : Expression(isolate),
-        obj_(obj),
-        key_(key),
-        pos_(pos),
-        is_monomorphic_(false),
-        is_uninitialized_(false),
-        is_array_length_(false),
-        is_string_length_(false),
-        is_string_access_(false),
-        is_function_prototype_(false) { }
+  virtual bool IsArrayLength() { return is_array_length_; }
 
  private:
   Expression* obj_;
@@ -1554,7 +1191,6 @@
 
   SmallMapList receiver_types_;
   bool is_monomorphic_ : 1;
-  bool is_uninitialized_ : 1;
   bool is_array_length_ : 1;
   bool is_string_length_ : 1;
   bool is_string_access_ : 1;
@@ -1564,8 +1200,23 @@
 
 class Call: public Expression {
  public:
+  Call(Isolate* isolate,
+       Expression* expression,
+       ZoneList<Expression*>* arguments,
+       int pos)
+      : Expression(isolate),
+        expression_(expression),
+        arguments_(arguments),
+        pos_(pos),
+        is_monomorphic_(false),
+        check_type_(RECEIVER_MAP_CHECK),
+        return_id_(GetNextId(isolate)) {
+  }
+
   DECLARE_NODE_TYPE(Call)
 
+  virtual bool IsInlineable() const;
+
   Expression* expression() const { return expression_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   virtual int position() const { return pos_; }
@@ -1590,21 +1241,6 @@
   bool return_is_recorded_;
 #endif
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Call(Isolate* isolate,
-       Expression* expression,
-       ZoneList<Expression*>* arguments,
-       int pos)
-      : Expression(isolate),
-        expression_(expression),
-        arguments_(arguments),
-        pos_(pos),
-        is_monomorphic_(false),
-        check_type_(RECEIVER_MAP_CHECK),
-        return_id_(GetNextId(isolate)) { }
-
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
@@ -1623,22 +1259,6 @@
 
 class CallNew: public Expression {
  public:
-  DECLARE_NODE_TYPE(CallNew)
-
-  Expression* expression() const { return expression_; }
-  ZoneList<Expression*>* arguments() const { return arguments_; }
-  virtual int position() const { return pos_; }
-
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  virtual bool IsMonomorphic() { return is_monomorphic_; }
-  Handle<JSFunction> target() { return target_; }
-
-  // Bailout support.
-  int ReturnId() const { return return_id_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   CallNew(Isolate* isolate,
           Expression* expression,
           ZoneList<Expression*>* arguments,
@@ -1646,19 +1266,20 @@
       : Expression(isolate),
         expression_(expression),
         arguments_(arguments),
-        pos_(pos),
-        is_monomorphic_(false),
-        return_id_(GetNextId(isolate)) { }
+        pos_(pos) { }
+
+  DECLARE_NODE_TYPE(CallNew)
+
+  virtual bool IsInlineable() const;
+
+  Expression* expression() const { return expression_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  virtual int position() const { return pos_; }
 
  private:
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   int pos_;
-
-  bool is_monomorphic_;
-  Handle<JSFunction> target_;
-
-  int return_id_;
 };
 
 
@@ -1668,16 +1289,6 @@
 // implemented in JavaScript (see "v8natives.js").
 class CallRuntime: public Expression {
  public:
-  DECLARE_NODE_TYPE(CallRuntime)
-
-  Handle<String> name() const { return name_; }
-  const Runtime::Function* function() const { return function_; }
-  ZoneList<Expression*>* arguments() const { return arguments_; }
-  bool is_jsruntime() const { return function_ == NULL; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   CallRuntime(Isolate* isolate,
               Handle<String> name,
               const Runtime::Function* function,
@@ -1687,6 +1298,15 @@
         function_(function),
         arguments_(arguments) { }
 
+  DECLARE_NODE_TYPE(CallRuntime)
+
+  virtual bool IsInlineable() const;
+
+  Handle<String> name() const { return name_; }
+  const Runtime::Function* function() const { return function_; }
+  ZoneList<Expression*>* arguments() const { return arguments_; }
+  bool is_jsruntime() const { return function_ == NULL; }
+
  private:
   Handle<String> name_;
   const Runtime::Function* function_;
@@ -1696,53 +1316,49 @@
 
 class UnaryOperation: public Expression {
  public:
+  UnaryOperation(Isolate* isolate,
+                 Token::Value op,
+                 Expression* expression,
+                 int pos)
+      : Expression(isolate), op_(op), expression_(expression), pos_(pos) {
+    ASSERT(Token::IsUnaryOp(op));
+  }
+
   DECLARE_NODE_TYPE(UnaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
   Expression* expression() const { return expression_; }
   virtual int position() const { return pos_; }
 
-  int MaterializeTrueId() { return materialize_true_id_; }
-  int MaterializeFalseId() { return materialize_false_id_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  UnaryOperation(Isolate* isolate,
-                 Token::Value op,
-                 Expression* expression,
-                 int pos)
-      : Expression(isolate),
-        op_(op),
-        expression_(expression),
-        pos_(pos),
-        materialize_true_id_(AstNode::kNoNumber),
-        materialize_false_id_(AstNode::kNoNumber) {
-    ASSERT(Token::IsUnaryOp(op));
-    if (op == Token::NOT) {
-      materialize_true_id_ = GetNextId(isolate);
-      materialize_false_id_ = GetNextId(isolate);
-    }
-  }
-
  private:
   Token::Value op_;
   Expression* expression_;
   int pos_;
-
-  // For unary not (Token::NOT), the AST ids where true and false will
-  // actually be materialized, respectively.
-  int materialize_true_id_;
-  int materialize_false_id_;
 };
 
 
 class BinaryOperation: public Expression {
  public:
+  BinaryOperation(Isolate* isolate,
+                  Token::Value op,
+                  Expression* left,
+                  Expression* right,
+                  int pos)
+      : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
+    ASSERT(Token::IsBinaryOp(op));
+    right_id_ = (op == Token::AND || op == Token::OR)
+        ? static_cast<int>(GetNextId(isolate))
+        : AstNode::kNoNumber;
+  }
+
   DECLARE_NODE_TYPE(BinaryOperation)
 
+  virtual bool IsInlineable() const;
+
   virtual bool ResultOverwriteAllowed();
 
   Token::Value op() const { return op_; }
@@ -1753,21 +1369,6 @@
   // Bailout support.
   int RightId() const { return right_id_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  BinaryOperation(Isolate* isolate,
-                  Token::Value op,
-                  Expression* left,
-                  Expression* right,
-                  int pos)
-      : Expression(isolate), op_(op), left_(left), right_(right), pos_(pos) {
-    ASSERT(Token::IsBinaryOp(op));
-    right_id_ = (op == Token::AND || op == Token::OR)
-        ? GetNextId(isolate)
-        : AstNode::kNoNumber;
-  }
-
  private:
   Token::Value op_;
   Expression* left_;
@@ -1781,6 +1382,19 @@
 
 class CountOperation: public Expression {
  public:
+  CountOperation(Isolate* isolate,
+                 Token::Value op,
+                 bool is_prefix,
+                 Expression* expr,
+                 int pos)
+      : Expression(isolate),
+        op_(op),
+        is_prefix_(is_prefix),
+        expression_(expr),
+        pos_(pos),
+        assignment_id_(GetNextId(isolate)),
+        count_id_(GetNextId(isolate)) {}
+
   DECLARE_NODE_TYPE(CountOperation)
 
   bool is_prefix() const { return is_prefix_; }
@@ -1796,6 +1410,8 @@
 
   virtual void MarkAsStatement() { is_prefix_ = true; }
 
+  virtual bool IsInlineable() const;
+
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   virtual bool IsMonomorphic() { return is_monomorphic_; }
   virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; }
@@ -1804,22 +1420,6 @@
   int AssignmentId() const { return assignment_id_; }
   int CountId() const { return count_id_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  CountOperation(Isolate* isolate,
-                 Token::Value op,
-                 bool is_prefix,
-                 Expression* expr,
-                 int pos)
-      : Expression(isolate),
-        op_(op),
-        is_prefix_(is_prefix),
-        expression_(expr),
-        pos_(pos),
-        assignment_id_(GetNextId(isolate)),
-        count_id_(GetNextId(isolate)) {}
-
  private:
   Token::Value op_;
   bool is_prefix_;
@@ -1834,26 +1434,6 @@
 
 class CompareOperation: public Expression {
  public:
-  DECLARE_NODE_TYPE(CompareOperation)
-
-  Token::Value op() const { return op_; }
-  Expression* left() const { return left_; }
-  Expression* right() const { return right_; }
-  virtual int position() const { return pos_; }
-
-  // Type feedback information.
-  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
-  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
-
-  // Match special cases.
-  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
-  bool IsLiteralCompareUndefined(Expression** expr);
-  bool IsLiteralCompareNull(Expression** expr);
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   CompareOperation(Isolate* isolate,
                    Token::Value op,
                    Expression* left,
@@ -1868,6 +1448,24 @@
     ASSERT(Token::IsCompareOp(op));
   }
 
+  DECLARE_NODE_TYPE(CompareOperation)
+
+  Token::Value op() const { return op_; }
+  Expression* left() const { return left_; }
+  Expression* right() const { return right_; }
+  virtual int position() const { return pos_; }
+
+  virtual bool IsInlineable() const;
+
+  // Type feedback information.
+  void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+  bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
+
+  // Match special cases.
+  bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
+  bool IsLiteralCompareUndefined(Expression** expr);
+
  private:
   Token::Value op_;
   Expression* left_;
@@ -1879,23 +1477,27 @@
 };
 
 
+class CompareToNull: public Expression {
+ public:
+  CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
+      : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
+
+  DECLARE_NODE_TYPE(CompareToNull)
+
+  virtual bool IsInlineable() const;
+
+  bool is_strict() const { return is_strict_; }
+  Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
+  Expression* expression() const { return expression_; }
+
+ private:
+  bool is_strict_;
+  Expression* expression_;
+};
+
+
 class Conditional: public Expression {
  public:
-  DECLARE_NODE_TYPE(Conditional)
-
-  Expression* condition() const { return condition_; }
-  Expression* then_expression() const { return then_expression_; }
-  Expression* else_expression() const { return else_expression_; }
-
-  int then_expression_position() const { return then_expression_position_; }
-  int else_expression_position() const { return else_expression_position_; }
-
-  int ThenId() const { return then_id_; }
-  int ElseId() const { return else_id_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
   Conditional(Isolate* isolate,
               Expression* condition,
               Expression* then_expression,
@@ -1909,7 +1511,22 @@
         then_expression_position_(then_expression_position),
         else_expression_position_(else_expression_position),
         then_id_(GetNextId(isolate)),
-        else_id_(GetNextId(isolate)) { }
+        else_id_(GetNextId(isolate)) {
+  }
+
+  DECLARE_NODE_TYPE(Conditional)
+
+  virtual bool IsInlineable() const;
+
+  Expression* condition() const { return condition_; }
+  Expression* then_expression() const { return then_expression_; }
+  Expression* else_expression() const { return else_expression_; }
+
+  int then_expression_position() const { return then_expression_position_; }
+  int else_expression_position() const { return else_expression_position_; }
+
+  int ThenId() const { return then_id_; }
+  int ElseId() const { return else_id_; }
 
  private:
   Expression* condition_;
@@ -1924,8 +1541,16 @@
 
 class Assignment: public Expression {
  public:
+  Assignment(Isolate* isolate,
+             Token::Value op,
+             Expression* target,
+             Expression* value,
+             int pos);
+
   DECLARE_NODE_TYPE(Assignment)
 
+  virtual bool IsInlineable() const;
+
   Assignment* AsSimpleAssignment() { return !is_compound() ? this : NULL; }
 
   Token::Value binary_op() const;
@@ -1957,25 +1582,6 @@
   int CompoundLoadId() const { return compound_load_id_; }
   int AssignmentId() const { return assignment_id_; }
 
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Assignment(Isolate* isolate,
-             Token::Value op,
-             Expression* target,
-             Expression* value,
-             int pos);
-
-  template<class Visitor>
-  void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) {
-    ASSERT(Token::IsAssignmentOp(op_));
-    if (is_compound()) {
-      binary_operation_ =
-          factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1);
-      compound_load_id_ = GetNextId(isolate);
-    }
-  }
-
  private:
   Token::Value op_;
   Expression* target_;
@@ -1995,16 +1601,14 @@
 
 class Throw: public Expression {
  public:
+  Throw(Isolate* isolate, Expression* exception, int pos)
+      : Expression(isolate), exception_(exception), pos_(pos) {}
+
   DECLARE_NODE_TYPE(Throw)
 
   Expression* exception() const { return exception_; }
   virtual int position() const { return pos_; }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  Throw(Isolate* isolate, Expression* exception, int pos)
-      : Expression(isolate), exception_(exception), pos_(pos) {}
+  virtual bool IsInlineable() const;
 
  private:
   Expression* exception_;
@@ -2020,15 +1624,38 @@
     DECLARATION
   };
 
-  enum ParameterFlag {
-    kNoDuplicateParameters = 0,
-    kHasDuplicateParameters = 1
-  };
-
-  enum IsFunctionFlag {
-    kGlobalOrEval,
-    kIsFunction
-  };
+  FunctionLiteral(Isolate* isolate,
+                  Handle<String> name,
+                  Scope* scope,
+                  ZoneList<Statement*>* body,
+                  int materialized_literal_count,
+                  int expected_property_count,
+                  bool has_only_simple_this_property_assignments,
+                  Handle<FixedArray> this_property_assignments,
+                  int num_parameters,
+                  int start_position,
+                  int end_position,
+                  Type type,
+                  bool has_duplicate_parameters)
+      : Expression(isolate),
+        name_(name),
+        scope_(scope),
+        body_(body),
+        materialized_literal_count_(materialized_literal_count),
+        expected_property_count_(expected_property_count),
+        has_only_simple_this_property_assignments_(
+            has_only_simple_this_property_assignments),
+        this_property_assignments_(this_property_assignments),
+        num_parameters_(num_parameters),
+        start_position_(start_position),
+        end_position_(end_position),
+        function_token_position_(RelocInfo::kNoPosition),
+        inferred_name_(HEAP->empty_string()),
+        is_expression_(type != DECLARATION),
+        is_anonymous_(type == ANONYMOUS_EXPRESSION),
+        pretenure_(false),
+        has_duplicate_parameters_(has_duplicate_parameters) {
+  }
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
@@ -2037,24 +1664,21 @@
   ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
   int function_token_position() const { return function_token_position_; }
-  int start_position() const;
-  int end_position() const;
-  int SourceSize() const { return end_position() - start_position(); }
-  bool is_expression() const { return IsExpression::decode(bitfield_); }
-  bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
-  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
-  LanguageMode language_mode() const;
+  int start_position() const { return start_position_; }
+  int end_position() const { return end_position_; }
+  bool is_expression() const { return is_expression_; }
+  bool is_anonymous() const { return is_anonymous_; }
+  bool strict_mode() const;
 
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
-  int handler_count() { return handler_count_; }
   bool has_only_simple_this_property_assignments() {
-    return HasOnlySimpleThisPropertyAssignments::decode(bitfield_);
+      return has_only_simple_this_property_assignments_;
   }
   Handle<FixedArray> this_property_assignments() {
       return this_property_assignments_;
   }
-  int parameter_count() { return parameter_count_; }
+  int num_parameters() { return num_parameters_; }
 
   bool AllowsLazyCompilation();
 
@@ -2068,98 +1692,45 @@
     inferred_name_ = inferred_name;
   }
 
-  bool pretenure() { return Pretenure::decode(bitfield_); }
-  void set_pretenure() { bitfield_ |= Pretenure::encode(true); }
+  bool pretenure() { return pretenure_; }
+  void set_pretenure(bool value) { pretenure_ = value; }
+  virtual bool IsInlineable() const;
 
-  bool has_duplicate_parameters() {
-    return HasDuplicateParameters::decode(bitfield_);
-  }
-
-  bool is_function() { return IsFunction::decode(bitfield_) == kIsFunction; }
-
-  int ast_node_count() { return ast_properties_.node_count(); }
-  AstProperties::Flags* flags() { return ast_properties_.flags(); }
-  void set_ast_properties(AstProperties* ast_properties) {
-    ast_properties_ = *ast_properties;
-  }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  FunctionLiteral(Isolate* isolate,
-                  Handle<String> name,
-                  Scope* scope,
-                  ZoneList<Statement*>* body,
-                  int materialized_literal_count,
-                  int expected_property_count,
-                  int handler_count,
-                  bool has_only_simple_this_property_assignments,
-                  Handle<FixedArray> this_property_assignments,
-                  int parameter_count,
-                  Type type,
-                  ParameterFlag has_duplicate_parameters,
-                  IsFunctionFlag is_function)
-      : Expression(isolate),
-        name_(name),
-        scope_(scope),
-        body_(body),
-        this_property_assignments_(this_property_assignments),
-        inferred_name_(isolate->factory()->empty_string()),
-        materialized_literal_count_(materialized_literal_count),
-        expected_property_count_(expected_property_count),
-        handler_count_(handler_count),
-        parameter_count_(parameter_count),
-        function_token_position_(RelocInfo::kNoPosition) {
-    bitfield_ =
-        HasOnlySimpleThisPropertyAssignments::encode(
-            has_only_simple_this_property_assignments) |
-        IsExpression::encode(type != DECLARATION) |
-        IsAnonymous::encode(type == ANONYMOUS_EXPRESSION) |
-        Pretenure::encode(false) |
-        HasDuplicateParameters::encode(has_duplicate_parameters) |
-        IsFunction::encode(is_function);
-  }
+  bool has_duplicate_parameters() { return has_duplicate_parameters_; }
 
  private:
   Handle<String> name_;
   Scope* scope_;
   ZoneList<Statement*>* body_;
-  Handle<FixedArray> this_property_assignments_;
-  Handle<String> inferred_name_;
-  AstProperties ast_properties_;
-
   int materialized_literal_count_;
   int expected_property_count_;
-  int handler_count_;
-  int parameter_count_;
+  bool has_only_simple_this_property_assignments_;
+  Handle<FixedArray> this_property_assignments_;
+  int num_parameters_;
+  int start_position_;
+  int end_position_;
   int function_token_position_;
-
-  unsigned bitfield_;
-  class HasOnlySimpleThisPropertyAssignments: public BitField<bool, 0, 1> {};
-  class IsExpression: public BitField<bool, 1, 1> {};
-  class IsAnonymous: public BitField<bool, 2, 1> {};
-  class Pretenure: public BitField<bool, 3, 1> {};
-  class HasDuplicateParameters: public BitField<ParameterFlag, 4, 1> {};
-  class IsFunction: public BitField<IsFunctionFlag, 5, 1> {};
+  Handle<String> inferred_name_;
+  bool is_expression_;
+  bool is_anonymous_;
+  bool pretenure_;
+  bool has_duplicate_parameters_;
 };
 
 
 class SharedFunctionInfoLiteral: public Expression {
  public:
+  SharedFunctionInfoLiteral(
+      Isolate* isolate,
+      Handle<SharedFunctionInfo> shared_function_info)
+      : Expression(isolate), shared_function_info_(shared_function_info) { }
+
   DECLARE_NODE_TYPE(SharedFunctionInfoLiteral)
 
   Handle<SharedFunctionInfo> shared_function_info() const {
     return shared_function_info_;
   }
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  SharedFunctionInfoLiteral(
-      Isolate* isolate,
-      Handle<SharedFunctionInfo> shared_function_info)
-      : Expression(isolate),
-        shared_function_info_(shared_function_info) { }
+  virtual bool IsInlineable() const;
 
  private:
   Handle<SharedFunctionInfo> shared_function_info_;
@@ -2168,16 +1739,11 @@
 
 class ThisFunction: public Expression {
  public:
+  explicit ThisFunction(Isolate* isolate) : Expression(isolate) {}
   DECLARE_NODE_TYPE(ThisFunction)
-
- protected:
-  template<class> friend class AstNodeFactory;
-
-  explicit ThisFunction(Isolate* isolate): Expression(isolate) {}
+  virtual bool IsInlineable() const;
 };
 
-#undef DECLARE_NODE_TYPE
-
 
 // ----------------------------------------------------------------------------
 // Regular expressions
@@ -2530,23 +2096,13 @@
   virtual bool IsEmpty();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
-  static RegExpEmpty* GetInstance() {
-    static RegExpEmpty* instance = ::new RegExpEmpty();
-    return instance;
-  }
+  static RegExpEmpty* GetInstance() { return &kInstance; }
+ private:
+  static RegExpEmpty kInstance;
 };
 
 
 // ----------------------------------------------------------------------------
-// Out-of-line inline constructors (to side-step cyclic dependencies).
-
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
-    : Module(proxy->interface()),
-      proxy_(proxy) {
-}
-
-
-// ----------------------------------------------------------------------------
 // Basic visitor
 // - leaf node visitors are abstract.
 
@@ -2588,397 +2144,6 @@
 };
 
 
-// ----------------------------------------------------------------------------
-// Construction time visitor.
-
-class AstConstructionVisitor BASE_EMBEDDED {
- public:
-  AstConstructionVisitor() { }
-
-  AstProperties* ast_properties() { return &properties_; }
-
- private:
-  template<class> friend class AstNodeFactory;
-
-  // Node visitors.
-#define DEF_VISIT(type) \
-  void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  void increase_node_count() { properties_.add_node_count(1); }
-  void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
-
-  AstProperties properties_;
-};
-
-
-class AstNullVisitor BASE_EMBEDDED {
- public:
-  // Node visitors.
-#define DEF_VISIT(type) \
-  void Visit##type(type* node) {}
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-
-// ----------------------------------------------------------------------------
-// AstNode factory
-
-template<class Visitor>
-class AstNodeFactory BASE_EMBEDDED {
- public:
-  explicit AstNodeFactory(Isolate* isolate)
-      : isolate_(isolate),
-        zone_(isolate_->zone()) { }
-
-  Visitor* visitor() { return &visitor_; }
-
-#define VISIT_AND_RETURN(NodeType, node) \
-  visitor_.Visit##NodeType((node)); \
-  return node;
-
-  VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
-                                              VariableMode mode,
-                                              Scope* scope) {
-    VariableDeclaration* decl =
-        new(zone_) VariableDeclaration(proxy, mode, scope);
-    VISIT_AND_RETURN(VariableDeclaration, decl)
-  }
-
-  FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
-                                              VariableMode mode,
-                                              FunctionLiteral* fun,
-                                              Scope* scope) {
-    FunctionDeclaration* decl =
-        new(zone_) FunctionDeclaration(proxy, mode, fun, scope);
-    VISIT_AND_RETURN(FunctionDeclaration, decl)
-  }
-
-  ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
-                                          Module* module,
-                                          Scope* scope) {
-    ModuleDeclaration* decl =
-        new(zone_) ModuleDeclaration(proxy, module, scope);
-    VISIT_AND_RETURN(ModuleDeclaration, decl)
-  }
-
-  ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
-                                          Module* module,
-                                          Scope* scope) {
-    ImportDeclaration* decl =
-        new(zone_) ImportDeclaration(proxy, module, scope);
-    VISIT_AND_RETURN(ImportDeclaration, decl)
-  }
-
-  ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
-                                          Scope* scope) {
-    ExportDeclaration* decl =
-        new(zone_) ExportDeclaration(proxy, scope);
-    VISIT_AND_RETURN(ExportDeclaration, decl)
-  }
-
-  ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) {
-    ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface);
-    VISIT_AND_RETURN(ModuleLiteral, module)
-  }
-
-  ModuleVariable* NewModuleVariable(VariableProxy* proxy) {
-    ModuleVariable* module = new(zone_) ModuleVariable(proxy);
-    VISIT_AND_RETURN(ModuleVariable, module)
-  }
-
-  ModulePath* NewModulePath(Module* origin, Handle<String> name) {
-    ModulePath* module = new(zone_) ModulePath(origin, name);
-    VISIT_AND_RETURN(ModulePath, module)
-  }
-
-  ModuleUrl* NewModuleUrl(Handle<String> url) {
-    ModuleUrl* module = new(zone_) ModuleUrl(url);
-    VISIT_AND_RETURN(ModuleUrl, module)
-  }
-
-  Block* NewBlock(ZoneStringList* labels,
-                  int capacity,
-                  bool is_initializer_block) {
-    Block* block = new(zone_) Block(
-        isolate_, labels, capacity, is_initializer_block);
-    VISIT_AND_RETURN(Block, block)
-  }
-
-#define STATEMENT_WITH_LABELS(NodeType) \
-  NodeType* New##NodeType(ZoneStringList* labels) { \
-    NodeType* stmt = new(zone_) NodeType(isolate_, labels); \
-    VISIT_AND_RETURN(NodeType, stmt); \
-  }
-  STATEMENT_WITH_LABELS(DoWhileStatement)
-  STATEMENT_WITH_LABELS(WhileStatement)
-  STATEMENT_WITH_LABELS(ForStatement)
-  STATEMENT_WITH_LABELS(ForInStatement)
-  STATEMENT_WITH_LABELS(SwitchStatement)
-#undef STATEMENT_WITH_LABELS
-
-  ExpressionStatement* NewExpressionStatement(Expression* expression) {
-    ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
-    VISIT_AND_RETURN(ExpressionStatement, stmt)
-  }
-
-  ContinueStatement* NewContinueStatement(IterationStatement* target) {
-    ContinueStatement* stmt = new(zone_) ContinueStatement(target);
-    VISIT_AND_RETURN(ContinueStatement, stmt)
-  }
-
-  BreakStatement* NewBreakStatement(BreakableStatement* target) {
-    BreakStatement* stmt = new(zone_) BreakStatement(target);
-    VISIT_AND_RETURN(BreakStatement, stmt)
-  }
-
-  ReturnStatement* NewReturnStatement(Expression* expression) {
-    ReturnStatement* stmt = new(zone_) ReturnStatement(expression);
-    VISIT_AND_RETURN(ReturnStatement, stmt)
-  }
-
-  WithStatement* NewWithStatement(Expression* expression,
-                                  Statement* statement) {
-    WithStatement* stmt = new(zone_) WithStatement(expression, statement);
-    VISIT_AND_RETURN(WithStatement, stmt)
-  }
-
-  IfStatement* NewIfStatement(Expression* condition,
-                              Statement* then_statement,
-                              Statement* else_statement) {
-    IfStatement* stmt = new(zone_) IfStatement(
-        isolate_, condition, then_statement, else_statement);
-    VISIT_AND_RETURN(IfStatement, stmt)
-  }
-
-  TryCatchStatement* NewTryCatchStatement(int index,
-                                          Block* try_block,
-                                          Scope* scope,
-                                          Variable* variable,
-                                          Block* catch_block) {
-    TryCatchStatement* stmt = new(zone_) TryCatchStatement(
-        index, try_block, scope, variable, catch_block);
-    VISIT_AND_RETURN(TryCatchStatement, stmt)
-  }
-
-  TryFinallyStatement* NewTryFinallyStatement(int index,
-                                              Block* try_block,
-                                              Block* finally_block) {
-    TryFinallyStatement* stmt =
-        new(zone_) TryFinallyStatement(index, try_block, finally_block);
-    VISIT_AND_RETURN(TryFinallyStatement, stmt)
-  }
-
-  DebuggerStatement* NewDebuggerStatement() {
-    DebuggerStatement* stmt = new(zone_) DebuggerStatement();
-    VISIT_AND_RETURN(DebuggerStatement, stmt)
-  }
-
-  EmptyStatement* NewEmptyStatement() {
-    return new(zone_) EmptyStatement();
-  }
-
-  Literal* NewLiteral(Handle<Object> handle) {
-    Literal* lit = new(zone_) Literal(isolate_, handle);
-    VISIT_AND_RETURN(Literal, lit)
-  }
-
-  Literal* NewNumberLiteral(double number) {
-    return NewLiteral(isolate_->factory()->NewNumber(number, TENURED));
-  }
-
-  ObjectLiteral* NewObjectLiteral(
-      Handle<FixedArray> constant_properties,
-      ZoneList<ObjectLiteral::Property*>* properties,
-      int literal_index,
-      bool is_simple,
-      bool fast_elements,
-      int depth,
-      bool has_function) {
-    ObjectLiteral* lit = new(zone_) ObjectLiteral(
-        isolate_, constant_properties, properties, literal_index,
-        is_simple, fast_elements, depth, has_function);
-    VISIT_AND_RETURN(ObjectLiteral, lit)
-  }
-
-  ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
-                                                    FunctionLiteral* value) {
-    ObjectLiteral::Property* prop =
-        new(zone_) ObjectLiteral::Property(is_getter, value);
-    prop->set_key(NewLiteral(value->name()));
-    return prop;  // Not an AST node, will not be visited.
-  }
-
-  RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
-                                  Handle<String> flags,
-                                  int literal_index) {
-    RegExpLiteral* lit =
-        new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index);
-    VISIT_AND_RETURN(RegExpLiteral, lit);
-  }
-
-  ArrayLiteral* NewArrayLiteral(Handle<FixedArray> constant_elements,
-                                ZoneList<Expression*>* values,
-                                int literal_index,
-                                bool is_simple,
-                                int depth) {
-    ArrayLiteral* lit = new(zone_) ArrayLiteral(
-        isolate_, constant_elements, values, literal_index, is_simple, depth);
-    VISIT_AND_RETURN(ArrayLiteral, lit)
-  }
-
-  VariableProxy* NewVariableProxy(Variable* var) {
-    VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var);
-    VISIT_AND_RETURN(VariableProxy, proxy)
-  }
-
-  VariableProxy* NewVariableProxy(Handle<String> name,
-                                  bool is_this,
-                                  int position = RelocInfo::kNoPosition,
-                                  Interface* interface =
-                                      Interface::NewValue()) {
-    VariableProxy* proxy =
-        new(zone_) VariableProxy(isolate_, name, is_this, position, interface);
-    VISIT_AND_RETURN(VariableProxy, proxy)
-  }
-
-  Property* NewProperty(Expression* obj, Expression* key, int pos) {
-    Property* prop = new(zone_) Property(isolate_, obj, key, pos);
-    VISIT_AND_RETURN(Property, prop)
-  }
-
-  Call* NewCall(Expression* expression,
-                ZoneList<Expression*>* arguments,
-                int pos) {
-    Call* call = new(zone_) Call(isolate_, expression, arguments, pos);
-    VISIT_AND_RETURN(Call, call)
-  }
-
-  CallNew* NewCallNew(Expression* expression,
-                      ZoneList<Expression*>* arguments,
-                      int pos) {
-    CallNew* call = new(zone_) CallNew(isolate_, expression, arguments, pos);
-    VISIT_AND_RETURN(CallNew, call)
-  }
-
-  CallRuntime* NewCallRuntime(Handle<String> name,
-                              const Runtime::Function* function,
-                              ZoneList<Expression*>* arguments) {
-    CallRuntime* call =
-        new(zone_) CallRuntime(isolate_, name, function, arguments);
-    VISIT_AND_RETURN(CallRuntime, call)
-  }
-
-  UnaryOperation* NewUnaryOperation(Token::Value op,
-                                    Expression* expression,
-                                    int pos) {
-    UnaryOperation* node =
-        new(zone_) UnaryOperation(isolate_, op, expression, pos);
-    VISIT_AND_RETURN(UnaryOperation, node)
-  }
-
-  BinaryOperation* NewBinaryOperation(Token::Value op,
-                                      Expression* left,
-                                      Expression* right,
-                                      int pos) {
-    BinaryOperation* node =
-        new(zone_) BinaryOperation(isolate_, op, left, right, pos);
-    VISIT_AND_RETURN(BinaryOperation, node)
-  }
-
-  CountOperation* NewCountOperation(Token::Value op,
-                                    bool is_prefix,
-                                    Expression* expr,
-                                    int pos) {
-    CountOperation* node =
-        new(zone_) CountOperation(isolate_, op, is_prefix, expr, pos);
-    VISIT_AND_RETURN(CountOperation, node)
-  }
-
-  CompareOperation* NewCompareOperation(Token::Value op,
-                                        Expression* left,
-                                        Expression* right,
-                                        int pos) {
-    CompareOperation* node =
-        new(zone_) CompareOperation(isolate_, op, left, right, pos);
-    VISIT_AND_RETURN(CompareOperation, node)
-  }
-
-  Conditional* NewConditional(Expression* condition,
-                              Expression* then_expression,
-                              Expression* else_expression,
-                              int then_expression_position,
-                              int else_expression_position) {
-    Conditional* cond = new(zone_) Conditional(
-        isolate_, condition, then_expression, else_expression,
-        then_expression_position, else_expression_position);
-    VISIT_AND_RETURN(Conditional, cond)
-  }
-
-  Assignment* NewAssignment(Token::Value op,
-                            Expression* target,
-                            Expression* value,
-                            int pos) {
-    Assignment* assign =
-        new(zone_) Assignment(isolate_, op, target, value, pos);
-    assign->Init(isolate_, this);
-    VISIT_AND_RETURN(Assignment, assign)
-  }
-
-  Throw* NewThrow(Expression* exception, int pos) {
-    Throw* t = new(zone_) Throw(isolate_, exception, pos);
-    VISIT_AND_RETURN(Throw, t)
-  }
-
-  FunctionLiteral* NewFunctionLiteral(
-      Handle<String> name,
-      Scope* scope,
-      ZoneList<Statement*>* body,
-      int materialized_literal_count,
-      int expected_property_count,
-      int handler_count,
-      bool has_only_simple_this_property_assignments,
-      Handle<FixedArray> this_property_assignments,
-      int parameter_count,
-      FunctionLiteral::ParameterFlag has_duplicate_parameters,
-      FunctionLiteral::Type type,
-      FunctionLiteral::IsFunctionFlag is_function) {
-    FunctionLiteral* lit = new(zone_) FunctionLiteral(
-        isolate_, name, scope, body,
-        materialized_literal_count, expected_property_count, handler_count,
-        has_only_simple_this_property_assignments, this_property_assignments,
-        parameter_count, type, has_duplicate_parameters, is_function);
-    // Top-level literal doesn't count for the AST's properties.
-    if (is_function == FunctionLiteral::kIsFunction) {
-      visitor_.VisitFunctionLiteral(lit);
-    }
-    return lit;
-  }
-
-  SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral(
-      Handle<SharedFunctionInfo> shared_function_info) {
-    SharedFunctionInfoLiteral* lit =
-        new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info);
-    VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit)
-  }
-
-  ThisFunction* NewThisFunction() {
-    ThisFunction* fun = new(zone_) ThisFunction(isolate_);
-    VISIT_AND_RETURN(ThisFunction, fun)
-  }
-
-#undef VISIT_AND_RETURN
-
- private:
-  Isolate* isolate_;
-  Zone* zone_;
-  Visitor visitor_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_AST_H_
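The block removed above is the tail of V8's `AstNodeFactory<Visitor>` template, which pairs every node allocation with an immediate visitor callback via the `VISIT_AND_RETURN` macro, so per-node bookkeeping happens during parsing rather than in a separate pass. A minimal, self-contained sketch of that construct-then-visit pattern; the names (`NodeFactory`, `AstCounter`) are hypothetical stand-ins, not V8's:

```cpp
#include <memory>
#include <vector>

// Toy stand-ins for V8's zone-allocated AST node classes.
struct AstNode { virtual ~AstNode() = default; };
struct Throw : AstNode {
  AstNode* exception;
  explicit Throw(AstNode* e) : exception(e) {}
};
struct ThisFunction : AstNode {};

// Visitor invoked on each node as soon as it is constructed --
// the role played by VISIT_AND_RETURN above.
struct AstCounter {
  int node_count = 0;
  void Visit(AstNode*) { ++node_count; }
};

template <typename Visitor>
class NodeFactory {
 public:
  Throw* NewThrow(AstNode* exception) {
    Throw* node = Own(new Throw(exception));
    visitor_.Visit(node);  // the VISIT_AND_RETURN step
    return node;
  }
  ThisFunction* NewThisFunction() {
    ThisFunction* node = Own(new ThisFunction());
    visitor_.Visit(node);
    return node;
  }
  Visitor& visitor() { return visitor_; }

 private:
  template <typename T>
  T* Own(T* node) {  // V8 zone-allocates instead of heap-owning like this
    nodes_.emplace_back(node);
    return node;
  }
  Visitor visitor_;
  std::vector<std::unique_ptr<AstNode>> nodes_;
};
```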
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 9498fd7..5113de2 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
 
 namespace v8 {
 namespace internal {
@@ -48,19 +48,16 @@
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev, tmp;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
-                       "ll %0, %5\n"  // prev = *ptr
+  Atomic32 prev;
+  __asm__ __volatile__("1:\n"
+                       "ll %0, %1\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "move %2, %4\n"  // tmp = new_value
-                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "nop\n"  // delay slot nop
+                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       ".set pop\n"
-                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
@@ -71,15 +68,12 @@
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
+  __asm__ __volatile__("1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
-                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
@@ -93,15 +87,13 @@
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__(".set push\n"
-                       ".set noreorder\n"
-                       "1:\n"
+  __asm__ __volatile__("1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
-                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
-                       "beqz %1, 1b\n"  // start again on atomic error
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
-                       ".set pop\n"
+                       "addu %0, %3\n"  // temp = temp + increment
+                       "move %1, %0\n"  // temp2 = temp
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
@@ -111,7 +103,6 @@
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
@@ -126,19 +117,16 @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
+  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  ATOMICOPS_COMPILER_BARRIER();
-  return res;
+  return x;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  ATOMICOPS_COMPILER_BARRIER();
-  return res;
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -146,7 +134,7 @@
 }
 
 inline void MemoryBarrier() {
-  __asm__ __volatile__("sync" : : : "memory");
+  ATOMICOPS_COMPILER_BARRIER();
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
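Each MIPS primitive restored above is a load-linked/store-conditional (`ll`/`sc`) retry loop: `sc` succeeds only if no other CPU wrote the location since the matching `ll`, and writes 0 to its register on failure, which `beqz ... 1b` turns into a retry. Note also that the restored `ATOMICOPS_COMPILER_BARRIER` emits a `sync` instruction (a full hardware barrier), whereas the reverted definition's empty `asm volatile("" ::: "memory")` only prevented compiler reordering. A portable sketch of the same compare-and-swap and increment semantics using `std::atomic` (an illustration, not V8 code):

```cpp
#include <atomic>

// Returns the previous value, like NoBarrier_CompareAndSwap; the caller
// detects success by comparing the result against old_value.
int CompareAndSwap(std::atomic<int>* ptr, int old_value, int new_value) {
  int prev = old_value;
  // compare_exchange_strong loops internally on ll/sc architectures,
  // exactly like the hand-written "beqz %0, 1b" retry above.
  ptr->compare_exchange_strong(prev, new_value, std::memory_order_relaxed);
  return prev;  // on failure, prev holds the value actually observed
}

// Like NoBarrier_AtomicIncrement, which returns the *new* value;
// fetch_add returns the old one, so add the increment to match.
int AtomicIncrement(std::atomic<int>* ptr, int increment) {
  return ptr->fetch_add(increment, std::memory_order_relaxed) + increment;
}
```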
diff --git a/src/atomicops_internals_x86_macosx.h b/src/atomicops_internals_x86_macosx.h
index bfb02b3..2bac006 100644
--- a/src/atomicops_internals_x86_macosx.h
+++ b/src/atomicops_internals_x86_macosx.h
@@ -35,7 +35,7 @@
 namespace v8 {
 namespace internal {
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 prev_value;
@@ -49,7 +49,7 @@
   return prev_value;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                          Atomic32 new_value) {
   Atomic32 old_value;
   do {
@@ -59,12 +59,12 @@
   return old_value;
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                           Atomic32 increment) {
   return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                           Atomic32 increment) {
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }
@@ -73,7 +73,7 @@
   OSMemoryBarrier();
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev_value;
@@ -87,7 +87,7 @@
   return prev_value;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   return Acquire_CompareAndSwap(ptr, old_value, new_value);
@@ -97,12 +97,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -111,13 +111,13 @@
   return *ptr;
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -126,7 +126,7 @@
 
 // 64-bit implementation on 64-bit platform
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 prev_value;
@@ -140,7 +140,7 @@
   return prev_value;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                          Atomic64 new_value) {
   Atomic64 old_value;
   do {
@@ -150,17 +150,17 @@
   return old_value;
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                           Atomic64 increment) {
   return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                         Atomic64 increment) {
   return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev_value;
@@ -174,7 +174,7 @@
   return prev_value;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   // The lib kern interface does not distinguish between
@@ -186,12 +186,12 @@
   *ptr = value;
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
   *ptr = value;
   MemoryBarrier();
 }
 
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   MemoryBarrier();
   *ptr = value;
 }
@@ -200,13 +200,13 @@
   return *ptr;
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
   Atomic64 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
   MemoryBarrier();
   return *ptr;
 }
@@ -264,7 +264,7 @@
       old_value, new_value);
 }
 
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
   NoBarrier_Store(
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
@@ -279,7 +279,7 @@
       reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
 }
 
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
   return NoBarrier_Load(
       reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
 }
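The Mac OS X variants above implement the same contract with `OSMemoryBarrier()` around plain accesses: `Release_Store` is barrier-then-store, `Acquire_Load` is load-then-barrier (and, as the comment in the 64-bit hunk notes, the libkern interface does not distinguish acquire from release CAS). The intent is the classic message-passing idiom; a self-contained illustration of the equivalent orderings with `std::atomic` (not V8 code):

```cpp
#include <atomic>
#include <cassert>
#include <thread>

int payload = 0;
std::atomic<bool> ready{false};

void Producer() {
  payload = 42;                                  // plain write
  ready.store(true, std::memory_order_release);  // like Release_Store
}

void Consumer() {
  while (!ready.load(std::memory_order_acquire)) {  // like Acquire_Load
  }
  // The release/acquire pair guarantees the payload write is visible.
  assert(payload == 42);
}

int main() {
  std::thread t1(Producer), t2(Consumer);
  t1.join();
  t2.join();
}
```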
diff --git a/src/bignum-dtoa.h b/src/bignum-dtoa.h
index 93ec1f7..ea1acbb 100644
--- a/src/bignum-dtoa.h
+++ b/src/bignum-dtoa.h
@@ -44,7 +44,7 @@
   BIGNUM_DTOA_PRECISION
 };
 
-// Converts the given double 'v' to ASCII.
+// Converts the given double 'v' to ascii.
 // The result should be interpreted as buffer * 10^(point-length).
 // The buffer will be null-terminated.
 //
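A worked example of the `buffer * 10^(point-length)` convention, for illustration: with the shortest mode and v = 3.12, a conforming implementation yields buffer = "312", length = 3, point = 1, and indeed 312 · 10^(1−3) = 3.12.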
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 0e95b4b..f07e625 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,11 +34,9 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
-#include "isolate-inl.h"
 #include "macro-assembler.h"
 #include "natives.h"
 #include "objects-visiting.h"
-#include "platform.h"
 #include "snapshot.h"
 #include "extensions/externalize-string-extension.h"
 #include "extensions/gc-extension.h"
@@ -76,15 +74,22 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   if (heap->natives_source_cache()->get(index)->IsUndefined()) {
-    // We can use external strings for the natives.
-    Vector<const char> source = Natives::GetRawScriptSource(index);
-    NativesExternalStringResource* resource =
-        new NativesExternalStringResource(this,
-                                          source.start(),
-                                          source.length());
-    Handle<String> source_code =
-        factory->NewExternalStringFromAscii(resource);
-    heap->natives_source_cache()->set(index, *source_code);
+    if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
+      // We can use external strings for the natives.
+      Vector<const char> source = Natives::GetRawScriptSource(index);
+      NativesExternalStringResource* resource =
+          new NativesExternalStringResource(this,
+                                            source.start(),
+                                            source.length());
+      Handle<String> source_code =
+          factory->NewExternalStringFromAscii(resource);
+      heap->natives_source_cache()->set(index, *source_code);
+    } else {
+      // Old snapshot code can't cope with external strings at all.
+      Handle<String> source_code =
+        factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
+      heap->natives_source_cache()->set(index, *source_code);
+    }
   }
   Handle<Object> cached_source(heap->natives_source_cache()->get(index));
   return Handle<String>::cast(cached_source);
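In both branches restored above, the point of the external-string path is that the natives source never gets copied onto the JS heap: the string object merely points into the natives blob. A minimal sketch of the idea behind `NativesExternalStringResource` (an illustration with hypothetical names, not V8's actual class):

```cpp
#include <cstddef>

// Non-owning resource: the payload stays in the (read-only) natives
// blob, so constructing the string copies nothing onto the JS heap.
class StaticAsciiResource {
 public:
  StaticAsciiResource(const char* data, size_t length)
      : data_(data), length_(length) {}
  const char* data() const { return data_; }
  size_t length() const { return length_; }

 private:
  const char* data_;  // not owned; must outlive the resource
  size_t length_;
};
```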
@@ -172,10 +177,6 @@
   Handle<JSFunction> GetThrowTypeErrorFunction();
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
-
-  // Make the "arguments" and "caller" properties throw a TypeError on access.
-  void PoisonArgumentsAndCaller(Handle<Map> map);
-
   // Creates the global objects using the global and the template passed in
   // through the API.  We call this regardless of whether we are building a
   // context from scratch or using a deserialized one from the partial snapshot
@@ -196,7 +197,7 @@
   // detached from the other objects in the snapshot.
   void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
   // New context initialization.  Used for creating a context from scratch.
-  bool InitializeGlobal(Handle<GlobalObject> inner_global,
+  void InitializeGlobal(Handle<GlobalObject> inner_global,
                         Handle<JSFunction> empty_function);
   void InitializeExperimentalGlobal();
   // Installs the contents of the native .js files on the global objects.
@@ -208,30 +209,12 @@
   void InstallBuiltinFunctionIds();
   void InstallJSFunctionResultCaches();
   void InitializeNormalizedMapCaches();
-
-  enum ExtensionTraversalState {
-    UNVISITED, VISITED, INSTALLED
-  };
-
-  class ExtensionStates {
-   public:
-    ExtensionStates();
-    ExtensionTraversalState get_state(RegisteredExtension* extension);
-    void set_state(RegisteredExtension* extension,
-                   ExtensionTraversalState state);
-   private:
-    HashMap map_;
-    DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
-  };
-
   // Used both for deserialized and from-scratch contexts to add the extensions
   // provided.
   static bool InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions);
-  static bool InstallExtension(const char* name,
-                               ExtensionStates* extension_states);
-  static bool InstallExtension(v8::RegisteredExtension* current,
-                               ExtensionStates* extension_states);
+  static bool InstallExtension(const char* name);
+  static bool InstallExtension(v8::RegisteredExtension* current);
   static void InstallSpecialObjects(Handle<Context> global_context);
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
@@ -259,10 +242,14 @@
 
   Handle<Map> CreateStrictModeFunctionMap(
       PrototypePropertyMode prototype_mode,
-      Handle<JSFunction> empty_function);
+      Handle<JSFunction> empty_function,
+      Handle<FixedArray> arguments_callbacks,
+      Handle<FixedArray> caller_callbacks);
 
   Handle<DescriptorArray> ComputeStrictFunctionInstanceDescriptor(
-      PrototypePropertyMode propertyMode);
+      PrototypePropertyMode propertyMode,
+      Handle<FixedArray> arguments,
+      Handle<FixedArray> caller);
 
   static bool CompileBuiltin(Isolate* isolate, int index);
   static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -291,7 +278,7 @@
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
   extensions_cache_.Iterate(v);
-  v->Synchronize(VisitorSynchronization::kExtensions);
+  v->Synchronize("Extensions");
 }
 
 
@@ -370,56 +357,55 @@
   } else {
     attributes = DONT_ENUM;
   }
-  CHECK_NOT_EMPTY_HANDLE(isolate,
-                         JSObject::SetLocalPropertyIgnoreAttributes(
-                             target, symbol, function, attributes));
+  SetLocalPropertyNoThrow(target, symbol, function, attributes);
   if (is_ecma_native) {
     function->shared()->set_instance_class_name(*symbol);
   }
-  function->shared()->set_native(true);
   return function;
 }
 
 
 Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
     PrototypePropertyMode prototypeMode) {
-  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
-  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
-  PropertyAttributes attribs = static_cast<PropertyAttributes>(
-      DONT_ENUM | DONT_DELETE | READ_ONLY);
-
-  DescriptorArray::WhitenessWitness witness(*descriptors);
+  Handle<DescriptorArray> descriptors =
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   {  // Add length.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
-    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
-    descriptors->Set(0, &d, witness);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+    descriptors->Set(0, &d);
   }
   {  // Add name.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
-    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
-    descriptors->Set(1, &d, witness);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+    descriptors->Set(1, &d);
   }
   {  // Add arguments.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionArguments));
-    CallbacksDescriptor d(*factory()->arguments_symbol(), *f, attribs);
-    descriptors->Set(2, &d, witness);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionArguments);
+    CallbacksDescriptor d(*factory()->arguments_symbol(), *foreign, attributes);
+    descriptors->Set(2, &d);
   }
   {  // Add caller.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionCaller));
-    CallbacksDescriptor d(*factory()->caller_symbol(), *f, attribs);
-    descriptors->Set(3, &d, witness);
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionCaller);
+    CallbacksDescriptor d(*factory()->caller_symbol(), *foreign, attributes);
+    descriptors->Set(3, &d);
   }
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
     // Add prototype.
     if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
-      attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
+      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
-    descriptors->Set(4, &d, witness);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+    descriptors->Set(4, &d);
   }
-  descriptors->Sort(witness);
+  descriptors->Sort();
   return descriptors;
 }
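Both descriptor builders in this file follow the same discipline: fill a fixed number of slots, then `Sort()` once so later property lookup can binary-search. V8 actually orders descriptors by symbol hash; a toy version ordered by name shows the same append-sort-search shape (hypothetical types, not V8's):

```cpp
#include <algorithm>
#include <string>
#include <vector>

struct Descriptor {
  std::string name;
  void* callback;  // stands in for the Foreign accessor pointer
};

// Append in any order, sort once, then look up by binary search.
class ToyDescriptorArray {
 public:
  void Set(Descriptor d) { descriptors_.push_back(std::move(d)); }
  void Sort() {
    std::sort(descriptors_.begin(), descriptors_.end(),
              [](const Descriptor& a, const Descriptor& b) {
                return a.name < b.name;
              });
  }
  const Descriptor* Search(const std::string& name) const {
    auto it = std::lower_bound(
        descriptors_.begin(), descriptors_.end(), name,
        [](const Descriptor& d, const std::string& n) { return d.name < n; });
    return (it != descriptors_.end() && it->name == name) ? &*it : nullptr;
  }

 private:
  std::vector<Descriptor> descriptors_;
};
```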
 
@@ -492,7 +478,7 @@
   // 262 15.3.4.
   Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
   Handle<JSFunction> empty_function =
-      factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
+      factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
 
   // --- E m p t y ---
   Handle<Code> code =
@@ -527,46 +513,49 @@
 
 
 Handle<DescriptorArray> Genesis::ComputeStrictFunctionInstanceDescriptor(
-    PrototypePropertyMode prototypeMode) {
-  int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
-  Handle<DescriptorArray> descriptors(factory()->NewDescriptorArray(size));
-  PropertyAttributes attribs = static_cast<PropertyAttributes>(
-      DONT_ENUM | DONT_DELETE);
+    PrototypePropertyMode prototypeMode,
+    Handle<FixedArray> arguments,
+    Handle<FixedArray> caller) {
+  Handle<DescriptorArray> descriptors =
+      factory()->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE
+                                    ? 4
+                                    : 5);
+  PropertyAttributes attributes = static_cast<PropertyAttributes>(
+      DONT_ENUM | DONT_DELETE | READ_ONLY);
 
-  DescriptorArray::WhitenessWitness witness(*descriptors);
-
-  {  // Add length.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionLength));
-    CallbacksDescriptor d(*factory()->length_symbol(), *f, attribs);
-    descriptors->Set(0, &d, witness);
+  {  // length
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionLength);
+    CallbacksDescriptor d(*factory()->length_symbol(), *foreign, attributes);
+    descriptors->Set(0, &d);
   }
-  {  // Add name.
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionName));
-    CallbacksDescriptor d(*factory()->name_symbol(), *f, attribs);
-    descriptors->Set(1, &d, witness);
+  {  // name
+    Handle<Foreign> foreign = factory()->NewForeign(&Accessors::FunctionName);
+    CallbacksDescriptor d(*factory()->name_symbol(), *foreign, attributes);
+    descriptors->Set(1, &d);
   }
-  {  // Add arguments.
-    Handle<AccessorPair> arguments(factory()->NewAccessorPair());
-    CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
-    descriptors->Set(2, &d, witness);
+  {  // arguments
+    CallbacksDescriptor d(*factory()->arguments_symbol(),
+                          *arguments,
+                          attributes);
+    descriptors->Set(2, &d);
   }
-  {  // Add caller.
-    Handle<AccessorPair> caller(factory()->NewAccessorPair());
-    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
-    descriptors->Set(3, &d, witness);
+  {  // caller
+    CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attributes);
+    descriptors->Set(3, &d);
   }
 
+  // prototype
   if (prototypeMode != DONT_ADD_PROTOTYPE) {
-    // Add prototype.
-    if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
-      attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
+    if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
+      attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
     }
-    Handle<Foreign> f(factory()->NewForeign(&Accessors::FunctionPrototype));
-    CallbacksDescriptor d(*factory()->prototype_symbol(), *f, attribs);
-    descriptors->Set(4, &d, witness);
+    Handle<Foreign> foreign =
+        factory()->NewForeign(&Accessors::FunctionPrototype);
+    CallbacksDescriptor d(*factory()->prototype_symbol(), *foreign, attributes);
+    descriptors->Set(4, &d);
   }
 
-  descriptors->Sort(witness);
+  descriptors->Sort();
   return descriptors;
 }
 
@@ -576,7 +565,7 @@
   if (throw_type_error_function.is_null()) {
     Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
     throw_type_error_function =
-      factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
+      factory()->NewFunctionWithoutPrototype(name, kNonStrictMode);
     Handle<Code> code(isolate()->builtins()->builtin(
         Builtins::kStrictModePoisonPill));
     throw_type_error_function->set_map(
@@ -585,7 +574,7 @@
     throw_type_error_function->shared()->set_code(*code);
     throw_type_error_function->shared()->DontAdaptArguments();
 
-    JSObject::PreventExtensions(throw_type_error_function);
+    PreventExtensions(throw_type_error_function);
   }
   return throw_type_error_function;
 }
@@ -593,10 +582,14 @@
 
 Handle<Map> Genesis::CreateStrictModeFunctionMap(
     PrototypePropertyMode prototype_mode,
-    Handle<JSFunction> empty_function) {
+    Handle<JSFunction> empty_function,
+    Handle<FixedArray> arguments_callbacks,
+    Handle<FixedArray> caller_callbacks) {
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   Handle<DescriptorArray> descriptors =
-      ComputeStrictFunctionInstanceDescriptor(prototype_mode);
+      ComputeStrictFunctionInstanceDescriptor(prototype_mode,
+                                              arguments_callbacks,
+                                              caller_callbacks);
   map->set_instance_descriptors(*descriptors);
   map->set_function_with_prototype(prototype_mode != DONT_ADD_PROTOTYPE);
   map->set_prototype(*empty_function);
@@ -605,15 +598,23 @@
 
 
 void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
+  // Create the callbacks arrays for ThrowTypeError functions.
+  // The get/set callbacks are filled in after the maps are created below.
+  Factory* factory = empty->GetIsolate()->factory();
+  Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
+  Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
+
   // Allocate map for the strict mode function instances.
   Handle<Map> strict_mode_function_instance_map =
-      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
+      CreateStrictModeFunctionMap(
+          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_instance_map(
       *strict_mode_function_instance_map);
 
   // Allocate map for the prototype-less strict mode instances.
   Handle<Map> strict_mode_function_without_prototype_map =
-      CreateStrictModeFunctionMap(DONT_ADD_PROTOTYPE, empty);
+      CreateStrictModeFunctionMap(
+          DONT_ADD_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_without_prototype_map(
       *strict_mode_function_without_prototype_map);
 
@@ -621,38 +622,26 @@
   // only for processing of builtins.
   // Later the map is replaced with writable prototype map, allocated below.
   Handle<Map> strict_mode_function_map =
-      CreateStrictModeFunctionMap(ADD_READONLY_PROTOTYPE, empty);
+      CreateStrictModeFunctionMap(
+          ADD_READONLY_PROTOTYPE, empty, arguments, caller);
   global_context()->set_strict_mode_function_map(
       *strict_mode_function_map);
 
   // The final map for the strict mode functions. Writeable prototype.
   // This map is installed in MakeFunctionInstancePrototypeWritable.
   strict_mode_function_instance_map_writable_prototype_ =
-      CreateStrictModeFunctionMap(ADD_WRITEABLE_PROTOTYPE, empty);
+      CreateStrictModeFunctionMap(
+          ADD_WRITEABLE_PROTOTYPE, empty, arguments, caller);
 
-  // Complete the callbacks.
-  PoisonArgumentsAndCaller(strict_mode_function_instance_map);
-  PoisonArgumentsAndCaller(strict_mode_function_without_prototype_map);
-  PoisonArgumentsAndCaller(strict_mode_function_map);
-  PoisonArgumentsAndCaller(
-      strict_mode_function_instance_map_writable_prototype_);
-}
+  // Create the ThrowTypeError function instance.
+  Handle<JSFunction> throw_function =
+      GetThrowTypeErrorFunction();
 
-
-static void SetAccessors(Handle<Map> map,
-                         Handle<String> name,
-                         Handle<JSFunction> func) {
-  DescriptorArray* descs = map->instance_descriptors();
-  int number = descs->Search(*name);
-  AccessorPair* accessors = AccessorPair::cast(descs->GetValue(number));
-  accessors->set_getter(*func);
-  accessors->set_setter(*func);
-}
-
-
-void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
-  SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction());
-  SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction());
+  // Complete the callback fixed arrays.
+  arguments->set(0, *throw_function);
+  arguments->set(1, *throw_function);
+  caller->set(0, *throw_function);
+  caller->set(1, *throw_function);
 }
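In the scheme restored above, each poisoned property ("arguments" and "caller" on strict mode functions) is backed by a two-element `FixedArray`: slot 0 holds the getter, slot 1 the setter, and the same `ThrowTypeError` poison pill is installed in both, so any read or write traps. The reverted code had wrapped this pair in a dedicated `AccessorPair` object instead. A hedged sketch of the two-slot convention with toy types (not V8's):

```cpp
#include <stdexcept>

using Callback = void (*)();

[[noreturn]] void ThrowTypeError() {
  throw std::runtime_error("'caller' and 'arguments' are restricted");
}

// Two-slot callback pair mirroring the FixedArray(2) filled in above:
// slot 0 = getter, slot 1 = setter.
struct CallbackPair {
  Callback slots[2];
};

CallbackPair PoisonPair() {
  // Reads and writes of the property both trap to the same poison pill.
  return CallbackPair{{&ThrowTypeError, &ThrowTypeError}};
}
```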
 
 
@@ -738,10 +727,11 @@
     Handle<JSObject> prototype =
         Handle<JSObject>(
             JSObject::cast(js_global_function->instance_prototype()));
-    CHECK_NOT_EMPTY_HANDLE(isolate(),
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               prototype, factory()->constructor_symbol(),
-                               isolate()->object_function(), NONE));
+    SetLocalPropertyNoThrow(
+        prototype,
+        factory()->constructor_symbol(),
+        isolate()->object_function(),
+        NONE);
   } else {
     Handle<FunctionTemplateInfo> js_global_constructor(
         FunctionTemplateInfo::cast(js_global_template->constructor()));
@@ -818,7 +808,7 @@
                    factory()->LookupAsciiSymbol("global"),
                    inner_global,
                    attributes);
-  // Set up the reference from the global object to the builtins object.
+  // Setup the reference from the global object to the builtins object.
   JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
   TransferNamedProperties(inner_global_from_snapshot, inner_global);
   TransferIndexedProperties(inner_global_from_snapshot, inner_global);
@@ -827,7 +817,7 @@
 
 // This is only called if we are not using snapshots.  The equivalent
 // work in the snapshot case is done in HookUpInnerGlobal.
-bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
                                Handle<JSFunction> empty_function) {
   // --- G l o b a l   C o n t e x t ---
   // Use the empty function as closure (no scope info).
@@ -847,10 +837,8 @@
   Heap* heap = isolate->heap();
 
   Handle<String> object_name = Handle<String>(heap->Object_symbol());
-  CHECK_NOT_EMPTY_HANDLE(isolate,
-                         JSObject::SetLocalPropertyIgnoreAttributes(
-                             inner_global, object_name,
-                             isolate->object_function(), DONT_ENUM));
+  SetLocalPropertyNoThrow(inner_global, object_name,
+                          isolate->object_function(), DONT_ENUM);
 
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
@@ -877,12 +865,15 @@
             factory->NewForeign(&Accessors::ArrayLength),
             static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
 
+    // Cache the fast JavaScript array map
+    global_context()->set_js_array_map(array_function->initial_map());
+    global_context()->js_array_map()->set_instance_descriptors(
+        *array_descriptors);
     // array_function is used internally. JS code creating array object should
     // search for the 'Array' property on the global object and use that one
     // as the constructor. 'Array' property on a global object can be
     // overwritten by JS code.
     global_context()->set_array_function(*array_function);
-    array_function->initial_map()->set_instance_descriptors(*array_descriptors);
   }
 
   {  // --- N u m b e r ---
@@ -927,7 +918,7 @@
   {  // --- D a t e ---
     // Builtin functions for Date.prototype.
     Handle<JSFunction> date_fun =
-        InstallFunction(global, "Date", JS_DATE_TYPE, JSDate::kSize,
+        InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
                         isolate->initial_object_prototype(),
                         Builtins::kIllegal, true);
 
@@ -949,7 +940,6 @@
     ASSERT_EQ(0, initial_map->inobject_properties());
 
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
-    DescriptorArray::WhitenessWitness witness(*descriptors);
     PropertyAttributes final =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
     int enum_index = 0;
@@ -959,7 +949,7 @@
                             JSRegExp::kSourceFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(0, &field, witness);
+      descriptors->Set(0, &field);
     }
     {
       // ECMA-262, section 15.10.7.2.
@@ -967,7 +957,7 @@
                             JSRegExp::kGlobalFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(1, &field, witness);
+      descriptors->Set(1, &field);
     }
     {
       // ECMA-262, section 15.10.7.3.
@@ -975,7 +965,7 @@
                             JSRegExp::kIgnoreCaseFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(2, &field, witness);
+      descriptors->Set(2, &field);
     }
     {
       // ECMA-262, section 15.10.7.4.
@@ -983,7 +973,7 @@
                             JSRegExp::kMultilineFieldIndex,
                             final,
                             enum_index++);
-      descriptors->Set(3, &field, witness);
+      descriptors->Set(3, &field);
     }
     {
       // ECMA-262, section 15.10.7.5.
@@ -993,10 +983,10 @@
                             JSRegExp::kLastIndexFieldIndex,
                             writable,
                             enum_index++);
-      descriptors->Set(4, &field, witness);
+      descriptors->Set(4, &field);
     }
     descriptors->SetNextEnumerationIndex(enum_index);
-    descriptors->Sort(witness);
+    descriptors->Sort();
 
     initial_map->set_inobject_properties(5);
     initial_map->set_pre_allocated_property_fields(5);
@@ -1005,42 +995,18 @@
         initial_map->instance_size() + 5 * kPointerSize);
     initial_map->set_instance_descriptors(*descriptors);
     initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
-
-    // RegExp prototype object is itself a RegExp.
-    Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
-    proto_map->set_prototype(global_context()->initial_object_prototype());
-    Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
-    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
-                                 heap->empty_string());
-    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
-                                 heap->false_value());
-    proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
-                                 heap->false_value());
-    proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
-                                 heap->false_value());
-    proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
-                                 Smi::FromInt(0),
-                                 SKIP_WRITE_BARRIER);  // It's a Smi.
-    initial_map->set_prototype(*proto);
-    factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
-                                   JSRegExp::IRREGEXP, factory->empty_string(),
-                                   JSRegExp::Flags(0), 0);
   }
 
   {  // -- J S O N
     Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
-    Handle<JSFunction> cons = factory->NewFunction(name,
-                                                   factory->the_hole_value());
-    { MaybeObject* result = cons->SetInstancePrototype(
-        global_context()->initial_object_prototype());
-      if (result->IsFailure()) return false;
-    }
+    Handle<JSFunction> cons = factory->NewFunction(
+        name,
+        factory->the_hole_value());
+    cons->SetInstancePrototype(global_context()->initial_object_prototype());
     cons->SetInstanceClassName(*name);
     Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
     ASSERT(json_object->IsJSObject());
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                                 global, name, json_object, DONT_ENUM));
+    SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
     global_context()->set_json_object(*json_object);
   }
 
@@ -1070,23 +1036,21 @@
     global_context()->set_arguments_boilerplate(*result);
     // Note: length must be added as the first property and
     //       callee must be added as the second property.
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               result, factory->length_symbol(),
-                               factory->undefined_value(), DONT_ENUM));
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               result, factory->callee_symbol(),
-                               factory->undefined_value(), DONT_ENUM));
+    SetLocalPropertyNoThrow(result, factory->length_symbol(),
+                            factory->undefined_value(),
+                            DONT_ENUM);
+    SetLocalPropertyNoThrow(result, factory->callee_symbol(),
+                            factory->undefined_value(),
+                            DONT_ENUM);
 
 #ifdef DEBUG
-    LookupResult lookup(isolate);
+    LookupResult lookup;
     result->LocalLookup(heap->callee_symbol(), &lookup);
-    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
 
     result->LocalLookup(heap->length_symbol(), &lookup);
-    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
 
     ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
@@ -1099,6 +1063,11 @@
   }
 
   {  // --- aliased_arguments_boilerplate_
+    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
+    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
+    new_map->set_pre_allocated_property_fields(2);
+    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
+    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     // Set up a well-formed parameter map to make assertions happy.
     Handle<FixedArray> elements = factory->NewFixedArray(2);
     elements->set_map(heap->non_strict_arguments_elements_map());
@@ -1107,16 +1076,7 @@
     elements->set(0, *array);
     array = factory->NewFixedArray(0);
     elements->set(1, *array);
-
-    Handle<Map> old_map(global_context()->arguments_boilerplate()->map());
-    Handle<Map> new_map = factory->CopyMapDropTransitions(old_map);
-    new_map->set_pre_allocated_property_fields(2);
-    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
-    // Set elements kind after allocating the object because
-    // NewJSObjectFromMap assumes a fast elements map.
-    new_map->set_elements_kind(NON_STRICT_ARGUMENTS_ELEMENTS);
     result->set_elements(*elements);
-    ASSERT(result->HasNonStrictArgumentsElements());
     global_context()->set_aliased_arguments_boilerplate(*result);
   }
 
@@ -1125,34 +1085,33 @@
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
     // Create the ThrowTypeError functions.
-    Handle<AccessorPair> callee = factory->NewAccessorPair();
-    Handle<AccessorPair> caller = factory->NewAccessorPair();
+    Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
+    Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
 
     Handle<JSFunction> throw_function =
         GetThrowTypeErrorFunction();
 
     // Install the ThrowTypeError functions.
-    callee->set_getter(*throw_function);
-    callee->set_setter(*throw_function);
-    caller->set_getter(*throw_function);
-    caller->set_setter(*throw_function);
+    callee->set(0, *throw_function);
+    callee->set(1, *throw_function);
+    caller->set(0, *throw_function);
+    caller->set(1, *throw_function);
 
     // Create the descriptor array for the arguments object.
     Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
-    DescriptorArray::WhitenessWitness witness(*descriptors);
     {  // length
       FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
-      descriptors->Set(0, &d, witness);
+      descriptors->Set(0, &d);
     }
     {  // callee
       CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
-      descriptors->Set(1, &d, witness);
+      descriptors->Set(1, &d);
     }
     {  // caller
       CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
-      descriptors->Set(2, &d, witness);
+      descriptors->Set(2, &d);
     }
-    descriptors->Sort(witness);
+    descriptors->Sort();
 
     // Create the map. Allocate one in-object field for length.
     Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
@@ -1172,15 +1131,14 @@
     global_context()->set_strict_mode_arguments_boilerplate(*result);
 
     // Add length property only for strict mode boilerplate.
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               result, factory->length_symbol(),
-                               factory->undefined_value(), DONT_ENUM));
+    SetLocalPropertyNoThrow(result, factory->length_symbol(),
+                            factory->undefined_value(),
+                            DONT_ENUM);
 
 #ifdef DEBUG
-    LookupResult lookup(isolate);
+    LookupResult lookup;
     result->LocalLookup(heap->length_symbol(), &lookup);
-    ASSERT(lookup.IsFound() && (lookup.type() == FIELD));
+    ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
     ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
 
     ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1237,15 +1195,6 @@
 
   // Initialize the data slot.
   global_context()->set_data(heap->undefined_value());
-
-  {
-    // Initialize the random seed slot.
-    Handle<ByteArray> zeroed_byte_array(
-        factory->NewByteArray(kRandomStateSize));
-    global_context()->set_random_seed(*zeroed_byte_array);
-    memset(zeroed_byte_array->GetDataStartAddress(), 0, kRandomStateSize);
-  }
-  return true;
 }
 
 
@@ -1253,26 +1202,12 @@
   Handle<JSObject> global = Handle<JSObject>(global_context()->global());
 
   // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
-  // longer need to live behind a flag, so functions get added to the snapshot.
-  if (FLAG_harmony_collections) {
-    {  // -- S e t
-      Handle<JSObject> prototype =
-          factory()->NewJSObject(isolate()->object_function(), TENURED);
-      InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
-                      prototype, Builtins::kIllegal, true);
-    }
-    {  // -- M a p
-      Handle<JSObject> prototype =
-          factory()->NewJSObject(isolate()->object_function(), TENURED);
-      InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
-                      prototype, Builtins::kIllegal, true);
-    }
-    {  // -- W e a k M a p
-      Handle<JSObject> prototype =
-          factory()->NewJSObject(isolate()->object_function(), TENURED);
-      InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
-                      prototype, Builtins::kIllegal, true);
-    }
+  // longer need to live behind a flag, so WeakMap gets added to the snapshot.
+  if (FLAG_harmony_weakmaps) {  // -- W e a k M a p
+    Handle<JSObject> prototype =
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
+    InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
+                    prototype, Builtins::kIllegal, true);
   }
 }
 
@@ -1301,12 +1236,6 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate->debugger()->set_compiling_natives(true);
 #endif
-  // During genesis, the boilerplate for stack overflow won't work until the
-  // environment has been at least partially initialized. Add a stack check
-  // before entering JS code to catch overflow early.
-  StackLimitCheck check(Isolate::Current());
-  if (check.HasOverflowed()) return false;
-
   bool result = CompileScriptCached(name,
                                     source,
                                     NULL,
@@ -1350,7 +1279,7 @@
     if (cache != NULL) cache->Add(name, function_info);
   }
 
-  // Set up the function context. Conceptually, we should clone the
+  // Setup the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
   ASSERT(top_context->IsGlobalContext());
@@ -1398,8 +1327,6 @@
                  configure_instance_fun);
   INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
-  INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
-                 to_complete_property_descriptor);
 }
 
 void Genesis::InstallExperimentalNativeFunctions() {
@@ -1407,7 +1334,6 @@
     INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
     INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
     INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
-    INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
   }
 }
 
@@ -1437,7 +1363,7 @@
   builtins->set_global_context(*global_context());
   builtins->set_global_receiver(*builtins);
 
-  // Set up the 'global' properties of the builtins object. The
+  // Setup the 'global' properties of the builtins object. The
   // 'global' property that refers to the global object is the only
   // way to get from code running in the builtins context to the
   // global object.
@@ -1445,11 +1371,9 @@
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
   Handle<Object> global_obj(global_context()->global());
-  CHECK_NOT_EMPTY_HANDLE(isolate(),
-                         JSObject::SetLocalPropertyIgnoreAttributes(
-                             builtins, global_symbol, global_obj, attributes));
+  SetLocalPropertyNoThrow(builtins, global_symbol, global_obj, attributes);
 
-  // Set up the reference from the global object to the builtins object.
+  // Setup the reference from the global object to the builtins object.
   JSGlobalObject::cast(global_context()->global())->set_builtins(*builtins);
 
   // Create a bridge function that has context in the global context.
@@ -1612,13 +1536,16 @@
     // doesn't inherit from Object.prototype.
     // To be used only for internal work by builtins. Instances
     // must not be leaked to user code.
+    // Only works correctly when called as a constructor. The normal
+    // Array code uses Array.prototype as prototype when called as
+    // a function.
     Handle<JSFunction> array_function =
         InstallFunction(builtins,
                         "InternalArray",
                         JS_ARRAY_TYPE,
                         JSArray::kSize,
                         isolate()->initial_object_prototype(),
-                        Builtins::kInternalArrayCode,
+                        Builtins::kArrayCode,
                         true);
     Handle<JSObject> prototype =
         factory()->NewJSObject(isolate()->object_function(), TENURED);
@@ -1628,18 +1555,6 @@
         isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
-    // InternalArrays should not use Smi-Only array optimizations. There are too
-    // many places in the C++ runtime code (e.g. RegEx) that assume that
-    // elements in InternalArrays can be set to non-Smi values without going
-    // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
-    // transition easy to trap. Moreover, they rarely are smi-only.
-    MaybeObject* maybe_map =
-        array_function->initial_map()->CopyDropTransitions();
-    Map* new_map;
-    if (!maybe_map->To<Map>(&new_map)) return false;
-    new_map->set_elements_kind(FAST_ELEMENTS);
-    array_function->set_initial_map(new_map);
-
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
         factory()->CopyAppendForeignDescriptor(
@@ -1650,8 +1565,6 @@
 
     array_function->initial_map()->set_instance_descriptors(
         *array_descriptors);
-
-    global_context()->set_internal_array_function(*array_function);
   }
 
   if (FLAG_disable_native_files) {
@@ -1673,7 +1586,7 @@
   InstallNativeFunctions();
 
   // Store the map for the string prototype after the natives has been compiled
-  // and the String function has been set up.
+  // and the String function has been setup.
   Handle<JSFunction> string_function(global_context()->string_function());
   ASSERT(JSObject::cast(
       string_function->initial_map()->prototype())->HasFastProperties());
@@ -1735,17 +1648,15 @@
     initial_map->set_prototype(*array_prototype);
 
     // Update map with length accessor from Array and add "index" and "input".
+    Handle<Map> array_map(global_context()->js_array_map());
+    Handle<DescriptorArray> array_descriptors(
+        array_map->instance_descriptors());
+    ASSERT_EQ(1, array_descriptors->number_of_descriptors());
+
     Handle<DescriptorArray> reresult_descriptors =
         factory()->NewDescriptorArray(3);
-    DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
 
-    JSFunction* array_function = global_context()->array_function();
-    Handle<DescriptorArray> array_descriptors(
-        array_function->initial_map()->instance_descriptors());
-    int index = array_descriptors->SearchWithCache(heap()->length_symbol());
-    MaybeObject* copy_result =
-        reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
-    if (copy_result->IsFailure()) return false;
+    reresult_descriptors->CopyFrom(0, *array_descriptors, 0);
 
     int enum_index = 0;
     {
@@ -1753,7 +1664,7 @@
                                   JSRegExpResult::kIndexIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(1, &index_field, witness);
+      reresult_descriptors->Set(1, &index_field);
     }
 
     {
@@ -1761,9 +1672,9 @@
                                   JSRegExpResult::kInputIndex,
                                   NONE,
                                   enum_index++);
-      reresult_descriptors->Set(2, &input_field, witness);
+      reresult_descriptors->Set(2, &input_field);
     }
-    reresult_descriptors->Sort(witness);
+    reresult_descriptors->Sort();
 
     initial_map->set_inobject_properties(2);
     initial_map->set_pre_allocated_property_fields(2);
@@ -1790,9 +1701,9 @@
                "native proxy.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
-    if (FLAG_harmony_collections &&
+    if (FLAG_harmony_weakmaps &&
         strcmp(ExperimentalNatives::GetScriptName(i).start(),
-               "native collection.js") == 0) {
+               "native weakmap.js") == 0) {
       if (!CompileExperimentalBuiltin(isolate(), i)) return false;
     }
   }
@@ -1910,28 +1821,25 @@
 
 
 void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
-  Isolate* isolate = global_context->GetIsolate();
-  Factory* factory = isolate->factory();
+  Factory* factory = global_context->GetIsolate()->factory();
   HandleScope scope;
-  Handle<JSGlobalObject> global(JSGlobalObject::cast(global_context->global()));
+  Handle<JSGlobalObject> js_global(
+      JSGlobalObject::cast(global_context->global()));
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
-    Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               global, natives,
-                               Handle<JSObject>(global->builtins()),
-                               DONT_ENUM));
+    Handle<String> natives_string =
+        factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+    SetLocalPropertyNoThrow(js_global, natives_string,
+                            Handle<JSObject>(js_global->builtins()), DONT_ENUM);
   }
 
-  Handle<Object> Error = GetProperty(global, "Error");
+  Handle<Object> Error = GetProperty(js_global, "Error");
   if (Error->IsJSObject()) {
     Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
-    Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               Handle<JSObject>::cast(Error), name,
-                               stack_trace_limit, NONE));
+    SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
+                            name,
+                            Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
+                            NONE);
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1950,39 +1858,11 @@
     Handle<String> debug_string =
         factory->LookupAsciiSymbol(FLAG_expose_debug_as);
     Handle<Object> global_proxy(debug->debug_context()->global_proxy());
-    CHECK_NOT_EMPTY_HANDLE(isolate,
-                           JSObject::SetLocalPropertyIgnoreAttributes(
-                               global, debug_string, global_proxy, DONT_ENUM));
+    SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
   }
 #endif
 }
 
-static uint32_t Hash(RegisteredExtension* extension) {
-  return v8::internal::ComputePointerHash(extension);
-}
-
-static bool MatchRegisteredExtensions(void* key1, void* key2) {
-  return key1 == key2;
-}
-
-Genesis::ExtensionStates::ExtensionStates()
-  : map_(MatchRegisteredExtensions, 8) { }
-
-Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
-    RegisteredExtension* extension) {
-  i::HashMap::Entry* entry = map_.Lookup(extension, Hash(extension), false);
-  if (entry == NULL) {
-    return UNVISITED;
-  }
-  return static_cast<ExtensionTraversalState>(
-      reinterpret_cast<intptr_t>(entry->value));
-}
-
-void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
-                                         ExtensionTraversalState state) {
-  map_.Lookup(extension, Hash(extension), true)->value =
-      reinterpret_cast<void*>(static_cast<intptr_t>(state));
-}
 
 bool Genesis::InstallExtensions(Handle<Context> global_context,
                                 v8::ExtensionConfiguration* extensions) {
@@ -1990,27 +1870,29 @@
   //                 effort. (The external API reads 'ignore'-- does that mean
   //                 we can break the interface?)
 
-
-  ExtensionStates extension_states;  // All extensions have state UNVISITED.
-  // Install auto extensions.
+  // Clear coloring of extension list
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   while (current != NULL) {
+    current->set_state(v8::UNVISITED);
+    current = current->next();
+  }
+  // Install auto extensions.
+  current = v8::RegisteredExtension::first_extension();
+  while (current != NULL) {
     if (current->extension()->auto_enable())
-      InstallExtension(current, &extension_states);
+      InstallExtension(current);
     current = current->next();
   }
 
-  if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
-  if (FLAG_expose_externalize_string) {
-    InstallExtension("v8/externalize", &extension_states);
-  }
+  if (FLAG_expose_gc) InstallExtension("v8/gc");
+  if (FLAG_expose_externalize_string) InstallExtension("v8/externalize");
 
   if (extensions == NULL) return true;
   // Install required extensions
   int count = v8::ImplementationUtilities::GetNameCount(extensions);
   const char** names = v8::ImplementationUtilities::GetNames(extensions);
   for (int i = 0; i < count; i++) {
-    if (!InstallExtension(names[i], &extension_states))
+    if (!InstallExtension(names[i]))
       return false;
   }
 
@@ -2020,8 +1902,7 @@
 
 // Installs a named extension.  This method is unoptimized and does
 // not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name,
-                               ExtensionStates* extension_states) {
+bool Genesis::InstallExtension(const char* name) {
   v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
   // Loop until we find the relevant extension
   while (current != NULL) {
@@ -2034,52 +1915,42 @@
         "v8::Context::New()", "Cannot find required extension");
     return false;
   }
-  return InstallExtension(current, extension_states);
+  return InstallExtension(current);
 }
 
 
-bool Genesis::InstallExtension(v8::RegisteredExtension* current,
-                               ExtensionStates* extension_states) {
+bool Genesis::InstallExtension(v8::RegisteredExtension* current) {
   HandleScope scope;
 
-  if (extension_states->get_state(current) == INSTALLED) return true;
+  if (current->state() == v8::INSTALLED) return true;
   // The current node has already been visited so there must be a
   // cycle in the dependency graph; fail.
-  if (extension_states->get_state(current) == VISITED) {
+  if (current->state() == v8::VISITED) {
     v8::Utils::ReportApiFailure(
         "v8::Context::New()", "Circular extension dependency");
     return false;
   }
-  ASSERT(extension_states->get_state(current) == UNVISITED);
-  extension_states->set_state(current, VISITED);
+  ASSERT(current->state() == v8::UNVISITED);
+  current->set_state(v8::VISITED);
   v8::Extension* extension = current->extension();
   // Install the extension's dependencies
   for (int i = 0; i < extension->dependency_count(); i++) {
-    if (!InstallExtension(extension->dependencies()[i], extension_states))
-      return false;
+    if (!InstallExtension(extension->dependencies()[i])) return false;
   }
   Isolate* isolate = Isolate::Current();
-  Handle<String> source_code =
-      isolate->factory()->NewExternalStringFromAscii(extension->source());
-  bool result = CompileScriptCached(
-      CStrVector(extension->name()),
-      source_code,
-      isolate->bootstrapper()->extensions_cache(),
-      extension,
-      Handle<Context>(isolate->context()),
-      false);
+  Vector<const char> source = CStrVector(extension->source());
+  Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
+  bool result = CompileScriptCached(CStrVector(extension->name()),
+                                    source_code,
+                                    isolate->bootstrapper()->extensions_cache(),
+                                    extension,
+                                    Handle<Context>(isolate->context()),
+                                    false);
   ASSERT(isolate->has_pending_exception() != result);
   if (!result) {
-    // We print out the name of the extension that fails to install.
-    // When an error is thrown during bootstrapping we automatically print
-    // to the console, via the isolate's error-throwing functionality, the
-    // line number at which this happened.
-    OS::PrintError("Error installing extension '%s'.\n",
-                   current->extension()->name());
     isolate->clear_pending_exception();
   }
-  extension_states->set_state(current, INSTALLED);
-  isolate->NotifyExtensionInstalled();
+  current->set_state(v8::INSTALLED);
   return result;
 }
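The hunk above trades the side table (the deleted ExtensionStates, a pointer-keyed HashMap) for state stored on RegisteredExtension itself; both variants run the same algorithm, a depth-first install with tri-color marking (UNVISITED/VISITED/INSTALLED) that doubles as dependency-cycle detection. A minimal standalone sketch of that algorithm, with illustrative stand-in types rather than V8's API:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Illustrative stand-in for v8::RegisteredExtension.
    struct Extension {
      std::string name;
      std::vector<Extension*> dependencies;
    };

    enum State { UNVISITED, VISITED, INSTALLED };

    // Depth-first install. VISITED marks a node on the current DFS path,
    // so meeting it again means the dependency graph has a cycle.
    static bool Install(Extension* ext,
                        std::unordered_map<Extension*, State>* states) {
      std::unordered_map<Extension*, State>::iterator it = states->find(ext);
      State state = (it == states->end()) ? UNVISITED : it->second;
      if (state == INSTALLED) return true;
      if (state == VISITED) {
        std::fprintf(stderr, "Circular extension dependency at '%s'\n",
                     ext->name.c_str());
        return false;
      }
      (*states)[ext] = VISITED;
      for (size_t i = 0; i < ext->dependencies.size(); i++) {
        if (!Install(ext->dependencies[i], states)) return false;
      }
      (*states)[ext] = INSTALLED;
      std::printf("installed '%s'\n", ext->name.c_str());
      return true;
    }

    int main() {
      Extension a, b, c;
      a.name = "a"; b.name = "b"; c.name = "c";
      b.dependencies.push_back(&a);
      c.dependencies.push_back(&b);
      b.dependencies.push_back(&c);  // b -> c -> b: a cycle
      std::unordered_map<Extension*, State> states;
      return Install(&c, &states) ? 0 : 1;
    }

Installing c installs a first, then reports the b/c cycle, matching the "Circular extension dependency" failure path above.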
 
@@ -2096,9 +1967,7 @@
     builtins->set_javascript_builtin(id, *function);
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
-    if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
-      return false;
-    }
+    if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
     // Set the code object on the function object.
     function->ReplaceCode(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
@@ -2166,9 +2035,7 @@
           Handle<String> key = Handle<String>(descs->GetKey(i));
           int index = descs->GetFieldIndex(i);
           Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
-          CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
-                                 JSObject::SetLocalPropertyIgnoreAttributes(
-                                     to, key, value, details.attributes()));
+          SetLocalPropertyNoThrow(to, key, value, details.attributes());
           break;
         }
         case CONSTANT_FUNCTION: {
@@ -2176,13 +2043,11 @@
           Handle<String> key = Handle<String>(descs->GetKey(i));
           Handle<JSFunction> fun =
               Handle<JSFunction>(descs->GetConstantFunction(i));
-          CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
-                                 JSObject::SetLocalPropertyIgnoreAttributes(
-                                     to, key, fun, details.attributes()));
+          SetLocalPropertyNoThrow(to, key, fun, details.attributes());
           break;
         }
         case CALLBACKS: {
-          LookupResult result(isolate());
+          LookupResult result;
           to->LocalLookup(descs->GetKey(i), &result);
           // If the property is already there we skip it
           if (result.IsProperty()) continue;
@@ -2193,7 +2058,7 @@
           Handle<Object> callbacks(descs->GetCallbacksObject(i));
           PropertyDetails d =
               PropertyDetails(details.attributes(), CALLBACKS, details.index());
-          JSObject::SetNormalizedProperty(to, key, callbacks, d);
+          SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
         case MAP_TRANSITION:
@@ -2220,7 +2085,7 @@
       if (properties->IsKey(raw_key)) {
         ASSERT(raw_key->IsString());
         // If the property is already there we skip it.
-        LookupResult result(isolate());
+        LookupResult result;
         to->LocalLookup(String::cast(raw_key), &result);
         if (result.IsProperty()) continue;
         // Set the property.
@@ -2230,9 +2095,7 @@
           value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
         }
         PropertyDetails details = properties->DetailsAt(i);
-        CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   to, key, value, details.attributes()));
+        SetLocalPropertyNoThrow(to, key, value, details.attributes());
       }
     }
   }
@@ -2295,12 +2158,6 @@
   HandleScope scope;
   SaveContext saved_context(isolate);
 
-  // During genesis, the boilerplate for stack overflow won't work until the
-  // environment has been at least partially initialized. Add a stack check
-  // before entering JS code to catch overflow early.
-  StackLimitCheck check(Isolate::Current());
-  if (check.HasOverflowed()) return;
-
   Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
   if (!new_context.is_null()) {
     global_context_ =
@@ -2327,7 +2184,7 @@
     Handle<JSGlobalProxy> global_proxy =
         CreateNewGlobals(global_template, global_object, &inner_global);
     HookUpGlobalProxy(inner_global, global_proxy);
-    if (!InitializeGlobal(inner_global, empty_function)) return;
+    InitializeGlobal(inner_global, empty_function);
     InstallJSFunctionResultCaches();
     InitializeNormalizedMapCaches();
     if (!InstallNatives()) return;
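The deleted StackLimitCheck lines guarded genesis against entering JS code with too little stack left. A rough sketch of that kind of guard, assuming a downward-growing stack as on common platforms; all names here are illustrative, not V8's:

    #include <cstdint>
    #include <cstdio>

    static uintptr_t stack_limit = 0;

    // Permit at most 'budget' bytes of stack below the current frame.
    static void InitStackLimit(size_t budget) {
      int anchor;  // the address of a local approximates the stack top
      stack_limit = reinterpret_cast<uintptr_t>(&anchor) - budget;
    }

    static bool HasOverflowed() {
      int probe;
      return reinterpret_cast<uintptr_t>(&probe) < stack_limit;
    }

    static int Recurse(int depth) {
      if (HasOverflowed()) return depth;  // bail out before a real overflow
      volatile char pad[1024];            // consume some stack per frame
      pad[0] = static_cast<char>(depth);
      return Recurse(depth + 1) + (pad[0] & 0);
    }

    int main() {
      InitStackLimit(256 * 1024);  // a 256KB recursion budget
      std::printf("bailed out at depth %d\n", Recurse(0));
      return 0;
    }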
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 101c2e1..abf61b9 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -88,7 +88,7 @@
 // context.
 class Bootstrapper {
  public:
-  // Requires: Heap::SetUp has been called.
+  // Requires: Heap::Setup has been called.
   void Initialize(bool create_heap_objects);
   void TearDown();
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 0f493e6..e6a0699 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,8 +33,6 @@
 #include "builtins.h"
 #include "gdb-jit.h"
 #include "ic-inl.h"
-#include "heap-profiler.h"
-#include "mark-compact.h"
 #include "vm-state-inl.h"
 
 namespace v8 {
@@ -185,46 +183,39 @@
 }
 
 
-static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
-                                           Isolate* isolate,
-                                           JSFunction* constructor) {
+BUILTIN(ArrayCodeGeneric) {
   Heap* heap = isolate->heap();
   isolate->counters()->array_function_runtime()->Increment();
 
   JSArray* array;
   if (CalledAsConstructor(isolate)) {
-    array = JSArray::cast((*args)[0]);
-    // Initialize elements and length in case later allocations fail so that the
-    // array object is initialized in a valid state.
-    array->set_length(Smi::FromInt(0));
-    array->set_elements(heap->empty_fixed_array());
-    if (!FLAG_smi_only_arrays) {
-      Context* global_context = isolate->context()->global_context();
-      if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
-          !global_context->object_js_array_map()->IsUndefined()) {
-        array->set_map(Map::cast(global_context->object_js_array_map()));
-      }
-    }
+    array = JSArray::cast(*args.receiver());
   } else {
     // Allocate the JS Array
-    MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
-    if (!maybe_obj->To(&array)) return maybe_obj;
+    JSFunction* constructor =
+        isolate->context()->global_context()->array_function();
+    Object* obj;
+    { MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
+      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    }
+    array = JSArray::cast(obj);
   }
 
+  // 'array' now contains the JSArray we should initialize.
+  ASSERT(array->HasFastElements());
+
   // Optimize the case where there is one argument and the argument is a
   // small smi.
-  if (args->length() == 2) {
-    Object* obj = (*args)[1];
+  if (args.length() == 2) {
+    Object* obj = args[1];
     if (obj->IsSmi()) {
       int len = Smi::cast(obj)->value();
       if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
-        Object* fixed_array;
+        Object* obj;
         { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
-          if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
+          if (!maybe_obj->ToObject(&obj)) return maybe_obj;
         }
-        // We do not use SetContent to skip the unnecessary elements type check.
-        array->set_elements(FixedArray::cast(fixed_array));
-        array->set_length(Smi::cast(obj));
+        array->SetContent(FixedArray::cast(obj));
         return array;
       }
     }
@@ -232,82 +223,78 @@
     { MaybeObject* maybe_obj = array->Initialize(0);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
-    return array->SetElementsLength((*args)[1]);
+    return array->SetElementsLength(args[1]);
   }
 
   // Optimize the case where there are no parameters passed.
-  if (args->length() == 1) {
+  if (args.length() == 1) {
     return array->Initialize(JSArray::kPreallocatedArrayElements);
   }
 
-  // Set length and elements on the array.
-  int number_of_elements = args->length() - 1;
-  MaybeObject* maybe_object =
-      array->EnsureCanContainElements(args, 1, number_of_elements,
-                                      ALLOW_CONVERTED_DOUBLE_ELEMENTS);
-  if (maybe_object->IsFailure()) return maybe_object;
-
-  // Allocate an appropriately typed elements array.
-  MaybeObject* maybe_elms;
-  ElementsKind elements_kind = array->GetElementsKind();
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
-        number_of_elements);
-  } else {
-    maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+  // Take the arguments as elements.
+  int number_of_elements = args.length() - 1;
+  Smi* len = Smi::FromInt(number_of_elements);
+  Object* obj;
+  { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
-  FixedArrayBase* elms;
-  if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
 
+  AssertNoAllocation no_gc;
+  FixedArray* elms = FixedArray::cast(obj);
+  WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
   // Fill in the content
-  switch (array->GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS: {
-      FixedArray* smi_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
-      }
-      break;
-    }
-    case FAST_ELEMENTS: {
-      AssertNoAllocation no_gc;
-      WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
-      FixedArray* object_elms = FixedArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        object_elms->set(index, (*args)[index+1], mode);
-      }
-      break;
-    }
-    case FAST_DOUBLE_ELEMENTS: {
-      FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
-      for (int index = 0; index < number_of_elements; index++) {
-        double_elms->set(index, (*args)[index+1]->Number());
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
+  for (int index = 0; index < number_of_elements; index++) {
+    elms->set(index, args[index+1], mode);
   }
 
-  array->set_elements(elms);
-  array->set_length(Smi::FromInt(number_of_elements));
+  // Set length and elements on the array.
+  array->set_elements(FixedArray::cast(obj));
+  array->set_length(len);
+
   return array;
 }
 
 
-BUILTIN(InternalArrayCodeGeneric) {
-  return ArrayCodeGenericCommon(
-      &args,
-      isolate,
-      isolate->context()->global_context()->internal_array_function());
+MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
+  JSFunction* array_function =
+      heap->isolate()->context()->global_context()->array_function();
+  Object* result;
+  { MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  return result;
 }
 
 
-BUILTIN(ArrayCodeGeneric) {
-  return ArrayCodeGenericCommon(
-      &args,
-      isolate,
-      isolate->context()->global_context()->array_function());
+MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
+  Object* result;
+  { MaybeObject* maybe_result = AllocateJSArray(heap);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  JSArray* result_array = JSArray::cast(result);
+  result_array->set_length(Smi::FromInt(0));
+  result_array->set_elements(heap->empty_fixed_array());
+  return result_array;
+}
+
+
+static void CopyElements(Heap* heap,
+                         AssertNoAllocation* no_gc,
+                         FixedArray* dst,
+                         int dst_index,
+                         FixedArray* src,
+                         int src_index,
+                         int len) {
+  ASSERT(dst != src);  // Use MoveElements instead.
+  ASSERT(dst->map() != HEAP->fixed_cow_array_map());
+  ASSERT(len > 0);
+  CopyWords(dst->data_start() + dst_index,
+            src->data_start() + src_index,
+            len);
+  WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
+  if (mode == UPDATE_WRITE_BARRIER) {
+    heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
+  }
 }
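CopyElements above does one raw CopyWords and then a single RecordWrites over the whole destination range, instead of paying a write barrier per element. A toy card-marking sketch of the idea; the card size, heap layout, and names are illustrative, not V8's actual remembered-set machinery:

    #include <bitset>
    #include <cstddef>
    #include <cstring>

    const size_t kCardSize = 512;    // bytes per card
    const size_t kHeapWords = 4096;  // toy heap size in words

    static void* heap_words[kHeapWords];
    static std::bitset<kHeapWords * sizeof(void*) / kCardSize> dirty_cards;

    // Mark every card overlapping the written word range as dirty, so an
    // incremental/generational GC knows to re-scan it.
    static void RecordWrites(size_t first_word, size_t count) {
      size_t first_byte = first_word * sizeof(void*);
      size_t last_byte = (first_word + count) * sizeof(void*) - 1;
      for (size_t card = first_byte / kCardSize;
           card <= last_byte / kCardSize; card++) {
        dirty_cards.set(card);
      }
    }

    // Bulk copy, then one barrier for the whole destination range.
    static void CopyElements(size_t dst, size_t src, size_t len) {
      std::memmove(&heap_words[dst], &heap_words[src], len * sizeof(void*));
      RecordWrites(dst, len);
    }

    int main() {
      CopyElements(1000, 0, 16);
      return dirty_cards.any() ? 0 : 1;
    }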
 
 
@@ -318,7 +305,6 @@
                          FixedArray* src,
                          int src_index,
                          int len) {
-  if (len == 0) return;
   ASSERT(dst->map() != HEAP->fixed_cow_array_map());
   memmove(dst->data_start() + dst_index,
           src->data_start() + src_index,
@@ -327,7 +313,6 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
-  heap->incremental_marking()->RecordWrites(dst);
 }
 
 
@@ -373,16 +358,6 @@
   former_start[to_trim] = heap->fixed_array_map();
   former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
 
-  // Maintain marking consistency for HeapObjectIterator and
-  // IncrementalMarking.
-  int size_delta = to_trim * kPointerSize;
-  if (heap->marking()->TransferMark(elms->address(),
-                                    elms->address() + size_delta)) {
-    MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
-  }
-
-  HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
-                                     elms->address() + size_delta));
   return FixedArray::cast(HeapObject::FromAddress(
       elms->address() + to_trim * kPointerSize));
 }
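LeftTrimFixedArray discards the first to_trim elements without copying: it writes a fresh map and length into the words being vacated and returns the object at the shifted address. A self-contained sketch with a one-word toy header; a real heap object also carries a map word, and the vacated prefix becomes GC filler:

    #include <cassert>
    #include <cstdio>

    // Toy heap object layout: [header word = length][elements...].
    struct Array {
      long length;
      long elements[1];  // actually 'length' words follow the header
    };

    // Left-trim by planting a new header just before the surviving
    // elements; no element is moved.
    static Array* LeftTrim(Array* a, int to_trim) {
      assert(to_trim > 0 && to_trim <= a->length);
      long new_length = a->length - to_trim;
      Array* trimmed = reinterpret_cast<Array*>(&a->elements[to_trim - 1]);
      trimmed->length = new_length;
      return trimmed;
    }

    int main() {
      // Header word (length = 8) followed by eight elements.
      long storage[9] = {8, 10, 11, 12, 13, 14, 15, 16, 17};
      Array* a = reinterpret_cast<Array*>(storage);
      Array* t = LeftTrim(a, 3);
      std::printf("len=%ld first=%ld\n", t->length, t->elements[0]);  // 5, 13
      return 0;
    }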
@@ -394,6 +369,9 @@
   // This method depends on non writability of Object and Array prototype
   // fields.
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
+  // Hidden prototype
+  array_proto = JSObject::cast(array_proto->GetPrototype());
+  ASSERT(array_proto->elements() == heap->empty_fixed_array());
   // Object.prototype
   Object* proto = array_proto->GetPrototype();
   if (proto == heap->null_value()) return false;
@@ -406,43 +384,20 @@
 
 MUST_USE_RESULT
 static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
-    Heap* heap, Object* receiver, Arguments* args, int first_added_arg) {
+    Heap* heap, Object* receiver) {
   if (!receiver->IsJSArray()) return NULL;
   JSArray* array = JSArray::cast(receiver);
   HeapObject* elms = array->elements();
-  Map* map = elms->map();
-  if (map == heap->fixed_array_map()) {
-    if (args == NULL || !array->HasFastSmiOnlyElements()) {
-      return elms;
-    }
-  } else if (map == heap->fixed_cow_array_map()) {
-    MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
-    if (args == NULL || !array->HasFastSmiOnlyElements() ||
-        maybe_writable_result->IsFailure()) {
-      return maybe_writable_result;
-    }
-  } else {
-    return NULL;
+  if (elms->map() == heap->fixed_array_map()) return elms;
+  if (elms->map() == heap->fixed_cow_array_map()) {
+    return array->EnsureWritableFastElements();
   }
-
-  // Need to ensure that the arguments passed in args can be contained in
-  // the array.
-  int args_length = args->length();
-  if (first_added_arg >= args_length) return array->elements();
-
-  MaybeObject* maybe_array = array->EnsureCanContainElements(
-      args,
-      first_added_arg,
-      args_length - first_added_arg,
-      DONT_ALLOW_DOUBLE_ELEMENTS);
-  if (maybe_array->IsFailure()) return maybe_array;
-  return array->elements();
+  return NULL;
 }
 
 
 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                      JSArray* receiver) {
-  if (!FLAG_clever_optimizations) return false;
   Context* global_context = heap->isolate()->context()->global_context();
   JSObject* array_proto =
       JSObject::cast(global_context->array_function()->prototype());
@@ -458,18 +413,20 @@
   HandleScope handleScope(isolate);
 
   Handle<Object> js_builtin =
-      GetProperty(Handle<JSObject>(isolate->global_context()->builtins()),
-                  name);
-  Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
-  int argc = args.length() - 1;
-  ScopedVector<Handle<Object> > argv(argc);
-  for (int i = 0; i < argc; ++i) {
-    argv[i] = args.at<Object>(i + 1);
+      GetProperty(Handle<JSObject>(
+          isolate->global_context()->builtins()),
+          name);
+  ASSERT(js_builtin->IsJSFunction());
+  Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
+  ScopedVector<Object**> argv(args.length() - 1);
+  int n_args = args.length() - 1;
+  for (int i = 0; i < n_args; i++) {
+    argv[i] = args.at<Object>(i + 1).location();
   }
-  bool pending_exception;
+  bool pending_exception = false;
   Handle<Object> result = Execution::Call(function,
                                           args.receiver(),
-                                          argc,
+                                          n_args,
                                           argv.start(),
                                           &pending_exception);
   if (pending_exception) return Failure::Exception();
@@ -482,7 +439,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+        EnsureJSArrayWithWritableFastElements(heap, receiver);
     if (maybe_elms_obj == NULL) {
       return CallJsBuiltin(isolate, "ArrayPush", args);
     }
@@ -511,11 +468,14 @@
     }
     FixedArray* new_elms = FixedArray::cast(obj);
 
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               new_elms, FAST_ELEMENTS, 0, len);
+    AssertNoAllocation no_gc;
+    if (len > 0) {
+      CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
+    }
     FillWithHoles(heap, new_elms, new_length, capacity);
 
     elms = new_elms;
+    array->set_elements(elms);
   }
 
   // Add the provided values.
@@ -525,10 +485,6 @@
     elms->set(index + len, args[index + 1], mode);
   }
 
-  if (elms != array->elements()) {
-    array->set_elements(elms);
-  }
-
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
   return Smi::FromInt(new_length);
@@ -540,7 +496,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+        EnsureJSArrayWithWritableFastElements(heap, receiver);
     if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
   }
@@ -573,7 +529,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+        EnsureJSArrayWithWritableFastElements(heap, receiver);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayShift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -583,7 +539,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
+  ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -595,7 +551,9 @@
   }
 
   if (!heap->lo_space()->Contains(elms)) {
-    array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+    // As elms is still in the same space it used to be in,
+    // there is no need to update the region dirty marks.
+    array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
   } else {
     // Shift the elements.
     AssertNoAllocation no_gc;
@@ -615,7 +573,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+        EnsureJSArrayWithWritableFastElements(heap, receiver);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArrayUnshift", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -625,7 +583,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
+  ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
@@ -634,11 +592,6 @@
   // we should never hit this case.
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
-  MaybeObject* maybe_object =
-      array->EnsureCanContainElements(&args, 1, to_add,
-                                      DONT_ALLOW_DOUBLE_ELEMENTS);
-  if (maybe_object->IsFailure()) return maybe_object;
-
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
@@ -647,9 +600,13 @@
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* new_elms = FixedArray::cast(obj);
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               new_elms, FAST_ELEMENTS, to_add, len);
+
+    AssertNoAllocation no_gc;
+    if (len > 0) {
+      CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
+    }
     FillWithHoles(heap, new_elms, new_length, capacity);
+
     elms = new_elms;
     array->set_elements(elms);
   } else {
@@ -677,7 +634,7 @@
   int len = -1;
   if (receiver->IsJSArray()) {
     JSArray* array = JSArray::cast(receiver);
-    if (!array->HasFastTypeElements() ||
+    if (!array->HasFastElements() ||
         !IsJSArrayFastElementMovingAllowed(heap, array)) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -693,7 +650,7 @@
     bool is_arguments_object_with_fast_elements =
         receiver->IsJSObject()
         && JSObject::cast(receiver)->map() == arguments_map
-        && JSObject::cast(receiver)->HasFastTypeElements();
+        && JSObject::cast(receiver)->HasFastElements();
     if (!is_arguments_object_with_fast_elements) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -746,22 +703,32 @@
   int final = (relative_end < 0) ? Max(len + relative_end, 0)
                                  : Min(relative_end, len);
 
-  ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
-
   // Calculate the length of result array.
-  int result_len = Max(final - k, 0);
+  int result_len = final - k;
+  if (result_len <= 0) {
+    return AllocateEmptyJSArray(heap);
+  }
 
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      result_len,
-                                      result_len);
-  JSArray* result_array;
-  if (!maybe_array->To(&result_array)) return maybe_array;
+  Object* result;
+  { MaybeObject* maybe_result = AllocateJSArray(heap);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  JSArray* result_array = JSArray::cast(result);
 
-  CopyObjectToObjectElements(elms, FAST_ELEMENTS, k,
-                             FixedArray::cast(result_array->elements()),
-                             FAST_ELEMENTS, 0, result_len);
+  { MaybeObject* maybe_result =
+        heap->AllocateUninitializedFixedArray(result_len);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  FixedArray* result_elms = FixedArray::cast(result);
 
+  AssertNoAllocation no_gc;
+  CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
+
+  // Set elements.
+  result_array->set_elements(result_elms);
+
+  // Set the length.
+  result_array->set_length(Smi::FromInt(result_len));
   return result_array;
 }
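The k/final computation above is ES5's relative-index normalization for Array.prototype.slice: negative indices count back from the end, and both endpoints clamp to [0, len]. As a standalone helper:

    #include <algorithm>
    #include <cstdio>

    // Negative indices count from the end; both ends clamp to [0, len].
    static int ClampRelativeIndex(int relative, int len) {
      return relative < 0 ? std::max(len + relative, 0)
                          : std::min(relative, len);
    }

    int main() {
      int len = 10;
      int k = ClampRelativeIndex(-3, len);        // start: 7
      int final_ = ClampRelativeIndex(100, len);  // end: clamped to 10
      std::printf("slice copies [%d, %d), %d elements\n",
                  k, final_, std::max(final_ - k, 0));
      return 0;
    }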
 
@@ -771,7 +738,7 @@
   Object* receiver = *args.receiver();
   Object* elms_obj;
   { MaybeObject* maybe_elms_obj =
-        EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+        EnsureJSArrayWithWritableFastElements(heap, receiver);
     if (maybe_elms_obj == NULL)
         return CallJsBuiltin(isolate, "ArraySplice", args);
     if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
@@ -781,7 +748,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastTypeElements());
+  ASSERT(array->HasFastElements());
 
   int len = Smi::cast(array->length())->value();
 
@@ -822,25 +789,45 @@
   }
 
   JSArray* result_array = NULL;
-  ElementsKind elements_kind =
-      JSObject::cast(receiver)->GetElementsKind();
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      actual_delete_count,
-                                      actual_delete_count);
-  if (!maybe_array->To(&result_array)) return maybe_array;
+  if (actual_delete_count == 0) {
+    Object* result;
+    { MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    result_array = JSArray::cast(result);
+  } else {
+    // Allocate result array.
+    Object* result;
+    { MaybeObject* maybe_result = AllocateJSArray(heap);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    result_array = JSArray::cast(result);
 
-  {
+    { MaybeObject* maybe_result =
+          heap->AllocateUninitializedFixedArray(actual_delete_count);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+    FixedArray* result_elms = FixedArray::cast(result);
+
+    AssertNoAllocation no_gc;
     // Fill newly created array.
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, actual_start,
-                               FixedArray::cast(result_array->elements()),
-                               FAST_ELEMENTS, 0, actual_delete_count);
+    CopyElements(heap,
+                 &no_gc,
+                 result_elms, 0,
+                 elms, actual_start,
+                 actual_delete_count);
+
+    // Set elements.
+    result_array->set_elements(result_elms);
+
+    // Set the length.
+    result_array->set_length(Smi::FromInt(actual_delete_count));
   }
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+
   int new_length = len - actual_delete_count + item_count;
 
-  bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
     const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -849,14 +836,13 @@
     if (trim_array) {
       const int delta = actual_delete_count - item_count;
 
-      {
+      if (actual_start > 0) {
         AssertNoAllocation no_gc;
         MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
       }
 
       elms = LeftTrimFixedArray(heap, elms, delta);
-
-      elms_changed = true;
+      array->set_elements(elms, SKIP_WRITE_BARRIER);
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -881,21 +867,22 @@
       }
       FixedArray* new_elms = FixedArray::cast(obj);
 
-      {
-        // Copy the part before actual_start as is.
-        CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                                   new_elms, FAST_ELEMENTS, 0, actual_start);
-        const int to_copy = len - actual_delete_count - actual_start;
-        CopyObjectToObjectElements(elms, FAST_ELEMENTS,
-                                   actual_start + actual_delete_count,
-                                   new_elms, FAST_ELEMENTS,
-                                   actual_start + item_count, to_copy);
+      AssertNoAllocation no_gc;
+      // Copy the part before actual_start as is.
+      if (actual_start > 0) {
+        CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
       }
-
+      const int to_copy = len - actual_delete_count - actual_start;
+      if (to_copy > 0) {
+        CopyElements(heap, &no_gc,
+                     new_elms, actual_start + item_count,
+                     elms, actual_start + actual_delete_count,
+                     to_copy);
+      }
       FillWithHoles(heap, new_elms, new_length, capacity);
 
       elms = new_elms;
-      elms_changed = true;
+      array->set_elements(elms);
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -911,10 +898,6 @@
     elms->set(k, args[3 + k - actual_start], mode);
   }
 
-  if (elms_changed) {
-    array->set_elements(elms);
-  }
-
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
 
@@ -935,10 +918,9 @@
   // and calculating total length.
   int n_arguments = args.length();
   int result_len = 0;
-  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
         || JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
@@ -955,35 +937,43 @@
     if (result_len > FixedArray::kMaxLength) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
+  }
 
-    if (!JSArray::cast(arg)->HasFastSmiOnlyElements()) {
-      elements_kind = FAST_ELEMENTS;
-    }
+  if (result_len == 0) {
+    return AllocateEmptyJSArray(heap);
   }
 
   // Allocate result.
-  JSArray* result_array;
-  MaybeObject* maybe_array =
-      heap->AllocateJSArrayAndStorage(elements_kind,
-                                      result_len,
-                                      result_len);
-  if (!maybe_array->To(&result_array)) return maybe_array;
-  if (result_len == 0) return result_array;
+  Object* result;
+  { MaybeObject* maybe_result = AllocateJSArray(heap);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  JSArray* result_array = JSArray::cast(result);
+
+  { MaybeObject* maybe_result =
+        heap->AllocateUninitializedFixedArray(result_len);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  FixedArray* result_elms = FixedArray::cast(result);
 
   // Copy data.
+  AssertNoAllocation no_gc;
   int start_pos = 0;
-  FixedArray* result_elms(FixedArray::cast(result_array->elements()));
   for (int i = 0; i < n_arguments; i++) {
     JSArray* array = JSArray::cast(args[i]);
     int len = Smi::cast(array->length())->value();
-    FixedArray* elms = FixedArray::cast(array->elements());
-    CopyObjectToObjectElements(elms, FAST_ELEMENTS, 0,
-                               result_elms, FAST_ELEMENTS,
-                               start_pos, len);
-    start_pos += len;
+    if (len > 0) {
+      FixedArray* elms = FixedArray::cast(array->elements());
+      CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
+      start_pos += len;
+    }
   }
   ASSERT(start_pos == result_len);
 
+  // Set the length and elements.
+  result_array->set_length(Smi::FromInt(result_len));
+  result_array->set_elements(result_elms);
+
   return result_array;
 }
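The loop above accumulates result_len and bails out to the JS ArrayConcat whenever the running total could no longer be represented (Smi range, FixedArray::kMaxLength). A sketch of that overflow guard, with an illustrative cap standing in for V8's limits:

    #include <cstdio>

    // Illustrative cap standing in for FixedArray::kMaxLength / Smi range.
    const int kMaxFastArrayLength = (1 << 30) - 1;

    // Mirrors the builtin's bail-out: refuse to grow the running total
    // once the sum would overflow the fast-path limit.
    static bool AccumulateLength(int* total, int len) {
      if (len > kMaxFastArrayLength - *total) return false;
      *total += len;
      return true;
    }

    int main() {
      int lengths[] = {1 << 29, 1 << 29, 1 << 29};
      int total = 0;
      for (int i = 0; i < 3; i++) {
        if (!AccumulateLength(&total, lengths[i])) {
          std::printf("fall back to the JS ArrayConcat at total=%d\n", total);
          return 0;
        }
      }
      std::printf("fast path, total=%d\n", total);
      return 0;
    }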
 
@@ -1458,14 +1448,6 @@
   KeyedStoreIC::GenerateNonStrictArguments(masm);
 }
 
-static void Generate_TransitionElementsSmiToDouble(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateTransitionElementsSmiToDouble(masm);
-}
-
-static void Generate_TransitionElementsDoubleToObject(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateTransitionElementsDoubleToObject(masm);
-}
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateLoadICDebugBreak(masm);
@@ -1487,30 +1469,18 @@
 }
 
 
+static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateConstructCallDebugBreak(masm);
+}
+
+
 static void Generate_Return_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateReturnDebugBreak(masm);
 }
 
 
-static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateCallFunctionStubDebugBreak(masm);
-}
-
-
-static void Generate_CallFunctionStub_Recording_DebugBreak(
-    MacroAssembler* masm) {
-  Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
-  Debug::GenerateCallConstructStubDebugBreak(masm);
-}
-
-
-static void Generate_CallConstructStub_Recording_DebugBreak(
-    MacroAssembler* masm) {
-  Debug::GenerateCallConstructStubRecordDebugBreak(masm);
+static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
+  Debug::GenerateStubNoRegistersDebugBreak(masm);
 }
 
 
@@ -1567,30 +1537,30 @@
   BuiltinExtraArguments extra_args;
 };
 
-#define BUILTIN_FUNCTION_TABLE_INIT { V8_ONCE_INIT, {} }
-
 class BuiltinFunctionTable {
  public:
-  BuiltinDesc* functions() {
-    CallOnce(&once_, &Builtins::InitBuiltinFunctionTable);
-    return functions_;
+  BuiltinFunctionTable() {
+    Builtins::InitBuiltinFunctionTable();
   }
 
-  OnceType once_;
-  BuiltinDesc functions_[Builtins::builtin_count + 1];
+  static const BuiltinDesc* functions() { return functions_; }
+
+ private:
+  static BuiltinDesc functions_[Builtins::builtin_count + 1];
 
   friend class Builtins;
 };
 
-static BuiltinFunctionTable builtin_function_table =
-    BUILTIN_FUNCTION_TABLE_INIT;
+BuiltinDesc BuiltinFunctionTable::functions_[Builtins::builtin_count + 1];
+
+static const BuiltinFunctionTable builtin_function_table_init;
 
 // Define array of pointers to generators and C builtin functions.
 // We do this in a sort of roundabout way so that we can do the initialization
 // within the lexical scope of Builtins:: and within a context where
 // Code::Flags names a non-abstract type.
 void Builtins::InitBuiltinFunctionTable() {
-  BuiltinDesc* functions = builtin_function_table.functions_;
+  BuiltinDesc* functions = BuiltinFunctionTable::functions_;
   functions[builtin_count].generator = NULL;
   functions[builtin_count].c_code = NULL;
   functions[builtin_count].s_name = NULL;
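The deleted BUILTIN_FUNCTION_TABLE_INIT/CallOnce code built the table lazily on first access; the restored code relies instead on a static object's constructor running at startup. A sketch of the lazy variant, using std::call_once as a stand-in for V8's own OnceType/CallOnce machinery:

    #include <cstdio>
    #include <mutex>

    struct BuiltinDesc { const char* name; };
    const int kBuiltinCount = 3;

    static BuiltinDesc functions_[kBuiltinCount];
    static std::once_flag once_;  // stands in for OnceType/V8_ONCE_INIT

    static void InitBuiltinFunctionTable() {
      const char* names[kBuiltinCount] = {"ArrayPush", "ArrayPop",
                                          "ArrayShift"};
      for (int i = 0; i < kBuiltinCount; i++) functions_[i].name = names[i];
      std::printf("table initialized\n");
    }

    // Every accessor funnels through the once-guard, so the table is
    // built exactly once, on first use, even with concurrent callers.
    static BuiltinDesc* functions() {
      std::call_once(once_, InitBuiltinFunctionTable);
      return functions_;
    }

    int main() {
      std::printf("%s\n", functions()[0].name);  // triggers initialization
      std::printf("%s\n", functions()[1].name);  // reuses the table
      return 0;
    }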
@@ -1626,7 +1596,7 @@
 #undef DEF_FUNCTION_PTR_A
 }
 
-void Builtins::SetUp(bool create_heap_objects) {
+void Builtins::Setup(bool create_heap_objects) {
   ASSERT(!initialized_);
   Isolate* isolate = Isolate::Current();
   Heap* heap = isolate->heap();
@@ -1634,25 +1604,23 @@
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
 
-  const BuiltinDesc* functions = builtin_function_table.functions();
+  const BuiltinDesc* functions = BuiltinFunctionTable::functions();
 
   // For now we generate builtin adaptor code into a stack-allocated
-  // buffer, before copying it into individual code objects. Be careful
-  // with alignment; some platforms don't like unaligned code.
-  union { int force_alignment; byte buffer[4*KB]; } u;
+  // buffer, before copying it into individual code objects.
+  byte buffer[4*KB];
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
+      MacroAssembler masm(isolate, buffer, sizeof buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
-      ASSERT(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
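The deleted lines kept the alignment-forcing union; the restored code goes back to a bare byte array, which some targets may place at an unaligned address. The union trick in isolation (kBufferSize mirrors the 4*KB above):

    #include <cstdint>
    #include <cstdio>

    const int kBufferSize = 4 * 1024;

    // Placing the byte array in a union with an int forces the whole
    // object to int alignment; a plain char array guarantees nothing.
    union AlignedBuffer {
      int force_alignment;
      unsigned char buffer[kBufferSize];
    };

    int main() {
      AlignedBuffer u;
      bool aligned =
          reinterpret_cast<std::uintptr_t>(u.buffer) % sizeof(int) == 0;
      std::printf("buffer at %p, int-aligned: %d\n",
                  static_cast<void*>(u.buffer), static_cast<int>(aligned));
      return 0;
    }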
diff --git a/src/builtins.h b/src/builtins.h
index f079139..31090d3 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -44,7 +44,6 @@
                                                                     \
   V(EmptyFunction, NO_EXTRA_ARGUMENTS)                              \
                                                                     \
-  V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS)                   \
   V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS)                           \
                                                                     \
   V(ArrayPush, NO_EXTRA_ARGUMENTS)                                  \
@@ -67,6 +66,8 @@
 #define BUILTIN_LIST_A(V)                                               \
   V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
+  V(JSConstructCall,                BUILTIN, UNINITIALIZED,             \
+                                    Code::kNoExtraICState)              \
   V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,             \
@@ -166,10 +167,6 @@
                                     kStrictMode)                        \
   V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC,       \
                                      Code::kNoExtraICState)             \
-  V(TransitionElementsSmiToDouble,  BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
-  V(TransitionElementsDoubleToObject, BUILTIN, UNINITIALIZED,           \
-                                      Code::kNoExtraICState)            \
                                                                         \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
   V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
@@ -177,8 +174,6 @@
   V(FunctionApply,                  BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
                                                                         \
-  V(InternalArrayCode,              BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
   V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(ArrayConstructCode,             BUILTIN, UNINITIALIZED,             \
@@ -193,31 +188,27 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 // Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V)                                 \
-  V(Return_DebugBreak,                         BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(CallFunctionStub_DebugBreak,               BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(CallFunctionStub_Recording_DebugBreak,     BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(CallConstructStub_DebugBreak,              BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(CallConstructStub_Recording_DebugBreak,    BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(LoadIC_DebugBreak,                         LOAD_IC, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(KeyedLoadIC_DebugBreak,                    KEYED_LOAD_IC, DEBUG_BREAK,    \
-                                               Code::kNoExtraICState)         \
-  V(StoreIC_DebugBreak,                        STORE_IC, DEBUG_BREAK,         \
-                                               Code::kNoExtraICState)         \
-  V(KeyedStoreIC_DebugBreak,                   KEYED_STORE_IC, DEBUG_BREAK,   \
-                                               Code::kNoExtraICState)         \
-  V(Slot_DebugBreak,                           BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(PlainReturn_LiveEdit,                      BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)         \
-  V(FrameDropper_LiveEdit,                     BUILTIN, DEBUG_BREAK,          \
-                                               Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V)                                \
+  V(Return_DebugBreak,          BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(ConstructCall_DebugBreak,   BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(StubNoRegisters_DebugBreak, BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(LoadIC_DebugBreak,          LOAD_IC, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(KeyedLoadIC_DebugBreak,     KEYED_LOAD_IC, DEBUG_BREAK,    \
+                                Code::kNoExtraICState)         \
+  V(StoreIC_DebugBreak,         STORE_IC, DEBUG_BREAK,         \
+                                Code::kNoExtraICState)         \
+  V(KeyedStoreIC_DebugBreak,    KEYED_STORE_IC, DEBUG_BREAK,   \
+                                Code::kNoExtraICState)         \
+  V(Slot_DebugBreak,            BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(PlainReturn_LiveEdit,       BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)         \
+  V(FrameDropper_LiveEdit,      BUILTIN, DEBUG_BREAK,          \
+                                Code::kNoExtraICState)
 #else
 #define BUILTIN_LIST_DEBUG_A(V)
 #endif
@@ -243,6 +234,7 @@
   V(DELETE, 2)                           \
   V(IN, 1)                               \
   V(INSTANCE_OF, 1)                      \
+  V(GET_KEYS, 0)                         \
   V(FILTER_KEY, 1)                       \
   V(CALL_NON_FUNCTION, 0)                \
   V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
@@ -267,7 +259,7 @@
 
   // Generate all builtin code objects. Should be called once during
   // isolate initialization.
-  void SetUp(bool create_heap_objects);
+  void Setup(bool create_heap_objects);
   void TearDown();
 
   // Garbage collection support.
@@ -348,6 +340,7 @@
   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
+  static void Generate_JSConstructCall(MacroAssembler* masm);
   static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
@@ -363,7 +356,6 @@
   static void Generate_FunctionCall(MacroAssembler* masm);
   static void Generate_FunctionApply(MacroAssembler* masm);
 
-  static void Generate_InternalArrayCode(MacroAssembler* masm);
   static void Generate_ArrayCode(MacroAssembler* masm);
   static void Generate_ArrayConstructCode(MacroAssembler* masm);
 
diff --git a/src/bytecodes-irregexp.h b/src/bytecodes-irregexp.h
index b13efb3..93218ea 100644
--- a/src/bytecodes-irregexp.h
+++ b/src/bytecodes-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,12 +33,12 @@
 namespace internal {
 
 
-const int BYTECODE_MASK = 0xff;
+static const int BYTECODE_MASK = 0xff;
 // The first argument is packed in with the byte code in one word, so it
 // has 24 bits; but since it can be positive or negative, only 23 bits are
 // usable for positive values.
-const unsigned int MAX_FIRST_ARG = 0x7fffffu;
-const int BYTECODE_SHIFT = 8;
+static const unsigned int MAX_FIRST_ARG = 0x7fffffu;
+static const int BYTECODE_SHIFT = 8;
 
 #define BYTECODE_ITERATOR(V)                                                   \
 V(BREAK,              0, 4)   /* bc8                                        */ \
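BYTECODE_MASK, BYTECODE_SHIFT, and MAX_FIRST_ARG describe how irregexp packs an 8-bit opcode and a signed 24-bit first argument into one word. A sketch of the packing and the sign-extending unpack; the opcode value used here is illustrative:

    #include <cassert>
    #include <cstdint>

    const int kBytecodeMask = 0xff;
    const int kBytecodeShift = 8;
    const unsigned kMaxFirstArg = 0x7fffffu;

    // Low byte holds the opcode; the upper 24 bits hold the argument.
    static uint32_t Pack(int bytecode, int32_t arg) {
      return static_cast<uint32_t>(bytecode & kBytecodeMask) |
             (static_cast<uint32_t>(arg) << kBytecodeShift);
    }

    static int Opcode(uint32_t word) { return word & kBytecodeMask; }

    // Arithmetic shift of the signed word recovers negative arguments.
    static int32_t FirstArg(uint32_t word) {
      return static_cast<int32_t>(word) >> kBytecodeShift;
    }

    int main() {
      uint32_t w = Pack(/*illustrative opcode*/ 17, -42);
      assert(Opcode(w) == 17);
      assert(FirstArg(w) == -42);
      uint32_t w2 = Pack(3, static_cast<int32_t>(kMaxFirstArg));
      assert(FirstArg(w2) == static_cast<int32_t>(kMaxFirstArg));
      return 0;
    }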
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index 9241d26..30a67a6 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -134,12 +134,14 @@
 };
 
 static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
-static const int kCachedPowersOffset = 348;  // -1 * the first decimal_exponent.
+static const int kCachedPowersOffset = -kCachedPowers[0].decimal_exponent;
 static const double kD_1_LOG2_10 = 0.30102999566398114;  //  1 / lg(10)
-// Difference between the decimal exponents in the table above.
-const int PowersOfTenCache::kDecimalExponentDistance = 8;
-const int PowersOfTenCache::kMinDecimalExponent = -348;
-const int PowersOfTenCache::kMaxDecimalExponent = 340;
+const int PowersOfTenCache::kDecimalExponentDistance =
+    kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;
+const int PowersOfTenCache::kMinDecimalExponent =
+    kCachedPowers[0].decimal_exponent;
+const int PowersOfTenCache::kMaxDecimalExponent =
+    kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
 
 void PowersOfTenCache::GetCachedPowerForBinaryExponentRange(
     int min_exponent,
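Deriving kCachedPowersOffset and the PowersOfTenCache limits from the table itself, as the restored lines above do, keeps them consistent with the data by construction; the hard-coded -348/340/8 literals must be kept in sync by hand. The pattern in miniature, over a tiny illustrative subset of the table:

    #include <cassert>

    struct CachedPower { int decimal_exponent; };

    // Tiny illustrative subset of kCachedPowers.
    static const CachedPower kCachedPowers[] = {
      {-348}, {-340}, {-332}, /* ... */ {340},
    };
    static const int kCachedPowersLength =
        sizeof(kCachedPowers) / sizeof(kCachedPowers[0]);

    // Constants derived from the table, never restated as literals.
    static const int kMinDecimalExponent = kCachedPowers[0].decimal_exponent;
    static const int kMaxDecimalExponent =
        kCachedPowers[kCachedPowersLength - 1].decimal_exponent;
    static const int kDecimalExponentDistance =
        kCachedPowers[1].decimal_exponent - kCachedPowers[0].decimal_exponent;

    int main() {
      assert(kMinDecimalExponent == -348);
      assert(kMaxDecimalExponent == 340);
      assert(kDecimalExponentDistance == 8);
      return 0;
    }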
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index 1a89ef3..0dfc80d 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,7 +52,7 @@
 }
 
 
-inline bool IsInRange(int value, int lower_limit, int higher_limit) {
+static inline bool IsInRange(int value, int lower_limit, int higher_limit) {
   ASSERT(lower_limit <= higher_limit);
   return static_cast<unsigned int>(value - lower_limit) <=
       static_cast<unsigned int>(higher_limit - lower_limit);
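IsInRange folds both bound checks into a single unsigned comparison: if value < lower_limit, the subtraction wraps around to a huge unsigned number and fails the <= test exactly as an over-the-top value does. The trick as a standalone demo:

    #include <cassert>

    // One unsigned comparison replaces two signed ones.
    static inline bool IsInRange(int value, int lower_limit,
                                 int higher_limit) {
      assert(lower_limit <= higher_limit);
      return static_cast<unsigned int>(value - lower_limit) <=
             static_cast<unsigned int>(higher_limit - lower_limit);
    }

    int main() {
      assert(IsInRange('5', '0', '9'));  // digit check, single branch
      assert(!IsInRange('a', '0', '9'));
      assert(IsInRange(0, 0, 0));
      assert(!IsInRange(-1, 0, 10));     // wraps to UINT_MAX, fails
      return 0;
    }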
diff --git a/src/char-predicates.h b/src/char-predicates.h
index b97191f..5a901a2 100644
--- a/src/char-predicates.h
+++ b/src/char-predicates.h
@@ -57,8 +57,6 @@
   static inline bool Is(uc32 c) {
     return IdentifierStart::Is(c)
         || unibrow::Number::Is(c)
-        || c == 0x200C  // U+200C is Zero-Width Non-Joiner.
-        || c == 0x200D  // U+200D is Zero-Width Joiner.
         || unibrow::CombiningMark::Is(c)
         || unibrow::ConnectorPunctuation::Is(c);
   }
diff --git a/src/checks.h b/src/checks.h
index 608aa14..2f359f6 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,20 +51,26 @@
 #endif
 
 
+// Used by the CHECK macro -- should not be called directly.
+static inline void CheckHelper(const char* file,
+                               int line,
+                               const char* source,
+                               bool condition) {
+  if (!condition)
+    V8_Fatal(file, line, "CHECK(%s) failed", source);
+}
+
+
 // The CHECK macro checks that the given condition is true; if not, it
 // prints a message to stderr and aborts.
-#define CHECK(condition) do {                                       \
-    if (!(condition)) {                                             \
-      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
-    }                                                               \
-  } while (0)
+#define CHECK(condition) CheckHelper(__FILE__, __LINE__, #condition, condition)
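The do { ... } while (0) form deleted above is the standard way to make a multi-statement macro behave as a single statement, so it composes safely with an unbraced if/else; the restored CheckHelper achieves the same because a call expression is one statement. The macro idiom in isolation, with fprintf/abort standing in for V8_Fatal:

    #include <cstdio>
    #include <cstdlib>

    // The do/while(0) wrapper makes the expansion one statement; a bare
    // block or naked if inside the macro would break the 'else' below.
    #define CHECK(condition) do {                                        \
        if (!(condition)) {                                              \
          std::fprintf(stderr, "%s:%d: CHECK(%s) failed\n",              \
                       __FILE__, __LINE__, #condition);                  \
          std::abort();                                                  \
        }                                                                \
      } while (0)

    int main(int argc, char**) {
      if (argc > 0)
        CHECK(argc < 100);  // expands to one statement; the ';' ends it
      else
        std::puts("unreachable");
      CHECK(1 + 1 == 2);
      return 0;
    }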
 
 
 // Helper function used by the CHECK_EQ function when given int
 // arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
-                              const char* expected_source, int expected,
-                              const char* value_source, int value) {
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source, int expected,
+                                     const char* value_source, int value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
@@ -75,11 +81,11 @@
 
 // Helper function used by the CHECK_EQ function when given int64_t
 // arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
-                              const char* expected_source,
-                              int64_t expected,
-                              const char* value_source,
-                              int64_t value) {
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source,
+                                     int64_t expected,
+                                     const char* value_source,
+                                     int64_t value) {
   if (expected != value) {
     // Print int64_t values in hex, as two int32s,
     // to avoid platform-dependencies.
@@ -97,12 +103,12 @@
 
 // Helper function used by the CHECK_NE function when given int
 // arguments.  Should not be called directly.
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* unexpected_source,
-                                 int unexpected,
-                                 const char* value_source,
-                                 int value) {
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* unexpected_source,
+                                        int unexpected,
+                                        const char* value_source,
+                                        int value) {
   if (unexpected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
              unexpected_source, value_source, value);
@@ -112,12 +118,12 @@
 
 // Helper function used by the CHECK function when given string
 // arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              const char* expected,
-                              const char* value_source,
-                              const char* value) {
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     const char* expected,
+                                     const char* value_source,
+                                     const char* value) {
   if ((expected == NULL && value != NULL) ||
       (expected != NULL && value == NULL) ||
       (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
@@ -128,12 +134,12 @@
 }
 
 
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 const char* expected,
-                                 const char* value_source,
-                                 const char* value) {
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        const char* expected,
+                                        const char* value_source,
+                                        const char* value) {
   if (expected == value ||
       (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
@@ -144,12 +150,12 @@
 
 // Helper function used by the CHECK function when given pointer
 // arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              const void* expected,
-                              const char* value_source,
-                              const void* value) {
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     const void* expected,
+                                     const char* value_source,
+                                     const void* value) {
   if (expected != value) {
     V8_Fatal(file, line,
              "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
@@ -159,12 +165,12 @@
 }
 
 
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 const void* expected,
-                                 const char* value_source,
-                                 const void* value) {
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        const void* expected,
+                                        const char* value_source,
+                                        const void* value) {
   if (expected == value) {
     V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
              expected_source, value_source, value);
@@ -174,12 +180,12 @@
 
 // Helper function used by the CHECK function when given floating
 // point arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              double expected,
-                              const char* value_source,
-                              double value) {
+static inline void CheckEqualsHelper(const char* file,
+                                     int line,
+                                     const char* expected_source,
+                                     double expected,
+                                     const char* value_source,
+                                     double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
@@ -195,12 +201,12 @@
 }
 
 
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 double expected,
-                                 const char* value_source,
-                                 double value) {
+static inline void CheckNonEqualsHelper(const char* file,
+                                        int line,
+                                        const char* expected_source,
+                                        double expected,
+                                        const char* value_source,
+                                        double value) {
   // Force values to 64 bit memory to truncate 80 bit precision on IA32.
   volatile double* exp = new double[1];
   *exp = expected;
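
A minimal standalone sketch (not part of this patch; EqualAfterSpill is a hypothetical name) of the trick these helpers rely on: on IA32 the x87 FPU keeps intermediates in 80-bit registers, so forcing both operands through a 64-bit memory slot with volatile stores truncates the extra precision before comparing.

    #include <cstdio>

    // Mirrors the helpers' spill-through-memory pattern: the volatile
    // stores force genuine 64-bit writes, truncating any 80-bit x87
    // temporaries before the comparison.
    static bool EqualAfterSpill(double expected, double value) {
      volatile double* exp = new double[1];
      volatile double* val = new double[1];
      *exp = expected;
      *val = value;
      bool equal = (*exp == *val);
      delete[] exp;
      delete[] val;
      return equal;
    }

    int main() {
      std::printf("%d\n", EqualAfterSpill(1.5, 1.5) ? 1 : 0);  // Prints 1.
      return 0;
    }
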
@@ -251,22 +257,25 @@
     SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__)
 
 
-extern bool FLAG_enable_slow_asserts;
+namespace v8 { namespace internal {
 
+bool EnableSlowAsserts();
+
+} }  // namespace v8::internal
 
 // The ASSERT macro is equivalent to CHECK except that it only
 // generates code in debug builds.
 #ifdef DEBUG
-#define ASSERT_RESULT(expr)    CHECK(expr)
-#define ASSERT(condition)      CHECK(condition)
-#define ASSERT_EQ(v1, v2)      CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2)      CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2)      CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2)      CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2)      CHECK_LE(v1, v2)
-#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition))
+#define ASSERT_RESULT(expr)  CHECK(expr)
+#define ASSERT(condition)    CHECK(condition)
+#define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
+#define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
+#define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
+#define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
+#define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
+#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
 #else
-#define ASSERT_RESULT(expr)    (expr)
+#define ASSERT_RESULT(expr)     (expr)
 #define ASSERT(condition)      ((void) 0)
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
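
A minimal standalone sketch (simplified macros, not V8's exact definitions) of the debug-only pattern above: ASSERT forwards to CHECK when DEBUG is defined and compiles to nothing otherwise.

    #include <cstdio>
    #include <cstdlib>

    // Simplified stand-in for the CHECK macro above.
    #define CHECK(condition)                                      \
      do {                                                        \
        if (!(condition)) {                                       \
          std::fprintf(stderr, "CHECK(%s) failed\n", #condition); \
          std::abort();                                           \
        }                                                         \
      } while (false)

    #ifdef DEBUG
    #define ASSERT(condition) CHECK(condition)
    #else
    #define ASSERT(condition) ((void) 0)  // No code in release builds.
    #endif

    int main() {
      ASSERT(2 + 2 == 4);  // Evaluated only when built with -DDEBUG.
      std::puts("ok");
      return 0;
    }
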
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 11016c8..724445e 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,12 +52,11 @@
   // Update the static counter each time a new code stub is generated.
   masm->isolate()->counters()->code_stubs()->Increment();
 
-  // Nested stubs are not allowed for leaves.
-  AllowStubCallsScope allow_scope(masm, false);
+  // Nested stubs are not allowed for leaves.
+  AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
 
   // Generate the code for the stub.
   masm->set_generating_stub(true);
-  NoCurrentFrameScope scope(masm);
   Generate(masm);
 }
 
@@ -101,14 +100,7 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
   Code* code;
-  if (UseSpecialCache()
-      ? FindCodeInSpecialCache(&code)
-      : FindCodeInCache(&code)) {
-    ASSERT(IsPregenerated() == code->is_pregenerated());
-    return Handle<Code>(code);
-  }
-
-  {
+  if (!FindCodeInCache(&code)) {
     HandleScope scope(isolate);
 
     // Generate the new code.
@@ -126,28 +118,61 @@
     Handle<Code> new_object = factory->NewCode(
         desc, flags, masm.CodeObject(), NeedsImmovableCode());
     RecordCodeGeneration(*new_object, &masm);
-    FinishCode(new_object);
+    FinishCode(*new_object);
 
-    if (UseSpecialCache()) {
-      AddToSpecialCache(new_object);
-    } else {
-      // Update the dictionary and the root in Heap.
-      Handle<UnseededNumberDictionary> dict =
-          factory->DictionaryAtNumberPut(
-              Handle<UnseededNumberDictionary>(heap->code_stubs()),
-              GetKey(),
-              new_object);
-      heap->public_set_code_stubs(*dict);
-    }
+    // Update the dictionary and the root in Heap.
+    Handle<UnseededNumberDictionary> dict =
+        factory->DictionaryAtNumberPut(
+            Handle<UnseededNumberDictionary>(heap->code_stubs()),
+            GetKey(),
+            new_object);
+    heap->public_set_code_stubs(*dict);
+
     code = *new_object;
   }
 
-  Activate(code);
   ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
   return Handle<Code>(code, isolate);
 }
 
 
+MaybeObject* CodeStub::TryGetCode() {
+  Code* code;
+  if (!FindCodeInCache(&code)) {
+    // Generate the new code.
+    MacroAssembler masm(Isolate::Current(), NULL, 256);
+    GenerateCode(&masm);
+    Heap* heap = masm.isolate()->heap();
+
+    // Create the code object.
+    CodeDesc desc;
+    masm.GetCode(&desc);
+
+    // Try to copy the generated code into a heap object.
+    Code::Flags flags = Code::ComputeFlags(
+        static_cast<Code::Kind>(GetCodeKind()),
+        GetICState());
+    Object* new_object;
+    { MaybeObject* maybe_new_object =
+          heap->CreateCode(desc, flags, masm.CodeObject());
+      if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
+    }
+    code = Code::cast(new_object);
+    RecordCodeGeneration(code, &masm);
+    FinishCode(code);
+
+    // Try to update the code cache but do not fail if unable.
+    MaybeObject* maybe_new_object =
+        heap->code_stubs()->AtNumberPut(GetKey(), code);
+    if (maybe_new_object->ToObject(&new_object)) {
+      heap->public_set_code_stubs(UnseededNumberDictionary::cast(new_object));
+    }
+  }
+
+  return code;
+}
+
+
 const char* CodeStub::MajorName(CodeStub::Major major_key,
                                 bool allow_unknown_keys) {
   switch (major_key) {
@@ -163,37 +188,6 @@
 }
 
 
-void CodeStub::PrintName(StringStream* stream) {
-  stream->Add("%s", MajorName(MajorKey(), false));
-}
-
-
-void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
-  ASSERT(*known_map_ != NULL);
-  Isolate* isolate = new_object->GetIsolate();
-  Factory* factory = isolate->factory();
-  return Map::UpdateCodeCache(known_map_,
-                              factory->compare_ic_symbol(),
-                              new_object);
-}
-
-
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
-  Isolate* isolate = known_map_->GetIsolate();
-  Factory* factory = isolate->factory();
-  Code::Flags flags = Code::ComputeFlags(
-      static_cast<Code::Kind>(GetCodeKind()),
-      UNINITIALIZED);
-  Handle<Object> probe(
-      known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
-  if (probe->IsCode()) {
-    *code_out = Code::cast(*probe);
-    return true;
-  }
-  return false;
-}
-
-
 int ICCompareStub::MinorKey() {
   return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }
@@ -219,10 +213,6 @@
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
-    case CompareIC::KNOWN_OBJECTS:
-      ASSERT(*known_map_ != NULL);
-      GenerateKnownObjects(masm);
-      break;
     default:
       UNREACHABLE();
   }
@@ -252,18 +242,9 @@
 }
 
 
-void JSEntryStub::FinishCode(Handle<Code> code) {
-  Handle<FixedArray> handler_table =
-      code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
-  handler_table->set(0, Smi::FromInt(handler_offset_));
-  code->set_handler_table(*handler_table);
-}
-
-
 void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
       KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
       break;
     case FAST_DOUBLE_ELEMENTS:
@@ -293,17 +274,11 @@
 void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS: {
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
-                                                       is_js_array_,
-                                                       elements_kind_,
-                                                       grow_mode_);
-    }
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
       break;
     case FAST_DOUBLE_ELEMENTS:
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
-                                                             is_js_array_,
-                                                             grow_mode_);
+                                                             is_js_array_);
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -327,26 +302,24 @@
 
 
 void ArgumentsAccessStub::PrintName(StringStream* stream) {
-  stream->Add("ArgumentsAccessStub_");
+  const char* type_name = NULL;  // Make g++ happy.
   switch (type_) {
-    case READ_ELEMENT: stream->Add("ReadElement"); break;
-    case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
-    case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
-    case NEW_STRICT: stream->Add("NewStrict"); break;
+    case READ_ELEMENT: type_name = "ReadElement"; break;
+    case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
+    case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
+    case NEW_STRICT: type_name = "NewStrict"; break;
   }
+  stream->Add("ArgumentsAccessStub_%s", type_name);
 }
 
 
 void CallFunctionStub::PrintName(StringStream* stream) {
-  stream->Add("CallFunctionStub_Args%d", argc_);
-  if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
-  if (RecordCallTarget()) stream->Add("_Recording");
-}
-
-
-void CallConstructStub::PrintName(StringStream* stream) {
-  stream->Add("CallConstructStub");
-  if (RecordCallTarget()) stream->Add("_Recording");
+  const char* flags_name = NULL;  // Make g++ happy.
+  switch (flags_) {
+    case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
+    case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
+  }
+  stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
 }
 
 
@@ -429,32 +402,4 @@
 }
 
 
-void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
-  Label fail;
-  if (!FLAG_trace_elements_transitions) {
-    if (to_ == FAST_ELEMENTS) {
-      if (from_ == FAST_SMI_ONLY_ELEMENTS) {
-        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-      } else if (from_ == FAST_DOUBLE_ELEMENTS) {
-        ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
-      } else {
-        UNREACHABLE();
-      }
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
-                                                       is_jsarray_,
-                                                       FAST_ELEMENTS,
-                                                       grow_mode_);
-    } else if (from_ == FAST_SMI_ONLY_ELEMENTS && to_ == FAST_DOUBLE_ELEMENTS) {
-      ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-      KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
-                                                             is_jsarray_,
-                                                             grow_mode_);
-    } else {
-      UNREACHABLE();
-    }
-  }
-  masm->bind(&fail);
-  KeyedStoreIC::GenerateRuntimeSetProperty(masm, strict_mode_);
-}
-
 } }  // namespace v8::internal
diff --git a/src/code-stubs.h b/src/code-stubs.h
index b67e961..64c89b9 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 
 #include "allocation.h"
 #include "globals.h"
-#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -38,7 +37,6 @@
 // List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(CallConstruct)                       \
   V(UnaryOp)                             \
   V(BinaryOp)                            \
   V(StringAdd)                           \
@@ -47,23 +45,27 @@
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
-  V(RecordWrite)                         \
-  V(StoreBufferOverflow)                 \
-  V(RegExpExec)                          \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
+  /* All stubs above this line only exist in a few versions, which are  */  \
+  /* generated ahead of time.  Therefore compiling a call to one of     */  \
+  /* them can't cause a new stub to be compiled, so compiling a call to */  \
+  /* them is GC safe.  The ones below this line exist in many variants  */  \
+  /* so code compiling a call to one can cause a GC.  This means they   */  \
+  /* can't be called from other stubs, since stub generation code is    */  \
+  /* not GC safe.                                                       */  \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
-  V(Interrupt)                           \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
-  V(FastNewBlockContext)                 \
   V(FastCloneShallowArray)               \
-  V(FastCloneShallowObject)              \
+  V(RevertToNumber)                      \
   V(ToBoolean)                           \
   V(ToNumber)                            \
+  V(CounterOp)                           \
   V(ArgumentsAccess)                     \
+  V(RegExpExec)                          \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
@@ -71,9 +73,7 @@
   V(KeyedLoadElement)                    \
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
-  V(StringDictionaryLookup)              \
-  V(ElementsTransitionAndStore)          \
-  V(StoreArrayLiteralElement)
+  V(StringDictionaryNegativeLookup)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
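
A minimal standalone sketch (toy stub list, hypothetical names) of how the ordering comment above is enforced: the V(...) list expands into an enum in declaration order, so a single comparison against Instanceof tells whether a stub sits in the GC-safe, ahead-of-time-generated group.

    #include <cstdio>

    // Toy stand-in for the stub list: everything up to and including
    // Instanceof is pregenerated, everything after it is not.
    #define STUB_LIST(V) V(CallFunction) V(MathPow) V(Instanceof) V(ToBoolean)

    enum Major {
    #define DEF_ENUM(name) name,
      STUB_LIST(DEF_ENUM)
    #undef DEF_ENUM
      NUMBER_OF_IDS
    };

    // Mirrors CodeStub::AllowsStubCalls(): list position doubles as the
    // GC-safety predicate.
    static bool AllowsStubCalls(Major key) { return key <= Instanceof; }

    int main() {
      std::printf("MathPow: %d\n", AllowsStubCalls(MathPow));      // 1
      std::printf("ToBoolean: %d\n", AllowsStubCalls(ToBoolean));  // 0
      return 0;
    }
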
@@ -121,6 +121,11 @@
   // Retrieve the code for the stub. Generate the code if needed.
   Handle<Code> GetCode();
 
+  // Retrieve the code for the stub if already generated.  If the code
+  // is not already generated, do not generate it; instead return a
+  // retry-after-GC Failure object.
+  MUST_USE_RESULT MaybeObject* TryGetCode();
+
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   }
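
A minimal standalone sketch (hypothetical types, not V8's real Object/MaybeObject) of the calling convention TryGetCode follows: the result either holds an object or a failure that the caller must check and propagate, mirroring the ToObject pattern used in code-stubs.cc above.

    #include <cstddef>
    #include <cstdio>

    struct Object { int value; };

    // A result-or-failure wrapper: NULL stands in for a retry-after-GC
    // failure that the caller must propagate instead of dereferencing.
    struct MaybeObject {
      Object* result;
      bool ToObject(Object** out) {
        if (result == NULL) return false;
        *out = result;
        return true;
      }
    };

    static MaybeObject TryAllocate(bool heap_full) {
      static Object code = { 42 };
      MaybeObject maybe = { heap_full ? NULL : &code };
      return maybe;
    }

    static bool Caller(bool heap_full) {
      Object* obj;
      MaybeObject maybe = TryAllocate(heap_full);
      if (!maybe.ToObject(&obj)) return false;  // Propagate the failure.
      std::printf("got code %d\n", obj->value);
      return true;
    }

    int main() {
      Caller(false);  // Prints "got code 42".
      Caller(true);   // Fails and propagates.
      return 0;
    }
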
@@ -137,35 +142,14 @@
 
   virtual ~CodeStub() {}
 
-  bool CompilingCallsToThisStubIsGCSafe() {
-    bool is_pregenerated = IsPregenerated();
-    Code* code = NULL;
-    CHECK(!is_pregenerated || FindCodeInCache(&code));
-    return is_pregenerated;
-  }
-
-  // See comment above, where Instanceof is defined.
-  virtual bool IsPregenerated() { return false; }
-
-  static void GenerateStubsAheadOfTime();
-  static void GenerateFPStubs();
-
-  // Some stubs put untagged junk on the stack that cannot be scanned by the
-  // GC.  This means that we must be statically sure that no GC can occur while
-  // they are running.  If that is the case they should override this to return
-  // true, which will cause an assertion if we try to call something that can
-  // GC or if we try to put a stack frame on top of the junk, which would not
-  // result in a traversable stack.
-  virtual bool SometimesSetsUpAFrame() { return true; }
-
-  // Lookup the code in the (possibly custom) cache.
-  bool FindCodeInCache(Code** code_out);
-
  protected:
   static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
+  // Lookup the code in the (possibly custom) cache.
+  bool FindCodeInCache(Code** code_out);
+
   // Nonvirtual wrapper around the stub-specific Generate function.  Call
   // this function to set up the macro assembler and generate the code.
   void GenerateCode(MacroAssembler* masm);
@@ -178,11 +162,7 @@
   void RecordCodeGeneration(Code* code, MacroAssembler* masm);
 
   // Finish the code object after it has been generated.
-  virtual void FinishCode(Handle<Code> code) { }
-
-  // Activate newly generated stub. Is called after
-  // registering stub in the stub cache.
-  virtual void Activate(Code* code) { }
+  virtual void FinishCode(Code* code) { }
 
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
@@ -196,20 +176,11 @@
     return UNINITIALIZED;
   }
 
-  // Add the code to a specialized cache, specific to an individual
-  // stub type. Please note, this method must add the code object to a
-  // roots object, otherwise we will remove the code during GC.
-  virtual void AddToSpecialCache(Handle<Code> new_object) { }
-
-  // Find code in a specialized cache, work is delegated to the specific stub.
-  virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
-
-  // If a stub uses a special cache override this.
-  virtual bool UseSpecialCache() { return false; }
-
   // Returns a name for logging/debugging purposes.
   SmartArrayPointer<const char> GetName();
-  virtual void PrintName(StringStream* stream);
+  virtual void PrintName(StringStream* stream) {
+    stream->Add("%s", MajorName(MajorKey(), false));
+  }
 
   // Returns whether the code generated for this stub needs to be allocated as
   // a fixed (non-moveable) code object.
@@ -222,6 +193,9 @@
            MajorKeyBits::encode(MajorKey());
   }
 
+  // See comment above, where Instanceof is defined.
+  bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
+
   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
   class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
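
A minimal standalone sketch (simplified; the real layout also reserves kSmiTagSize bits) of the BitField pattern behind MajorKeyBits and MinorKeyBits: each field owns a fixed slice of the 32-bit key and provides encode/decode.

    #include <cstdint>
    #include <cstdio>

    // Each field owns bits [shift, shift + size) of a 32-bit key;
    // encode assumes the value fits in the field.
    template <class T, int shift, int size>
    class BitField {
     public:
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        uint32_t mask = ((1u << size) - 1u) << shift;
        return static_cast<T>((key & mask) >> shift);
      }
    };

    // Assumed layout: 6 major-key bits at the bottom, minor key above.
    typedef BitField<uint32_t, 0, 6>  MajorKeyBits;
    typedef BitField<uint32_t, 6, 26> MinorKeyBits;

    int main() {
      uint32_t key = MajorKeyBits::encode(5) | MinorKeyBits::encode(1234);
      std::printf("major=%u minor=%u\n",
                  MajorKeyBits::decode(key), MinorKeyBits::decode(key));
      return 0;
    }
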
 
@@ -298,18 +272,6 @@
 };
 
 
-class InterruptStub : public CodeStub {
- public:
-  InterruptStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return Interrupt; }
-  int MinorKey() { return 0; }
-};
-
-
 class ToNumberStub: public CodeStub {
  public:
   ToNumberStub() { }
@@ -324,17 +286,16 @@
 
 class FastNewClosureStub : public CodeStub {
  public:
-  explicit FastNewClosureStub(LanguageMode language_mode)
-    : language_mode_(language_mode) { }
+  explicit FastNewClosureStub(StrictModeFlag strict_mode)
+    : strict_mode_(strict_mode) { }
 
   void Generate(MacroAssembler* masm);
 
  private:
   Major MajorKey() { return FastNewClosure; }
-  int MinorKey() { return language_mode_ == CLASSIC_MODE
-        ? kNonStrictMode : kStrictMode; }
+  int MinorKey() { return strict_mode_; }
 
-  LanguageMode language_mode_;
+  StrictModeFlag strict_mode_;
 };
 
 
@@ -343,7 +304,7 @@
   static const int kMaximumSlots = 64;
 
   explicit FastNewContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
   }
 
   void Generate(MacroAssembler* masm);
@@ -356,24 +317,6 @@
 };
 
 
-class FastNewBlockContextStub : public CodeStub {
- public:
-  static const int kMaximumSlots = 64;
-
-  explicit FastNewBlockContextStub(int slots) : slots_(slots) {
-    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
-  }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  int slots_;
-
-  Major MajorKey() { return FastNewBlockContext; }
-  int MinorKey() { return slots_; }
-};
-
-
 class FastCloneShallowArrayStub : public CodeStub {
  public:
   // Maximum length of copied elements array.
@@ -381,16 +324,14 @@
 
   enum Mode {
     CLONE_ELEMENTS,
-    CLONE_DOUBLE_ELEMENTS,
-    COPY_ON_WRITE_ELEMENTS,
-    CLONE_ANY_ELEMENTS
+    COPY_ON_WRITE_ELEMENTS
   };
 
   FastCloneShallowArrayStub(Mode mode, int length)
       : mode_(mode),
         length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
-    ASSERT_GE(length_, 0);
-    ASSERT_LE(length_, kMaximumClonedLength);
+    ASSERT(length_ >= 0);
+    ASSERT(length_ <= kMaximumClonedLength);
   }
 
   void Generate(MacroAssembler* masm);
@@ -401,32 +342,12 @@
 
   Major MajorKey() { return FastCloneShallowArray; }
   int MinorKey() {
-    ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
-    return length_ * 4 +  mode_;
+    ASSERT(mode_ == 0 || mode_ == 1);
+    return (length_ << 1) | mode_;
   }
 };
 
 
-class FastCloneShallowObjectStub : public CodeStub {
- public:
-  // Maximum number of properties in copied object.
-  static const int kMaximumClonedProperties = 6;
-
-  explicit FastCloneShallowObjectStub(int length) : length_(length) {
-    ASSERT_GE(length_, 0);
-    ASSERT_LE(length_, kMaximumClonedProperties);
-  }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  int length_;
-
-  Major MajorKey() { return FastCloneShallowObject; }
-  int MinorKey() { return length_; }
-};
-
-
 class InstanceofStub: public CodeStub {
  public:
   enum Flags {
@@ -467,17 +388,12 @@
 
 class MathPowStub: public CodeStub {
  public:
-  enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
-
-  explicit MathPowStub(ExponentType exponent_type)
-      : exponent_type_(exponent_type) { }
+  MathPowStub() {}
   virtual void Generate(MacroAssembler* masm);
 
  private:
   virtual CodeStub::Major MajorKey() { return MathPow; }
-  virtual int MinorKey() { return exponent_type_; }
-
-  ExponentType exponent_type_;
+  virtual int MinorKey() { return 0; }
 };
 
 
@@ -490,15 +406,11 @@
 
   virtual void Generate(MacroAssembler* masm);
 
-  void set_known_map(Handle<Map> map) { known_map_ = map; }
-
  private:
   class OpField: public BitField<int, 0, 3> { };
   class StateField: public BitField<int, 3, 5> { };
 
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_compare_state(state_);
-  }
+  virtual void FinishCode(Code* code) { code->set_compare_state(state_); }
 
   virtual CodeStub::Major MajorKey() { return CompareIC; }
   virtual int MinorKey();
@@ -511,18 +423,12 @@
   void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
-  void GenerateKnownObjects(MacroAssembler* masm);
 
   bool strict() const { return op_ == Token::EQ_STRICT; }
   Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
 
-  virtual void AddToSpecialCache(Handle<Code> new_object);
-  virtual bool FindCodeInSpecialCache(Code** code_out);
-  virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
-
   Token::Value op_;
   CompareIC::State state_;
-  Handle<Map> known_map_;
 };
 
 
@@ -607,7 +513,7 @@
   int MinorKey();
 
   virtual int GetCodeKind() { return Code::COMPARE_IC; }
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_compare_state(CompareIC::GENERIC);
   }
 
@@ -625,18 +531,11 @@
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size,
-                      SaveFPRegsMode save_doubles = kDontSaveFPRegs)
-      : result_size_(result_size), save_doubles_(save_doubles) { }
+  explicit CEntryStub(int result_size)
+      : result_size_(result_size), save_doubles_(false) { }
 
   void Generate(MacroAssembler* masm);
-
-  // The version of this stub that doesn't save doubles is generated ahead of
-  // time, so it's OK to call it from other stubs that can't cope with GC during
-  // their code generation.  On machines that always have gp registers (x64) we
-  // can generate both variants ahead of time.
-  virtual bool IsPregenerated();
-  static void GenerateAheadOfTime();
+  void SaveDoubles() { save_doubles_ = true; }
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -645,10 +544,13 @@
                     Label* throw_out_of_memory_exception,
                     bool do_gc,
                     bool always_allocate_scope);
+  void GenerateThrowTOS(MacroAssembler* masm);
+  void GenerateThrowUncatchable(MacroAssembler* masm,
+                                UncatchableExceptionType type);
 
   // Number of pointers/values returned.
   const int result_size_;
-  SaveFPRegsMode save_doubles_;
+  bool save_doubles_;
 
   Major MajorKey() { return CEntry; }
   int MinorKey();
@@ -669,10 +571,6 @@
  private:
   Major MajorKey() { return JSEntry; }
   int MinorKey() { return 0; }
-
-  virtual void FinishCode(Handle<Code> code);
-
-  int handler_offset_;
 };
 
 
@@ -749,10 +647,6 @@
 
   void Generate(MacroAssembler* masm);
 
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_has_function_cache(RecordCallTarget());
-  }
-
   static int ExtractArgcFromMinorKey(int minor_key) {
     return ArgcBits::decode(minor_key);
   }
@@ -764,8 +658,8 @@
   virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
-  class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
-  class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
+  class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
+  class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() {
@@ -776,34 +670,6 @@
   bool ReceiverMightBeImplicit() {
     return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
-
-  bool RecordCallTarget() {
-    return (flags_ & RECORD_CALL_TARGET) != 0;
-  }
-};
-
-
-class CallConstructStub: public CodeStub {
- public:
-  explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
-
-  void Generate(MacroAssembler* masm);
-
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_has_function_cache(RecordCallTarget());
-  }
-
- private:
-  CallFunctionFlags flags_;
-
-  virtual void PrintName(StringStream* stream);
-
-  Major MajorKey() { return CallConstruct; }
-  int MinorKey() { return flags_; }
-
-  bool RecordCallTarget() {
-    return (flags_ & RECORD_CALL_TARGET) != 0;
-  }
 };
 
 
@@ -832,6 +698,7 @@
  public:
   StringCharCodeAtGenerator(Register object,
                             Register index,
+                            Register scratch,
                             Register result,
                             Label* receiver_not_string,
                             Label* index_not_number,
@@ -839,11 +706,15 @@
                             StringIndexFlags index_flags)
       : object_(object),
         index_(index),
+        scratch_(scratch),
         result_(result),
         receiver_not_string_(receiver_not_string),
         index_not_number_(index_not_number),
         index_out_of_range_(index_out_of_range),
         index_flags_(index_flags) {
+    ASSERT(!scratch_.is(object_));
+    ASSERT(!scratch_.is(index_));
+    ASSERT(!scratch_.is(result_));
     ASSERT(!result_.is(object_));
     ASSERT(!result_.is(index_));
   }
@@ -861,6 +732,7 @@
  private:
   Register object_;
   Register index_;
+  Register scratch_;
   Register result_;
 
   Label* receiver_not_string_;
@@ -923,7 +795,8 @@
  public:
   StringCharAtGenerator(Register object,
                         Register index,
-                        Register scratch,
+                        Register scratch1,
+                        Register scratch2,
                         Register result,
                         Label* receiver_not_string,
                         Label* index_not_number,
@@ -931,12 +804,13 @@
                         StringIndexFlags index_flags)
       : char_code_at_generator_(object,
                                 index,
-                                scratch,
+                                scratch1,
+                                scratch2,
                                 receiver_not_string,
                                 index_not_number,
                                 index_out_of_range,
                                 index_flags),
-        char_from_code_generator_(scratch, result) {}
+        char_from_code_generator_(scratch2, result) {}
 
   // Generates the fast case code. On the fallthrough path |result|
   // register contains the result.
@@ -995,29 +869,20 @@
 class KeyedStoreElementStub : public CodeStub {
  public:
   KeyedStoreElementStub(bool is_js_array,
-                        ElementsKind elements_kind,
-                        KeyedAccessGrowMode grow_mode)
-      : is_js_array_(is_js_array),
-        elements_kind_(elements_kind),
-        grow_mode_(grow_mode) { }
+                        ElementsKind elements_kind)
+      : is_js_array_(is_js_array),
+        elements_kind_(elements_kind) { }
 
   Major MajorKey() { return KeyedStoreElement; }
   int MinorKey() {
-    return ElementsKindBits::encode(elements_kind_) |
-        IsJSArrayBits::encode(is_js_array_) |
-        GrowModeBits::encode(grow_mode_);
+    return (is_js_array_ ? 0 : kElementsKindCount) + elements_kind_;
   }
 
   void Generate(MacroAssembler* masm);
 
  private:
-  class ElementsKindBits: public BitField<ElementsKind,    0, 8> {};
-  class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {};
-  class IsJSArrayBits: public BitField<bool,               9, 1> {};
-
   bool is_js_array_;
   ElementsKind elements_kind_;
-  KeyedAccessGrowMode grow_mode_;
 
   DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
 };
@@ -1069,13 +934,11 @@
   virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
   virtual void PrintName(StringStream* stream);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
  private:
   Major MajorKey() { return ToBoolean; }
   int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_to_boolean_state(types_.ToByte());
   }
 
@@ -1089,61 +952,6 @@
   Types types_;
 };
 
-
-class ElementsTransitionAndStoreStub : public CodeStub {
- public:
-  ElementsTransitionAndStoreStub(ElementsKind from,
-                                 ElementsKind to,
-                                 bool is_jsarray,
-                                 StrictModeFlag strict_mode,
-                                 KeyedAccessGrowMode grow_mode)
-      : from_(from),
-        to_(to),
-        is_jsarray_(is_jsarray),
-        strict_mode_(strict_mode),
-        grow_mode_(grow_mode) {}
-
- private:
-  class FromBits:       public BitField<ElementsKind,      0, 8> {};
-  class ToBits:         public BitField<ElementsKind,      8, 8> {};
-  class IsJSArrayBits:  public BitField<bool,              16, 1> {};
-  class StrictModeBits: public BitField<StrictModeFlag,    17, 1> {};
-  class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {};
-
-  Major MajorKey() { return ElementsTransitionAndStore; }
-  int MinorKey() {
-    return FromBits::encode(from_) |
-        ToBits::encode(to_) |
-        IsJSArrayBits::encode(is_jsarray_) |
-        StrictModeBits::encode(strict_mode_) |
-        GrowModeBits::encode(grow_mode_);
-  }
-
-  void Generate(MacroAssembler* masm);
-
-  ElementsKind from_;
-  ElementsKind to_;
-  bool is_jsarray_;
-  StrictModeFlag strict_mode_;
-  KeyedAccessGrowMode grow_mode_;
-
-  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
-};
-
-
-class StoreArrayLiteralElementStub : public CodeStub {
- public:
-  explicit StoreArrayLiteralElementStub() {}
-
- private:
-  Major MajorKey() { return StoreArrayLiteralElement; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index 0163580..cdc9ba1 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,15 +62,28 @@
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
+  bool print_json_ast = false;
   const char* ftype;
 
   if (Isolate::Current()->bootstrapper()->IsActive()) {
     print_source = FLAG_print_builtin_source;
     print_ast = FLAG_print_builtin_ast;
+    print_json_ast = FLAG_print_builtin_json_ast;
     ftype = "builtin";
   } else {
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
+    print_json_ast = FLAG_print_json_ast;
+    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+    if (print_source && !filter.is_empty()) {
+      print_source = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_ast && !filter.is_empty()) {
+      print_ast = info->function()->name()->IsEqualTo(filter);
+    }
+    if (print_json_ast && !filter.is_empty()) {
+      print_json_ast = info->function()->name()->IsEqualTo(filter);
+    }
     ftype = "user-defined";
   }
 
@@ -89,6 +102,11 @@
     PrintF("--- AST ---\n%s\n",
            AstPrinter().PrintProgram(info->function()));
   }
+
+  if (print_json_ast) {
+    JsonAstBuilder builder;
+    PrintF("%s", builder.BuildProgram(info->function()));
+  }
 #endif  // DEBUG
 }
 
@@ -117,9 +135,11 @@
   bool print_code = Isolate::Current()->bootstrapper()->IsActive()
       ? FLAG_print_builtin_code
       : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
-  if (print_code) {
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
+  FunctionLiteral* function = info->function();
+  bool match = filter.is_empty() || function->debug_name()->IsEqualTo(filter);
+  if (print_code && match) {
     // Print the source code if available.
-    FunctionLiteral* function = info->function();
     Handle<Script> script = info->script();
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
@@ -198,8 +218,8 @@
 
 
 int CEntryStub::MinorKey() {
-  int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
   ASSERT(result_size_ == 1 || result_size_ == 2);
+  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
   return result | ((result_size_ == 1) ? 0 : 2);
 #else
diff --git a/src/codegen.h b/src/codegen.h
index 50d70f2..e551abf 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -81,28 +81,4 @@
 #error Unsupported target architecture.
 #endif
 
-namespace v8 {
-namespace internal {
-
-// Results of the library implementation of transcendental functions may differ
-// from the one we use in our generated code.  Therefore we use the same
-// generated code both in runtime and compiled code.
-typedef double (*UnaryMathFunction)(double x);
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
-UnaryMathFunction CreateSqrtFunction();
-
-
-class ElementsTransitionGenerator : public AllStatic {
- public:
-  static void GenerateSmiOnlyToObject(MacroAssembler* masm);
-  static void GenerateSmiOnlyToDouble(MacroAssembler* masm, Label* fail);
-  static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
-};
-
-} }  // namespace v8::internal
-
 #endif  // V8_CODEGEN_H_
diff --git a/src/collection.js b/src/collection.js
deleted file mode 100644
index 75fe3d5..0000000
--- a/src/collection.js
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var $Set = global.Set;
-var $Map = global.Map;
-var $WeakMap = global.WeakMap;
-
-//-------------------------------------------------------------------
-
-// Global sentinel to be used instead of undefined keys, which are not
-// supported internally but required for Harmony sets and maps.
-var undefined_sentinel = {};
-
-
-function SetConstructor() {
-  if (%_IsConstructCall()) {
-    %SetInitialize(this);
-  } else {
-    return new $Set();
-  }
-}
-
-
-function SetAdd(key) {
-  if (!IS_SET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Set.prototype.add', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return %SetAdd(this, key);
-}
-
-
-function SetHas(key) {
-  if (!IS_SET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Set.prototype.has', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return %SetHas(this, key);
-}
-
-
-function SetDelete(key) {
-  if (!IS_SET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Set.prototype.delete', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return %SetDelete(this, key);
-}
-
-
-function MapConstructor() {
-  if (%_IsConstructCall()) {
-    %MapInitialize(this);
-  } else {
-    return new $Map();
-  }
-}
-
-
-function MapGet(key) {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.get', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return %MapGet(this, key);
-}
-
-
-function MapSet(key, value) {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.set', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return %MapSet(this, key, value);
-}
-
-
-function MapHas(key) {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.has', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  return !IS_UNDEFINED(%MapGet(this, key));
-}
-
-
-function MapDelete(key) {
-  if (!IS_MAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['Map.prototype.delete', this]);
-  }
-  if (IS_UNDEFINED(key)) {
-    key = undefined_sentinel;
-  }
-  if (!IS_UNDEFINED(%MapGet(this, key))) {
-    %MapSet(this, key, void 0);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
-function WeakMapConstructor() {
-  if (%_IsConstructCall()) {
-    %WeakMapInitialize(this);
-  } else {
-    return new $WeakMap();
-  }
-}
-
-
-function WeakMapGet(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.get', this]);
-  }
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.set', this]);
-  }
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakMapSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.has', this]);
-  }
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return !IS_UNDEFINED(%WeakMapGet(this, key));
-}
-
-
-function WeakMapDelete(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.delete', this]);
-  }
-  if (!IS_SPEC_OBJECT(key)) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
-    %WeakMapSet(this, key, void 0);
-    return true;
-  } else {
-    return false;
-  }
-}
-
-// -------------------------------------------------------------------
-
-(function () {
-  %CheckIsBootstrapping();
-
-  // Set up the Set and Map constructor function.
-  %SetCode($Set, SetConstructor);
-  %SetCode($Map, MapConstructor);
-
-  // Set up the constructor property on the Set and Map prototype object.
-  %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
-  %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the Set prototype object.
-  InstallFunctions($Set.prototype, DONT_ENUM, $Array(
-    "add", SetAdd,
-    "has", SetHas,
-    "delete", SetDelete
-  ));
-
-  // Set up the non-enumerable functions on the Map prototype object.
-  InstallFunctions($Map.prototype, DONT_ENUM, $Array(
-    "get", MapGet,
-    "set", MapSet,
-    "has", MapHas,
-    "delete", MapDelete
-  ));
-
-  // Set up the WeakMap constructor function.
-  %SetCode($WeakMap, WeakMapConstructor);
-
-  // Set up the constructor property on the WeakMap prototype object.
-  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the WeakMap prototype object.
-  InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
-    "get", WeakMapGet,
-    "set", WeakMapSet,
-    "has", WeakMapHas,
-    "delete", WeakMapDelete
-  ));
-})();
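
A minimal standalone sketch, in C++ for consistency with the rest of this change, of the sentinel technique the deleted collection.js used: a key the backing store cannot represent (undefined there, the empty string here) is swapped for a private stand-in on every access.

    #include <cstdio>
    #include <map>
    #include <string>

    // Pretend the backing table cannot store "" as a key (the analogue
    // of undefined); swap it for a unique sentinel on the way in and out.
    static const std::string kUndefinedSentinel("\x01undefined");

    class SetLike {
     public:
      void add(const std::string& key) { table_[Normalize(key)] = true; }
      bool has(const std::string& key) const {
        return table_.count(Normalize(key)) != 0;
      }

     private:
      static std::string Normalize(const std::string& key) {
        return key.empty() ? kUndefinedSentinel : key;
      }
      std::map<std::string, bool> table_;
    };

    int main() {
      SetLike set;
      set.add("");                               // The "undefined" key.
      std::printf("%d\n", set.has("") ? 1 : 0);  // Prints 1.
      return 0;
    }
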
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 82cc223..28e833a 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,6 @@
 
 #include "v8.h"
 
-#include "assembler.h"
 #include "compilation-cache.h"
 #include "serialize.h"
 
@@ -251,8 +250,7 @@
 Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
     Handle<String> source,
     Handle<Context> context,
-    LanguageMode language_mode,
-    int scope_position) {
+    StrictModeFlag strict_mode) {
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
@@ -261,8 +259,7 @@
   { HandleScope scope(isolate());
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
-      result = table->LookupEval(
-          *source, *context, language_mode, scope_position);
+      result = table->LookupEval(*source, *context, strict_mode);
       if (result->IsSharedFunctionInfo()) {
         break;
       }
@@ -272,7 +269,7 @@
     Handle<SharedFunctionInfo>
         function_info(SharedFunctionInfo::cast(result), isolate());
     if (generation != 0) {
-      Put(source, context, function_info, scope_position);
+      Put(source, context, function_info);
     }
     isolate()->counters()->compilation_cache_hits()->Increment();
     return function_info;
@@ -286,31 +283,27 @@
 MaybeObject* CompilationCacheEval::TryTablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info,
-    int scope_position) {
+    Handle<SharedFunctionInfo> function_info) {
   Handle<CompilationCacheTable> table = GetFirstTable();
-  return table->PutEval(*source, *context, *function_info, scope_position);
+  return table->PutEval(*source, *context, *function_info);
 }
 
 
 Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
-    Handle<SharedFunctionInfo> function_info,
-    int scope_position) {
+    Handle<SharedFunctionInfo> function_info) {
   CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(
-                         source, context, function_info, scope_position),
+                     TryTablePut(source, context, function_info),
                      CompilationCacheTable);
 }
 
 
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
-                               Handle<SharedFunctionInfo> function_info,
-                               int scope_position) {
+                               Handle<SharedFunctionInfo> function_info) {
   HandleScope scope(isolate());
-  SetFirstTable(TablePut(source, context, function_info, scope_position));
+  SetFirstTable(TablePut(source, context, function_info));
 }
 
 
@@ -396,20 +389,16 @@
     Handle<String> source,
     Handle<Context> context,
     bool is_global,
-    LanguageMode language_mode,
-    int scope_position) {
+    StrictModeFlag strict_mode) {
   if (!IsEnabled()) {
     return Handle<SharedFunctionInfo>::null();
   }
 
   Handle<SharedFunctionInfo> result;
   if (is_global) {
-    result = eval_global_.Lookup(
-        source, context, language_mode, scope_position);
+    result = eval_global_.Lookup(source, context, strict_mode);
   } else {
-    ASSERT(scope_position != RelocInfo::kNoPosition);
-    result = eval_contextual_.Lookup(
-        source, context, language_mode, scope_position);
+    result = eval_contextual_.Lookup(source, context, strict_mode);
   }
   return result;
 }
@@ -438,18 +427,16 @@
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
                                bool is_global,
-                               Handle<SharedFunctionInfo> function_info,
-                               int scope_position) {
+                               Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) {
     return;
   }
 
   HandleScope scope(isolate());
   if (is_global) {
-    eval_global_.Put(source, context, function_info, scope_position);
+    eval_global_.Put(source, context, function_info);
   } else {
-    ASSERT(scope_position != RelocInfo::kNoPosition);
-    eval_contextual_.Put(source, context, function_info, scope_position);
+    eval_contextual_.Put(source, context, function_info);
   }
 }
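
A minimal standalone sketch (hypothetical GenerationalCache, plain std::map tables) of the generational lookup restored above: probe generation 0 first, and on a hit in an older generation re-Put the entry into generation 0 so it survives the next aging step.

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    // Stand-in for a sub-cache: Age() retires the oldest table and opens
    // a fresh generation 0; Lookup promotes hits so live entries survive.
    class GenerationalCache {
     public:
      explicit GenerationalCache(int generations) : tables_(generations) {}

      bool Lookup(const std::string& source, int* value_out) {
        for (size_t g = 0; g < tables_.size(); g++) {
          std::map<std::string, int>::const_iterator it =
              tables_[g].find(source);
          if (it == tables_[g].end()) continue;
          if (g != 0) Put(source, it->second);  // Promote into generation 0.
          *value_out = it->second;
          return true;
        }
        return false;
      }

      void Put(const std::string& source, int value) {
        tables_[0][source] = value;
      }

      void Age() {
        tables_.pop_back();  // Drop the oldest generation.
        tables_.insert(tables_.begin(), std::map<std::string, int>());
      }

     private:
      std::vector<std::map<std::string, int> > tables_;
    };

    int main() {
      GenerationalCache cache(2);
      cache.Put("function f() {}", 1);
      cache.Age();  // The entry is now in generation 1.
      int value;
      if (cache.Lookup("function f() {}", &value)) {
        std::printf("hit %d, promoted back to generation 0\n", value);
      }
      return 0;
    }
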
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 2f2fbad..4339d22 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,8 @@
 namespace v8 {
 namespace internal {
 
+class HashMap;
+
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
 // for each generation of the sub-cache. Since the same source code string has
@@ -121,19 +123,7 @@
 };
 
 
-// Sub-cache for eval scripts. Two caches for eval are used. One for eval calls
-// in global contexts and one for eval calls in other contexts. The cache
-// considers the following pieces of information when checking for matching
-// entries:
-// 1. The source string.
-// 2. The shared function info of the calling function.
-// 3. Whether the source should be compiled as strict code or as non-strict
-//    code.
-//    Note: Currently there are clients of CompileEval that always compile
-//    non-strict code even if the calling function is a strict mode function.
-//    More specifically these are the CompileString, DebugEvaluate and
-//    DebugEvaluateGlobal runtime functions.
-// 4. The start position of the calling scope.
+// Sub-cache for eval scripts.
 class CompilationCacheEval: public CompilationSubCache {
  public:
   CompilationCacheEval(Isolate* isolate, int generations)
@@ -141,27 +131,23 @@
 
   Handle<SharedFunctionInfo> Lookup(Handle<String> source,
                                     Handle<Context> context,
-                                    LanguageMode language_mode,
-                                    int scope_position);
+                                    StrictModeFlag strict_mode);
 
   void Put(Handle<String> source,
            Handle<Context> context,
-           Handle<SharedFunctionInfo> function_info,
-           int scope_position);
+           Handle<SharedFunctionInfo> function_info);
 
  private:
   MUST_USE_RESULT MaybeObject* TryTablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info,
-      int scope_position);
+      Handle<SharedFunctionInfo> function_info);
 
   // Note: Returns a new hash table if operation results in expansion.
   Handle<CompilationCacheTable> TablePut(
       Handle<String> source,
       Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info,
-      int scope_position);
+      Handle<SharedFunctionInfo> function_info);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
 };
@@ -212,8 +198,7 @@
   Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                         Handle<Context> context,
                                         bool is_global,
-                                        LanguageMode language_mode,
-                                        int scope_position);
+                                        StrictModeFlag strict_mode);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -230,8 +215,7 @@
   void PutEval(Handle<String> source,
                Handle<Context> context,
                bool is_global,
-               Handle<SharedFunctionInfo> function_info,
-               int scope_position);
+               Handle<SharedFunctionInfo> function_info);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
deleted file mode 100644
index 3b9c59e..0000000
--- a/src/compiler-intrinsics.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_COMPILER_INTRINSICS_H_
-#define V8_COMPILER_INTRINSICS_H_
-
-namespace v8 {
-namespace internal {
-
-class CompilerIntrinsics {
- public:
-  // Returns number of zero bits preceding least significant 1 bit.
-  // Undefined for zero value.
-  INLINE(static int CountTrailingZeros(uint32_t value));
-
-  // Returns number of zero bits following most significant 1 bit.
-  // Undefined for zero value.
-  INLINE(static int CountLeadingZeros(uint32_t value));
-};
-
-#ifdef __GNUC__
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
-  return __builtin_ctz(value);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
-  return __builtin_clz(value);
-}
-
-#elif defined(_MSC_VER)
-
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
-  unsigned long result;  //NOLINT
-  _BitScanForward(&result, static_cast<long>(value));  //NOLINT
-  return static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
-  unsigned long result;  //NOLINT
-  _BitScanReverse(&result, static_cast<long>(value));  //NOLINT
-  return 31 - static_cast<int>(result);
-}
-
-#else
-#error Unsupported compiler
-#endif
-
-} }  // namespace v8::internal
-
-#endif  // V8_COMPILER_INTRINSICS_H_
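
A minimal portable sketch (loop-based fallbacks, not the deleted header's intrinsics) of the two operations compiler-intrinsics.h wrapped, for reference on compilers with neither __builtin_ctz/__builtin_clz nor _BitScanForward/_BitScanReverse.

    #include <cstdint>
    #include <cstdio>

    // Both functions are undefined for zero, matching the contract above.
    static int CountTrailingZeros(uint32_t value) {
      int count = 0;
      while ((value & 1u) == 0) {
        value >>= 1;
        count++;
      }
      return count;
    }

    static int CountLeadingZeros(uint32_t value) {
      int count = 0;
      while ((value & 0x80000000u) == 0) {
        value <<= 1;
        count++;
      }
      return count;
    }

    int main() {
      // 8 == 0b1000: three trailing zeros, twenty-eight leading zeros.
      std::printf("%d %d\n", CountTrailingZeros(8), CountLeadingZeros(8));
      return 0;
    }
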
diff --git a/src/compiler.cc b/src/compiler.cc
index 2272337..5e1c4a9 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,7 +36,6 @@
 #include "full-codegen.h"
 #include "gdb-jit.h"
 #include "hydrogen.h"
-#include "isolate-inl.h"
 #include "lithium.h"
 #include "liveedit.h"
 #include "parser.h"
@@ -53,29 +52,28 @@
 
 CompilationInfo::CompilationInfo(Handle<Script> script)
     : isolate_(script->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE)),
+      flags_(0),
       function_(NULL),
       scope_(NULL),
-      global_scope_(NULL),
       script_(script),
       extension_(NULL),
       pre_parse_data_(NULL),
+      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
-  Initialize(BASE);
+  Initialize(NONOPT);
 }
 
 
 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
     : isolate_(shared_info->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
+      flags_(IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
-      global_scope_(NULL),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
+      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -83,16 +81,15 @@
 
 CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
     : isolate_(closure->GetIsolate()),
-      flags_(LanguageModeField::encode(CLASSIC_MODE) |
-             IsLazy::encode(true)),
+      flags_(IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
-      global_scope_(NULL),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
+      supports_deoptimization_(false),
       osr_ast_id_(AstNode::kNoNumber) {
   Initialize(BASE);
 }
@@ -110,19 +107,6 @@
 }
 
 
-// Primitive functions are unlikely to be picked up by the stack-walking
-// profiler, so they trigger their own optimization when they're called
-// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
-bool CompilationInfo::ShouldSelfOptimize() {
-  return FLAG_self_optimization &&
-      FLAG_crankshaft &&
-      !function()->flags()->Contains(kDontSelfOptimize) &&
-      !function()->flags()->Contains(kDontOptimize) &&
-      function()->scope()->allows_lazy_recompilation() &&
-      (shared_info().is_null() || !shared_info()->optimization_disabled());
-}
-
-
 void CompilationInfo::AbortOptimization() {
   Handle<Code> code(shared_info()->code());
   SetCode(code);
@@ -183,9 +167,7 @@
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
-  if (!V8::UseCrankshaft()) {
-    info->DisableOptimization();
-  }
+  if (!info->AllowOptimize()) info->DisableOptimization();
 
   // In case we are not optimizing simply return the code from
   // the full code generator.
@@ -205,7 +187,7 @@
   // Fall back to using the full code generator if it's not possible
   // to use the Hydrogen-based optimizing compiler. We already have
   // generated code for this from the shared function object.
-  if (AlwaysFullCompiler()) {
+  if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
     info->SetCode(code);
     return true;
   }
@@ -216,7 +198,8 @@
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
     info->AbortOptimization();
-    info->shared_info()->DisableOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -236,22 +219,20 @@
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
     info->AbortOptimization();
-    info->shared_info()->DisableOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
   }
 
   // Take --hydrogen-filter into account.
+  Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
   Handle<String> name = info->function()->debug_name();
-  if (*FLAG_hydrogen_filter != '\0') {
-    Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
-    if ((filter[0] == '-'
-         && name->IsEqualTo(filter.SubVector(1, filter.length())))
-        || (filter[0] != '-' && !name->IsEqualTo(filter))) {
-      info->SetCode(code);
-      return true;
-    }
+  bool match = filter.is_empty() || name->IsEqualTo(filter);
+  if (!match) {
+    info->SetCode(code);
+    return true;
   }
 
   // Recompile the unoptimized version of the code if the current version
@@ -294,7 +275,7 @@
   }
 
   Handle<Context> global_context(info->closure()->context()->global_context());
-  TypeFeedbackOracle oracle(code, global_context, info->isolate());
+  TypeFeedbackOracle oracle(code, global_context);
   HGraphBuilder builder(info, &oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph();
@@ -303,7 +284,7 @@
     return false;
   }
 
-  if (graph != NULL) {
+  if (graph != NULL && FLAG_build_lithium) {
     Handle<Code> optimized_code = graph->Compile(info);
     if (!optimized_code.is_null()) {
       info->SetCode(optimized_code);
@@ -317,7 +298,8 @@
   if (!builder.inline_bailout()) {
     // Mark the shared code as unoptimizable unless it was an inlined
     // function that bailed out.
-    info->shared_info()->DisableOptimization();
+    Handle<JSFunction> closure = info->closure();
+    info->shared_info()->DisableOptimization(*closure);
   }
   // True indicates the compilation pipeline is still going, not necessarily
   // that we optimized the code.
@@ -326,9 +308,9 @@
 
 
 static bool GenerateCode(CompilationInfo* info) {
-  return info->IsCompilingForDebugging() || !V8::UseCrankshaft() ?
-      FullCodeGenerator::MakeCode(info) :
-      MakeCrankshaftCode(info);
+  return V8::UseCrankshaft() ?
+    MakeCrankshaftCode(info) :
+    FullCodeGenerator::MakeCode(info);
 }
 
 
@@ -346,7 +328,8 @@
   // the compilation info is set if compilation succeeded.
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
-    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+    Handle<SerializedScopeInfo> scope_info =
+        SerializedScopeInfo::Create(info->scope());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -388,14 +371,8 @@
 
   // Only allow non-global compiles for eval.
   ASSERT(info->is_eval() || info->is_global());
-  ParsingFlags flags = kNoParsingFlags;
-  if (info->pre_parse_data() != NULL ||
-      String::cast(script->source())->length() > FLAG_min_preparse_length) {
-    flags = kAllowLazy;
-  }
-  if (!ParserApi::Parse(info, flags)) {
-    return Handle<SharedFunctionInfo>::null();
-  }
+
+  if (!ParserApi::Parse(info)) return Handle<SharedFunctionInfo>::null();
 
   // Measure how long it takes to do the compilation; only take the
   // rest of the function into account to avoid overlap with the
@@ -409,7 +386,7 @@
   FunctionLiteral* lit = info->function();
   LiveEditFunctionTracker live_edit_tracker(isolate, lit);
   if (!MakeCode(info)) {
-    if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    isolate->StackOverflow();
     return Handle<SharedFunctionInfo>::null();
   }
 
@@ -420,7 +397,7 @@
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          ScopeInfo::Create(info->scope()));
+          SerializedScopeInfo::Create(info->scope()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -453,9 +430,6 @@
   // the instances of the function.
   SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count());
 
-  script->set_compilation_state(
-      Smi::FromInt(Script::COMPILATION_STATE_COMPILED));
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   isolate->debugger()->OnAfterCompile(
@@ -473,7 +447,7 @@
                                              int line_offset,
                                              int column_offset,
                                              v8::Extension* extension,
-                                             ScriptDataImpl* pre_data,
+                                             ScriptDataImpl* input_pre_data,
                                              Handle<Object> script_data,
                                              NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
@@ -504,6 +478,24 @@
     // for small sources, odds are that there aren't many functions
     // that would be compiled lazily anyway, so we skip the preparse step
     // in that case too.
+    ScriptDataImpl* pre_data = input_pre_data;
+    bool harmony_block_scoping = natives != NATIVES_CODE &&
+                                 FLAG_harmony_block_scoping;
+    if (pre_data == NULL
+        && source_length >= FLAG_min_preparse_length) {
+      if (source->IsExternalTwoByteString()) {
+        ExternalTwoByteStringUC16CharacterStream stream(
+            Handle<ExternalTwoByteString>::cast(source), 0, source->length());
+        pre_data = ParserApi::PartialPreParse(&stream,
+                                              extension,
+                                              harmony_block_scoping);
+      } else {
+        GenericStringUC16CharacterStream stream(source, 0, source->length());
+        pre_data = ParserApi::PartialPreParse(&stream,
+                                              extension,
+                                              harmony_block_scoping);
+      }
+    }
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = FACTORY->NewScript(source);
@@ -524,13 +516,15 @@
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
-    if (FLAG_use_strict) {
-      info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
-    }
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
       compilation_cache->PutScript(source, result);
     }
+
+    // Get rid of the pre-parsing data (if necessary).
+    if (input_pre_data == NULL && pre_data != NULL) {
+      delete pre_data;
+    }
   }
 
   if (result.is_null()) isolate->ReportPendingMessages();
@@ -541,8 +535,7 @@
 Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
                                                  bool is_global,
-                                                 LanguageMode language_mode,
-                                                 int scope_position) {
+                                                 StrictModeFlag strict_mode) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
@@ -558,8 +551,7 @@
   result = compilation_cache->LookupEval(source,
                                          context,
                                          is_global,
-                                         language_mode,
-                                         scope_position);
+                                         strict_mode);
 
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
@@ -567,24 +559,16 @@
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    info.SetLanguageMode(language_mode);
+    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
-      // Explicitly disable optimization for eval code. We're not yet prepared
-      // to handle eval-code in the optimizing compiler.
-      result->DisableOptimization();
-
-      // If caller is strict mode, the result must be in strict mode or
-      // extended mode as well, but not the other way around. Consider:
+      CompilationCache* compilation_cache = isolate->compilation_cache();
+      // If caller is strict mode, the result must be strict as well,
+      // but not the other way around. Consider:
       // eval("'use strict'; ...");
-      ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
-      // If caller is in extended mode, the result must also be in
-      // extended mode.
-      ASSERT(language_mode != EXTENDED_MODE ||
-             result->is_extended_mode());
-      compilation_cache->PutEval(
-          source, context, is_global, result, scope_position);
+      ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
+      compilation_cache->PutEval(source, context, is_global, result);
     }
   }
 
@@ -607,16 +591,17 @@
   isolate->counters()->total_compile_size()->Increment(compiled_size);
 
   // Generate the AST for the lazily compiled function.
-  if (ParserApi::Parse(info, kNoParsingFlags)) {
+  if (ParserApi::Parse(info)) {
     // Measure how long it takes to do the lazy compilation; only take the
     // rest of the function into account to avoid overlap with the lazy
     // parsing statistics.
     HistogramTimerScope timer(isolate->counters()->compile_lazy());
 
-    // After parsing we know the function's language mode. Remember it.
-    LanguageMode language_mode = info->function()->language_mode();
-    info->SetLanguageMode(language_mode);
-    shared->set_language_mode(language_mode);
+    // After parsing we know function's strict mode. Remember it.
+    if (info->function()->strict_mode()) {
+      shared->set_strict_mode(true);
+      info->MarkAsStrictMode();
+    }
 
     // Compile the code.
     if (!MakeCode(info)) {
@@ -635,15 +620,16 @@
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
       if (info->IsOptimizing()) {
-        ASSERT(shared->scope_info() != ScopeInfo::Empty());
+        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
         // scope info.  Please note, that the order of the shared function
         // info initialization is important since set_scope_info might
         // trigger a GC, causing the ASSERT below to be invalid if the code
-        // was flushed. By setting the code object last we avoid this.
-        Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope());
+        // was flushed. By setting the code object last we avoid this.
+        Handle<SerializedScopeInfo> scope_info =
+            SerializedScopeInfo::Create(info->scope());
         shared->set_scope_info(*scope_info);
         shared->set_code(*code);
         if (!function.is_null()) {
@@ -666,13 +652,8 @@
         // Check the function has compiled code.
         ASSERT(shared->is_compiled());
         shared->set_code_age(0);
-        shared->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
-        shared->set_dont_inline(lit->flags()->Contains(kDontInline));
-        shared->set_ast_node_count(lit->ast_node_count());
 
-        if (V8::UseCrankshaft() &&
-            !function.is_null() &&
-            !shared->optimization_disabled()) {
+        if (info->AllowOptimize() && !shared->optimization_disabled()) {
           // If we're asked to always optimize, we compile the optimized
           // version of the function right away - unless the debugger is
           // active as it makes no sense to compile optimized code then.
@@ -700,7 +681,7 @@
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
-  info.SetLanguageMode(literal->scope()->language_mode());
+  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
 
   LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
   // Determine if the function can be lazily compiled. This is necessary to
@@ -711,7 +692,7 @@
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !LiveEditFunctionTracker::IsActive(info.isolate());
 
-  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
 
   // Generate code
   if (FLAG_lazy && allow_lazy) {
@@ -720,7 +701,7 @@
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = ScopeInfo::Create(info.scope());
+    scope_info = SerializedScopeInfo::Create(info.scope());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
@@ -752,8 +733,8 @@
                                FunctionLiteral* lit,
                                bool is_toplevel,
                                Handle<Script> script) {
-  function_info->set_length(lit->parameter_count());
-  function_info->set_formal_parameter_count(lit->parameter_count());
+  function_info->set_length(lit->num_parameters());
+  function_info->set_formal_parameter_count(lit->num_parameters());
   function_info->set_script(*script);
   function_info->set_function_token_position(lit->function_token_position());
   function_info->set_start_position(lit->start_position());
@@ -766,13 +747,9 @@
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
-  function_info->set_language_mode(lit->language_mode());
+  function_info->set_strict_mode(lit->strict_mode());
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
-  function_info->set_ast_node_count(lit->ast_node_count());
-  function_info->set_is_function(lit->is_function());
-  function_info->set_dont_optimize(lit->flags()->Contains(kDontOptimize));
-  function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
 }
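Behavioural note on the --hydrogen-filter hunk above: the 3.7 code being reverted supported a leading '-' to exclude exactly one function from optimization, whereas the restored 3.6 code treats the flag as empty-or-exact-match. A standalone sketch of the two predicates (illustrative; std::string stands in for V8's Vector<const char>):

#include <string>

// True when `name` should be optimized under the 3.7 semantics removed here.
static bool ShouldOptimize37(const std::string& filter,
                             const std::string& name) {
  if (filter.empty()) return true;                        // no filter: all
  if (filter[0] == '-') return name != filter.substr(1);  // "-foo": all but foo
  return name == filter;                                  // "foo": only foo
}

// True under the restored 3.6 semantics: empty filter or exact match.
static bool ShouldOptimize36(const std::string& filter,
                             const std::string& name) {
  return filter.empty() || name == filter;
}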
 
 
diff --git a/src/compiler.h b/src/compiler.h
index 44df9e0..69ab27d 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,15 +52,10 @@
   bool is_lazy() const { return IsLazy::decode(flags_); }
   bool is_eval() const { return IsEval::decode(flags_); }
   bool is_global() const { return IsGlobal::decode(flags_); }
-  bool is_classic_mode() const { return language_mode() == CLASSIC_MODE; }
-  bool is_extended_mode() const { return language_mode() == EXTENDED_MODE; }
-  LanguageMode language_mode() const {
-    return LanguageModeField::decode(flags_);
-  }
+  bool is_strict_mode() const { return IsStrictMode::decode(flags_); }
   bool is_in_loop() const { return IsInLoop::decode(flags_); }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
-  Scope* global_scope() const { return global_scope_; }
   Handle<Code> code() const { return code_; }
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -78,11 +73,11 @@
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void SetLanguageMode(LanguageMode language_mode) {
-    ASSERT(this->language_mode() == CLASSIC_MODE ||
-           this->language_mode() == language_mode ||
-           language_mode == EXTENDED_MODE);
-    flags_ = LanguageModeField::update(flags_, language_mode);
+  void MarkAsStrictMode() {
+    flags_ |= IsStrictMode::encode(true);
+  }
+  StrictModeFlag StrictMode() {
+    return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
@@ -102,10 +97,6 @@
     ASSERT(scope_ == NULL);
     scope_ = scope;
   }
-  void SetGlobalScope(Scope* global_scope) {
-    ASSERT(global_scope_ == NULL);
-    global_scope_ = global_scope;
-  }
   void SetCode(Handle<Code> code) { code_ = code; }
   void SetExtension(v8::Extension* extension) {
     ASSERT(!is_lazy());
@@ -123,19 +114,6 @@
     ASSERT(IsOptimizing());
     osr_ast_id_ = osr_ast_id;
   }
-  void MarkCompilingForDebugging(Handle<Code> current_code) {
-    ASSERT(mode_ != OPTIMIZE);
-    ASSERT(current_code->kind() == Code::FUNCTION);
-    flags_ |= IsCompilingForDebugging::encode(true);
-    if (current_code->is_compiled_optimizable()) {
-      EnableDeoptimizationSupport();
-    } else {
-      mode_ = CompilationInfo::NONOPT;
-    }
-  }
-  bool IsCompilingForDebugging() {
-    return IsCompilingForDebugging::decode(flags_);
-  }
 
   bool has_global_object() const {
     return !closure().is_null() && (closure()->context()->global() != NULL);
@@ -155,16 +133,16 @@
   void DisableOptimization();
 
   // Deoptimization support.
-  bool HasDeoptimizationSupport() const {
-    return SupportsDeoptimization::decode(flags_);
-  }
+  bool HasDeoptimizationSupport() const { return supports_deoptimization_; }
   void EnableDeoptimizationSupport() {
     ASSERT(IsOptimizable());
-    flags_ |= SupportsDeoptimization::encode(true);
+    supports_deoptimization_ = true;
   }
 
-  // Determines whether or not to insert a self-optimization header.
-  bool ShouldSelfOptimize();
+  // Determine whether or not we can adaptively optimize.
+  bool AllowOptimize() {
+    return V8::UseCrankshaft() && !closure_.is_null();
+  }
 
   // Disable all optimization attempts of this info for the rest of the
   // current compilation pipeline.
@@ -176,8 +154,9 @@
   // Compilation mode.
   // BASE is generated by the full codegen, optionally prepared for bailouts.
   // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
-  // NONOPT is generated by the full codegen and is not prepared for
-  //   recompilation/bailouts.  These functions are never recompiled.
+  // NONOPT is generated by the full codegen or the classic backend
+  //   and is not prepared for recompilation/bailouts. These functions
+  //   are never recompiled.
   enum Mode {
     BASE,
     OPTIMIZE,
@@ -192,9 +171,8 @@
     if (script_->type()->value() == Script::TYPE_NATIVE) {
       MarkAsNative();
     }
-    if (!shared_info_.is_null()) {
-      ASSERT(language_mode() == CLASSIC_MODE);
-      SetLanguageMode(shared_info_->language_mode());
+    if (!shared_info_.is_null() && shared_info_->strict_mode()) {
+      MarkAsStrictMode();
     }
   }
 
@@ -214,14 +192,9 @@
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
-  class LanguageModeField: public BitField<LanguageMode, 4, 2> {};
+  class IsStrictMode: public BitField<bool, 4, 1> {};
   // Is this a function from our natives.
   class IsNative: public BitField<bool, 6, 1> {};
-  // Is this code being compiled with support for deoptimization.
-  class SupportsDeoptimization: public BitField<bool, 7, 1> {};
-  // If compiling for debugging produce just full code matching the
-  // initial mode setting.
-  class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
 
 
   unsigned flags_;
@@ -232,8 +205,6 @@
   // The scope of the function literal as a convenience.  Set to indicate
   // that scopes have been analyzed.
   Scope* scope_;
-  // The global scope provided as a convenience.
-  Scope* global_scope_;
   // The compiled code.
   Handle<Code> code_;
 
@@ -252,6 +223,7 @@
 
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
+  bool supports_deoptimization_;
   int osr_ast_id_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
@@ -277,9 +249,6 @@
 
   static const int kMaxInliningLevels = 3;
 
-  // Call count before primitive functions trigger their own optimization.
-  static const int kCallsUntilPrimitiveOpt = 200;
-
   // All routines return a SharedFunctionInfo.
   // If an error occurs an exception is raised and the return handle
   // contains NULL.
@@ -298,8 +267,7 @@
   static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
                                                 Handle<Context> context,
                                                 bool is_global,
-                                                LanguageMode language_mode,
-                                                int scope_position);
+                                                StrictModeFlag strict_mode);
 
   // Compile from function info (used for lazy compilation). Returns true on
   // success and false if the compilation resulted in a stack overflow.
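This hunk also trades the packed SupportsDeoptimization bit for a plain supports_deoptimization_ bool, while the remaining flags stay packed in flags_. For readers unfamiliar with the pattern, a reduced sketch of the BitField helper those flag classes derive from (the real template lives in src/utils.h; this is a simplified illustration, not the shipping definition):

#include <stdint.h>

template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {    // pack a value into its slice
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {    // extract the slice back out
    return static_cast<T>((flags & kMask) >> shift);
  }
};

// Bit positions as declared in the hunk above.
class IsInLoop : public BitField<bool, 3, 1> {};
class IsStrictMode : public BitField<bool, 4, 1> {};

// uint32_t flags = IsStrictMode::encode(true);
// IsStrictMode::decode(flags) == true; IsInLoop::decode(flags) == false.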
diff --git a/src/contexts.cc b/src/contexts.cc
index 76784bd..4f93abd 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -86,14 +86,14 @@
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
-                               int* index,
+                               int* index_,
                                PropertyAttributes* attributes,
                                BindingFlags* binding_flags) {
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
-  *index = -1;
+  *index_ = -1;
   *attributes = ABSENT;
   *binding_flags = MISSING_BINDING;
 
@@ -110,51 +110,70 @@
       PrintF("\n");
     }
 
-    // 1. Check global objects, subjects of with, and extension objects.
-    if (context->IsGlobalContext() ||
-        context->IsWithContext() ||
-        (context->IsFunctionContext() && context->has_extension())) {
-      Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
-      // Context extension objects need to behave as if they have no
-      // prototype.  So even if we want to follow prototype chains, we need
-      // to only do a local lookup for context extension objects.
-      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
-          object->IsJSContextExtensionObject()) {
-        *attributes = object->GetLocalPropertyAttribute(*name);
-      } else {
-        *attributes = object->GetPropertyAttribute(*name);
-      }
-      if (*attributes != ABSENT) {
-        if (FLAG_trace_contexts) {
-          PrintF("=> found property in context object %p\n",
-                 reinterpret_cast<void*>(*object));
+    // Check extension/with/global object.
+    if (!context->IsBlockContext() && context->has_extension()) {
+      if (context->IsCatchContext()) {
+        // Catch contexts have the variable name in the extension slot.
+        if (name->Equals(String::cast(context->extension()))) {
+          if (FLAG_trace_contexts) {
+            PrintF("=> found in catch context\n");
+          }
+          *index_ = Context::THROWN_OBJECT_INDEX;
+          *attributes = NONE;
+          *binding_flags = MUTABLE_IS_INITIALIZED;
+          return context;
         }
-        return object;
+      } else {
+        ASSERT(context->IsGlobalContext() ||
+               context->IsFunctionContext() ||
+               context->IsWithContext());
+        // Global, function, and with contexts may have an object in the
+        // extension slot.
+        Handle<JSObject> extension(JSObject::cast(context->extension()),
+                                   isolate);
+        // Context extension objects need to behave as if they have no
+        // prototype.  So even if we want to follow prototype chains, we
+        // need to only do a local lookup for context extension objects.
+        if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+            extension->IsJSContextExtensionObject()) {
+          *attributes = extension->GetLocalPropertyAttribute(*name);
+        } else {
+          *attributes = extension->GetPropertyAttribute(*name);
+        }
+        if (*attributes != ABSENT) {
+          // property found
+          if (FLAG_trace_contexts) {
+            PrintF("=> found property in context object %p\n",
+                   reinterpret_cast<void*>(*extension));
+          }
+          return extension;
+        }
       }
     }
 
-    // 2. Check the context proper if it has slots.
+    // Check serialized scope information of functions and blocks. Only
+    // functions can have parameters and a function name.
     if (context->IsFunctionContext() || context->IsBlockContext()) {
-      // Use serialized scope information of functions and blocks to search
-      // for the context index.
-      Handle<ScopeInfo> scope_info;
+      // We may have context-local slots.  Check locals in the context.
+      Handle<SerializedScopeInfo> scope_info;
       if (context->IsFunctionContext()) {
-        scope_info = Handle<ScopeInfo>(
+        scope_info = Handle<SerializedScopeInfo>(
             context->closure()->shared()->scope_info(), isolate);
       } else {
-        scope_info = Handle<ScopeInfo>(
-            ScopeInfo::cast(context->extension()), isolate);
+        ASSERT(context->IsBlockContext());
+        scope_info = Handle<SerializedScopeInfo>(
+            SerializedScopeInfo::cast(context->extension()), isolate);
       }
-      VariableMode mode;
-      InitializationFlag init_flag;
-      int slot_index = scope_info->ContextSlotIndex(*name, &mode, &init_flag);
-      ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
-      if (slot_index >= 0) {
+
+      Variable::Mode mode;
+      int index = scope_info->ContextSlotIndex(*name, &mode);
+      ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+      if (index >= 0) {
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
-                 slot_index, mode);
+                 index, mode);
         }
-        *index = slot_index;
+        *index_ = index;
         // Note: Fixed context slots are statically allocated by the compiler.
         // Statically allocated variables always have a statically known mode,
         // which is the mode with which they were declared when added to the
@@ -162,31 +181,23 @@
         // declared variables that were introduced through declaration nodes)
         // must not appear here.
         switch (mode) {
-          case INTERNAL:  // Fall through.
-          case VAR:
+          case Variable::INTERNAL:  // Fall through.
+          case Variable::VAR:
             *attributes = NONE;
             *binding_flags = MUTABLE_IS_INITIALIZED;
             break;
-          case LET:
+          case Variable::LET:
             *attributes = NONE;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
+            *binding_flags = MUTABLE_CHECK_INITIALIZED;
             break;
-          case CONST:
+          case Variable::CONST:
             *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
+            *binding_flags = IMMUTABLE_CHECK_INITIALIZED;
             break;
-          case CONST_HARMONY:
-            *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
-                IMMUTABLE_IS_INITIALIZED_HARMONY;
-            break;
-          case DYNAMIC:
-          case DYNAMIC_GLOBAL:
-          case DYNAMIC_LOCAL:
-          case TEMPORARY:
+          case Variable::DYNAMIC:
+          case Variable::DYNAMIC_GLOBAL:
+          case Variable::DYNAMIC_LOCAL:
+          case Variable::TEMPORARY:
             UNREACHABLE();
             break;
         }
@@ -195,37 +206,22 @@
 
       // Check the slot corresponding to the intermediate context holding
       // only the function name variable.
-      if (follow_context_chain && context->IsFunctionContext()) {
-        VariableMode mode;
-        int function_index = scope_info->FunctionContextSlotIndex(*name, &mode);
-        if (function_index >= 0) {
+      if (follow_context_chain) {
+        int index = scope_info->FunctionContextSlotIndex(*name);
+        if (index >= 0) {
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
-                   function_index);
+                   index);
           }
-          *index = function_index;
+          *index_ = index;
           *attributes = READ_ONLY;
-          ASSERT(mode == CONST || mode == CONST_HARMONY);
-          *binding_flags = (mode == CONST)
-              ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
+          *binding_flags = IMMUTABLE_IS_INITIALIZED;
           return context;
         }
       }
-
-    } else if (context->IsCatchContext()) {
-      // Catch contexts have the variable name in the extension slot.
-      if (name->Equals(String::cast(context->extension()))) {
-        if (FLAG_trace_contexts) {
-          PrintF("=> found in catch context\n");
-        }
-        *index = Context::THROWN_OBJECT_INDEX;
-        *attributes = NONE;
-        *binding_flags = MUTABLE_IS_INITIALIZED;
-        return context;
-      }
     }
 
-    // 3. Prepare to continue with the previous (next outermost) context.
+    // Proceed with the previous context.
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
     } else {
@@ -240,6 +236,68 @@
 }
 
 
+bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
+  Context* context = this;
+
+  // Check that there is no local with the given name in contexts
+  // before the global context and check that there are no context
+  // extension objects (conservative check for with statements).
+  while (!context->IsGlobalContext()) {
+    // Check if the context is a catch or with context, or has introduced
+    // bindings by calling non-strict eval.
+    if (context->has_extension()) return false;
+
+    // Not a with context so it must be a function context.
+    ASSERT(context->IsFunctionContext());
+
+    // Check non-parameter locals.
+    Handle<SerializedScopeInfo> scope_info(
+        context->closure()->shared()->scope_info());
+    Variable::Mode mode;
+    int index = scope_info->ContextSlotIndex(*name, &mode);
+    ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
+    if (index >= 0) return false;
+
+    // Check parameter locals.
+    int param_index = scope_info->ParameterIndex(*name);
+    if (param_index >= 0) return false;
+
+    // Check context only holding the function name variable.
+    index = scope_info->FunctionContextSlotIndex(*name);
+    if (index >= 0) return false;
+    context = context->previous();
+  }
+
+  // No local or potential with statement found so the variable is
+  // global unless it is shadowed by an eval-introduced variable.
+  return true;
+}
+
+
+void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                                   bool* outer_scope_calls_non_strict_eval) {
+  // Skip up the context chain checking all the function contexts to see
+  // whether they call eval.
+  Context* context = this;
+  while (!context->IsGlobalContext()) {
+    if (context->IsFunctionContext()) {
+      Handle<SerializedScopeInfo> scope_info(
+          context->closure()->shared()->scope_info());
+      if (scope_info->CallsEval()) {
+        *outer_scope_calls_eval = true;
+        if (!scope_info->IsStrictMode()) {
+          // No need to go further since the answers will not change from
+          // here.
+          *outer_scope_calls_non_strict_eval = true;
+          return;
+        }
+      }
+    }
+    context = context->previous();
+  }
+}
+
+
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
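The restored GlobalIfNotShadowedByEval is deliberately conservative: any context carrying an extension object (a with object, a catch binding, or bindings introduced by non-strict eval) makes the name "not provably global". A self-contained toy model of that walk (illustrative only; real contexts hold closures and serialized scope info, not a flat name list):

#include <cstddef>
#include <string>
#include <vector>

struct ToyContext {
  std::vector<std::string> locals;  // stands in for the serialized scope info
  bool has_extension;               // with/catch/eval-introduced bindings
  ToyContext* previous;             // next outer context; NULL at the global
};

// Mirrors the loop above: true only if no enclosing context binds `name`
// statically and none could bind it dynamically.
static bool GlobalIfNotShadowed(const ToyContext* c, const std::string& name) {
  for (; c != NULL; c = c->previous) {
    if (c->has_extension) return false;  // conservative: give up immediately
    for (size_t i = 0; i < c->locals.size(); ++i) {
      if (c->locals[i] == name) return false;  // statically bound here
    }
  }
  return true;
}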
diff --git a/src/contexts.h b/src/contexts.h
index af5cb03..505f86c 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,43 +46,24 @@
 
 // ES5 10.2 defines lexical environments with mutable and immutable bindings.
 // Immutable bindings have two states, initialized and uninitialized, and
-// their state is changed by the InitializeImmutableBinding method. The
-// BindingFlags enum represents information if a binding has definitely been
-// initialized. A mutable binding does not need to be checked and thus has
-// the BindingFlag MUTABLE_IS_INITIALIZED.
-//
-// There are two possibilities for immutable bindings
-//  * 'const' declared variables. They are initialized when evaluating the
-//    corresponding declaration statement. They need to be checked for being
-//    initialized and thus get the flag IMMUTABLE_CHECK_INITIALIZED.
-//  * The function name of a named function literal. The binding is immediately
-//    initialized when entering the function and thus does not need to be
-//    checked. It gets the BindingFlag IMMUTABLE_IS_INITIALIZED.
-// Accessing an uninitialized binding produces the undefined value.
+// their state is changed by the InitializeImmutableBinding method.
 //
 // The harmony proposal for block scoped bindings also introduces the
-// uninitialized state for mutable bindings.
-//  * A 'let' declared variable. They are initialized when evaluating the
-//    corresponding declaration statement. They need to be checked for being
-//    initialized and thus get the flag MUTABLE_CHECK_INITIALIZED.
-//  * A 'var' declared variable. It is initialized immediately upon creation
-//    and thus doesn't need to be checked. It gets the flag
-//    MUTABLE_IS_INITIALIZED.
-//  * Catch bound variables, function parameters and variables introduced by
-//    function declarations are initialized immediately and do not need to be
-//    checked. Thus they get the flag MUTABLE_IS_INITIALIZED.
-// Immutable bindings in harmony mode get the _HARMONY flag variants. Accessing
-// an uninitialized binding produces a reference error.
-//
-// In V8 uninitialized bindings are set to the hole value upon creation and set
-// to a different value upon initialization.
+// uninitialized state for mutable bindings. A 'let' declared variable
+// is a mutable binding that is created uninitialized upon activation of its
+// lexical environment and initialized when evaluating its declaration
+// statement. Var declared variables are mutable bindings that are
+// immediately initialized upon creation. The BindingFlags enum indicates
+// whether a binding has definitely been initialized. 'const' declared
+// variables are created as uninitialized immutable bindings.
+
+// In harmony mode accessing an uninitialized binding produces a reference
+// error.
 enum BindingFlags {
   MUTABLE_IS_INITIALIZED,
   MUTABLE_CHECK_INITIALIZED,
   IMMUTABLE_IS_INITIALIZED,
   IMMUTABLE_CHECK_INITIALIZED,
-  IMMUTABLE_IS_INITIALIZED_HARMONY,
-  IMMUTABLE_CHECK_INITIALIZED_HARMONY,
   MISSING_BINDING
 };
 
@@ -104,11 +85,7 @@
   V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
-  V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
   V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
-  V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
-  V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
-  V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
   V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
   V(JSON_OBJECT_INDEX, JSObject, json_object) \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -132,6 +109,7 @@
   V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
   V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
     strict_mode_function_instance_map) \
+  V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
   V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@@ -156,13 +134,9 @@
   V(MAP_CACHE_INDEX, Object, map_cache) \
   V(CONTEXT_DATA_INDEX, Object, data) \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
-  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
-    to_complete_property_descriptor) \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
   V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
-  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
-  V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
-  V(RANDOM_SEED_INDEX, ByteArray, random_seed)
+  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -218,8 +192,7 @@
     PREVIOUS_INDEX,
     // The extension slot is used for either the global object (in global
-    // contexts), eval extension object (function contexts), subject of with
-    // (with contexts), the variable name (catch contexts), or the serialized
-    // scope info (block contexts).
+    // (with contexts), or the variable name (catch contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,
@@ -233,6 +206,7 @@
     ARGUMENTS_BOILERPLATE_INDEX,
     ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
     STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
+    JS_ARRAY_MAP_INDEX,
     REGEXP_RESULT_MAP_INDEX,
     FUNCTION_MAP_INDEX,
     STRICT_MODE_FUNCTION_MAP_INDEX,
@@ -246,11 +220,7 @@
     STRING_FUNCTION_INDEX,
     STRING_FUNCTION_PROTOTYPE_MAP_INDEX,
     OBJECT_FUNCTION_INDEX,
-    INTERNAL_ARRAY_FUNCTION_INDEX,
     ARRAY_FUNCTION_INDEX,
-    SMI_JS_ARRAY_MAP_INDEX,
-    DOUBLE_JS_ARRAY_MAP_INDEX,
-    OBJECT_JS_ARRAY_MAP_INDEX,
     DATE_FUNCTION_INDEX,
     JSON_OBJECT_INDEX,
     REGEXP_FUNCTION_INDEX,
@@ -282,12 +252,9 @@
     OUT_OF_MEMORY_INDEX,
     CONTEXT_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
-    TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,
     DERIVED_SET_TRAP_INDEX,
-    PROXY_ENUMERATE,
-    RANDOM_SEED_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
@@ -356,10 +323,6 @@
     Map* map = this->map();
     return map == map->GetHeap()->block_context_map();
   }
-  bool IsModuleContext() {
-    Map* map = this->map();
-    return map == map->GetHeap()->module_context_map();
-  }
 
   // Tells whether the global context is marked with out of memory.
   inline bool has_out_of_memory();
@@ -367,24 +330,18 @@
   // Mark the global context with out of memory.
   inline void mark_out_of_memory();
 
+  // The exception holder is the object used as a with object in
+  // the implementation of a catch block.
+  bool is_exception_holder(Object* object) {
+    return IsCatchContext() && extension() == object;
+  }
+
   // A global context holds a list of all functions which have been optimized.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
   Object* OptimizedFunctionsListHead();
   void ClearOptimizedFunctions();
 
-  static int GetContextMapIndexFromElementsKind(
-      ElementsKind elements_kind) {
-    if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-      return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
-    } else if (elements_kind == FAST_ELEMENTS) {
-      return Context::OBJECT_JS_ARRAY_MAP_INDEX;
-    } else {
-      ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-      return Context::SMI_JS_ARRAY_MAP_INDEX;
-    }
-  }
-
 #define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void  set_##name(type* value) {                         \
     ASSERT(IsGlobalContext());                            \
@@ -398,28 +355,46 @@
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
 
   // Lookup the slot called name, starting with the current context.
-  // There are three possibilities:
+  // There are 4 possible outcomes:
   //
-  // 1) result->IsContext():
-  //    The binding was found in a context.  *index is always the
-  //    non-negative slot index.  *attributes is NONE for var and let
-  //    declarations, READ_ONLY for const declarations (never ABSENT).
+  // 1) index_ >= 0 && result->IsContext():
+  //    most common case, the result is a Context, and index is the
+  //    context slot index, and the slot exists.
+  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
   //
-  // 2) result->IsJSObject():
-  //    The binding was found as a named property in a context extension
-  //    object (i.e., was introduced via eval), as a property on the subject
-  //    of with, or as a property of the global object.  *index is -1 and
-  //    *attributes is not ABSENT.
+  // 2) index_ >= 0 && result->IsJSObject():
+  //    the result is the JSObject arguments object, the index is the parameter
+  //    index, i.e., key into the arguments object, and the property exists.
+  //    attributes != ABSENT.
   //
-  // 3) result.is_null():
-  //    There was no binding found, *index is always -1 and *attributes is
-  //    always ABSENT.
+  // 3) index_ < 0 && result->IsJSObject():
+  //    the result is the JSObject extension context or the global object,
+  //    and the name is the property name, and the property exists.
+  //    attributes != ABSENT.
+  //
+  // 4) index_ < 0 && result.is_null():
+  //    there was no context found with the corresponding property.
+  //    attributes == ABSENT.
   Handle<Object> Lookup(Handle<String> name,
                         ContextLookupFlags flags,
-                        int* index,
+                        int* index_,
                         PropertyAttributes* attributes,
                         BindingFlags* binding_flags);
 
+  // Determine if a local variable with the given name exists in a
+  // context.  Do not consider context extension objects.  This is
+  // used for compiling code using eval.  If the context surrounding
+  // the eval call does not have a local variable with this name and
+  // does not contain a with statement, the property is global unless
+  // it is shadowed by a property in an extension object introduced by
+  // eval.
+  bool GlobalIfNotShadowedByEval(Handle<String> name);
+
+  // Determine if any function scope in the context calls eval and if
+  // any of those calls are in non-strict mode.
+  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
+                            bool* outer_scope_calls_non_strict_eval);
+
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
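A compact way to read the four Lookup outcomes documented above is as a two-axis classification: slot index (>= 0 or < 0) crossed with holder kind (context, object, or null). A self-contained toy classifier (illustrative; HolderKind stands in for the result->IsContext()/IsJSObject() checks):

#include <cassert>

enum HolderKind { kNullHolder, kContextHolder, kObjectHolder };
enum Outcome { kContextSlot, kArgumentsEntry, kNamedProperty, kAbsent };

// Maps (index, holder) pairs onto the four documented cases.
static Outcome Classify(int index, HolderKind holder) {
  if (index >= 0) {
    assert(holder != kNullHolder);  // cases 1 and 2 always return a holder
    return holder == kContextHolder ? kContextSlot      // case 1
                                    : kArgumentsEntry;  // case 2
  }
  return holder == kObjectHolder ? kNamedProperty       // case 3
                                 : kAbsent;             // case 4
}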
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index b098a1c..41cf0d5 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -46,15 +46,15 @@
 namespace v8 {
 namespace internal {
 
-inline double JunkStringValue() {
-  return BitCast<double, uint64_t>(kQuietNaNMask);
+static inline double JunkStringValue() {
+  return std::numeric_limits<double>::quiet_NaN();
 }
 
 
 // The fast double-to-unsigned-int conversion routine does not guarantee
 // rounding towards zero, or any reasonable value if the argument is larger
 // than what fits in an unsigned 32-bit integer.
-inline unsigned int FastD2UI(double x) {
+static inline unsigned int FastD2UI(double x) {
   // There is no unsigned version of lrint, so there is no fast path
   // in this function as there is in FastD2I. Using lrint doesn't work
   // for values of 2^31 and above.
@@ -80,7 +80,7 @@
 }
 
 
-inline double DoubleToInteger(double x) {
+static inline double DoubleToInteger(double x) {
   if (isnan(x)) return 0;
   if (!isfinite(x) || x == 0) return x;
   return (x >= 0) ? floor(x) : ceil(x);
@@ -103,9 +103,9 @@
 
 
 template <class Iterator, class EndMark>
-bool SubStringEquals(Iterator* current,
-                     EndMark end,
-                     const char* substring) {
+static bool SubStringEquals(Iterator* current,
+                            EndMark end,
+                            const char* substring) {
   ASSERT(**current == *substring);
   for (substring++; *substring != '\0'; substring++) {
     ++*current;
@@ -119,9 +119,9 @@
 // Returns true if a nonspace character has been found and false if the
 // end has been reached before finding a nonspace character.
 template <class Iterator, class EndMark>
-inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
-                              Iterator* current,
-                              EndMark end) {
+static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
+                                     Iterator* current,
+                                     EndMark end) {
   while (*current != end) {
     if (!unicode_cache->IsWhiteSpace(**current)) return true;
     ++*current;
@@ -132,11 +132,11 @@
 
 // Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
 template <int radix_log_2, class Iterator, class EndMark>
-double InternalStringToIntDouble(UnicodeCache* unicode_cache,
-                                 Iterator current,
-                                 EndMark end,
-                                 bool negative,
-                                 bool allow_trailing_junk) {
+static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
+                                        Iterator current,
+                                        EndMark end,
+                                        bool negative,
+                                        bool allow_trailing_junk) {
   ASSERT(current != end);
 
   // Skip leading 0s.
@@ -235,10 +235,10 @@
 
 
 template <class Iterator, class EndMark>
-double InternalStringToInt(UnicodeCache* unicode_cache,
-                           Iterator current,
-                           EndMark end,
-                           int radix) {
+static double InternalStringToInt(UnicodeCache* unicode_cache,
+                                  Iterator current,
+                                  EndMark end,
+                                  int radix) {
   const bool allow_trailing_junk = true;
   const double empty_string_val = JunkStringValue();
 
@@ -430,11 +430,11 @@
 // 2. *current - gets the current character in the sequence.
 // 3. ++current (advances the position).
 template <class Iterator, class EndMark>
-double InternalStringToDouble(UnicodeCache* unicode_cache,
-                              Iterator current,
-                              EndMark end,
-                              int flags,
-                              double empty_string_val) {
+static double InternalStringToDouble(UnicodeCache* unicode_cache,
+                                     Iterator current,
+                                     EndMark end,
+                                     int flags,
+                                     double empty_string_val) {
   // To make sure that iterator dereferencing is valid the following
   // convention is used:
   // 1. Each '++current' statement is followed by check for equality to 'end'.
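Since DoubleToInteger above is specified against ECMA-262 section 9.4, its edge cases are easy to spot-check in isolation; a standalone restatement with a few assertions (illustrative, not V8 code):

#include <cassert>
#include <cmath>
#include <limits>

// ECMA-262 5th ed., 9.4: NaN maps to 0, infinities and zeros pass through
// unchanged, everything else truncates toward zero.
static double ToInteger(double x) {
  if (std::isnan(x)) return 0;
  if (!std::isfinite(x) || x == 0) return x;
  return (x >= 0) ? std::floor(x) : std::ceil(x);
}

int main() {
  assert(ToInteger(3.7) == 3.0);
  assert(ToInteger(-3.7) == -3.0);  // toward zero, not floor
  assert(ToInteger(std::numeric_limits<double>::quiet_NaN()) == 0.0);
  return 0;
}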
diff --git a/src/conversions.h b/src/conversions.h
index 70559c9..e51ad65 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,6 +28,8 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
+#include <limits>
+
 #include "utils.h"
 
 namespace v8 {
@@ -45,14 +47,14 @@
 const int kMaxSignificantDigits = 772;
 
 
-inline bool isDigit(int x, int radix) {
+static inline bool isDigit(int x, int radix) {
   return (x >= '0' && x <= '9' && x < '0' + radix)
       || (radix > 10 && x >= 'a' && x < 'a' + radix - 10)
       || (radix > 10 && x >= 'A' && x < 'A' + radix - 10);
 }
 
 
-inline double SignedZero(bool negative) {
+static inline double SignedZero(bool negative) {
   return negative ? -0.0 : 0.0;
 }
 
@@ -61,16 +63,16 @@
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
 // integer value is outside the range of type int.
-inline int FastD2I(double x) {
+static inline int FastD2I(double x) {
   // The static_cast conversion from double to int used to be slow, but
   // as new benchmarks show, now it is much faster than lrint().
   return static_cast<int>(x);
 }
 
-inline unsigned int FastD2UI(double x);
+static inline unsigned int FastD2UI(double x);
 
 
-inline double FastI2D(int x) {
+static inline double FastI2D(int x) {
   // There is no rounding involved in converting an integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -78,7 +80,7 @@
 }
 
 
-inline double FastUI2D(unsigned x) {
+static inline double FastUI2D(unsigned x) {
   // There is no rounding involved in converting an unsigned integer to a
   // double, so this code should compile to a few instructions without
   // any FPU pipeline stalls.
@@ -87,15 +89,15 @@
 
 
 // This function should match the exact semantics of ECMA-262 9.4.
-inline double DoubleToInteger(double x);
+static inline double DoubleToInteger(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.5.
-inline int32_t DoubleToInt32(double x);
+static inline int32_t DoubleToInt32(double x);
 
 
 // This function should match the exact semantics of ECMA-262 9.6.
-inline uint32_t DoubleToUint32(double x) {
+static inline uint32_t DoubleToUint32(double x) {
   return static_cast<uint32_t>(DoubleToInt32(x));
 }
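Throughout conversions.h and conversions-inl.h this revert turns plain `inline` helpers back into `static inline` ones. The difference is linkage, not speed; a minimal illustration (general C++, not V8-specific):

// inline: external linkage; all translation units share one logical
// function, and the linker folds the duplicate definitions (ODR merge).
inline int Twice(int x) { return 2 * x; }

// static inline: internal linkage; every .cc including this header gets
// its own private copy, so definitions can never collide at link time.
static inline int Thrice(int x) { return 3 * x; }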
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 3cbac77..d74c034 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,14 +39,13 @@
 namespace v8 {
 namespace internal {
 
-static const int kEventsBufferSize = 256 * KB;
-static const int kTickSamplesBufferChunkSize = 64 * KB;
+static const int kEventsBufferSize = 256*KB;
+static const int kTickSamplesBufferChunkSize = 64*KB;
 static const int kTickSamplesBufferChunksCount = 16;
-static const int kProfilerStackSize = 64 * KB;
 
 
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
-    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
+    : Thread("v8:ProfEvntProc"),
       generator_(generator),
       running_(true),
       ticks_buffer_(sizeof(TickSampleEventRecord),
@@ -494,7 +493,7 @@
     NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
-    if (isolate->heap()->HasBeenSetUp()) {
+    if (isolate->heap()->HasBeenSetup()) {
       if (!FLAG_prof_browser_mode) {
         bool saved_log_code_flag = FLAG_log_code;
         FLAG_log_code = true;
@@ -563,7 +562,7 @@
 }
 
 
-void CpuProfiler::SetUp() {
+void CpuProfiler::Setup() {
   Isolate* isolate = Isolate::Current();
   if (isolate->cpu_profiler() == NULL) {
     isolate->set_cpu_profiler(new CpuProfiler());
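One subtle surface of this revert, visible in the constructor hunk above: the 3.7 profiler thread requested an explicit 64 KB stack (kProfilerStackSize) through Thread::Options, while the restored 3.6 constructor takes only a name and uses the platform-default stack size. A reduced sketch of the options pattern being removed (illustrative; the real class lives in src/platform.h):

struct ThreadOptions {
  explicit ThreadOptions(const char* n, int stack = 0)
      : name(n), stack_size(stack) {}
  const char* name;  // thread name, e.g. "v8:ProfEvntProc"
  int stack_size;    // 0 requests the platform default
};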
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 6e2e771..a71c0e0 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,6 +41,7 @@
 class CodeMap;
 class CpuProfile;
 class CpuProfilesCollection;
+class HashMap;
 class ProfileGenerator;
 class TokenEnumerator;
 
@@ -203,7 +204,7 @@
 // TODO(isolates): isolatify this class.
 class CpuProfiler {
  public:
-  static void SetUp();
+  static void Setup();
   static void TearDown();
 
   static void StartProfiling(const char* title);
@@ -229,11 +230,11 @@
                               Code* code, String* name);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
-                              SharedFunctionInfo* shared,
+                              SharedFunctionInfo *shared,
                               String* name);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code,
-                              SharedFunctionInfo* shared,
+                              SharedFunctionInfo *shared,
                               String* source, int line);
   static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                               Code* code, int args_count);
diff --git a/src/cpu.h b/src/cpu.h
index 247af71..2525484 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -53,7 +53,7 @@
 class CPU : public AllStatic {
  public:
   // Initializes the cpu architecture support. Called once at VM startup.
-  static void SetUp();
+  static void Setup();
 
   static bool SupportsCrankshaft();
 
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index de0faa8..adefba7 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
 
 #include "d8.h"
 #include "d8-debug.h"
@@ -169,7 +168,7 @@
   bool ok;
 
   // Make sure that socket support is initialized.
-  ok = i::Socket::SetUp();
+  ok = i::Socket::Setup();
   if (!ok) {
     printf("Unable to initialize socket support %d\n", i::Socket::LastError());
     return;
@@ -310,7 +309,9 @@
   Handle<Value> request =
       Shell::DebugCommandToJSONRequest(String::New(command));
   if (try_catch.HasCaught()) {
-    Shell::ReportException(&try_catch);
+    v8::String::Utf8Value exception(try_catch.Exception());
+    const char* exception_string = Shell::ToCString(exception);
+    printf("%s\n", exception_string);
     PrintPrompt();
     return;
   }
@@ -366,5 +367,3 @@
 
 
 }  // namespace v8
-
-#endif  // ENABLE_DEBUGGER_SUPPORT
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 8a278e4..289c3b0 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -366,8 +366,7 @@
 // We're disabling usage of waitid in Mac OS X because it doesn't work for us:
 // a parent process hangs on waiting while a child process is already a zombie.
 // See http://code.google.com/p/v8/issues/detail?id=401.
-#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__) \
-    && !defined(__NetBSD__)
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
 #if !defined(__FreeBSD__)
 #define HAS_WAITID 1
 #endif
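
The guard above exists because of how WNOWAIT changes wait semantics. A hedged illustration, not part of the patch (the function name is illustrative), of the waitid() call the HAS_WAITID path enables:

    #include <sys/types.h>
    #include <sys/wait.h>

    // Sketch: WNOWAIT peeks at the child's state without reaping it, so a
    // later waitpid() can still collect the exit status. On the platforms
    // excluded above this combination is unreliable (e.g. the parent hangs
    // on Mac OS X even when the child is already a zombie).
    static bool ChildHasExited(pid_t pid) {
      siginfo_t info;
      info.si_pid = 0;  // POSIX leaves si_pid == 0 if WNOHANG found no child
      if (waitid(P_PID, pid, &info, WEXITED | WNOHANG | WNOWAIT) != 0) {
        return false;  // error; treat as still running
      }
      return info.si_pid == pid;
    }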
diff --git a/src/d8-readline.cc b/src/d8-readline.cc
index ed7721c..71be933 100644
--- a/src/d8-readline.cc
+++ b/src/d8-readline.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,14 +49,10 @@
 class ReadLineEditor: public LineEditor {
  public:
   ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { }
-  virtual Handle<String> Prompt(const char* prompt);
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
   virtual bool Open();
   virtual bool Close();
   virtual void AddHistory(const char* str);
-
-  static const char* kHistoryFileName;
-  static const int kMaxHistoryEntries;
-
  private:
   static char** AttemptedCompletion(const char* text, int start, int end);
   static char* CompletionGenerator(const char* text, int state);
@@ -70,38 +66,25 @@
     '\0'};
 
 
-const char* ReadLineEditor::kHistoryFileName = ".d8_history";
-const int ReadLineEditor::kMaxHistoryEntries = 1000;
-
-
 bool ReadLineEditor::Open() {
   rl_initialize();
   rl_attempted_completion_function = AttemptedCompletion;
   rl_completer_word_break_characters = kWordBreakCharacters;
   rl_bind_key('\t', rl_complete);
   using_history();
-  stifle_history(kMaxHistoryEntries);
-  return read_history(kHistoryFileName) == 0;
+  stifle_history(Shell::kMaxHistoryEntries);
+  return read_history(Shell::kHistoryFileName) == 0;
 }
 
 
 bool ReadLineEditor::Close() {
-  return write_history(kHistoryFileName) == 0;
+  return write_history(Shell::kHistoryFileName) == 0;
 }
 
 
-Handle<String> ReadLineEditor::Prompt(const char* prompt) {
-  char* result = NULL;
-  {  // Release lock for blocking input.
-    Unlocker unlock(Isolate::GetCurrent());
-    result = readline(prompt);
-  }
-  if (result != NULL) {
-    AddHistory(result);
-  } else {
-    return Handle<String>();
-  }
-  return String::New(result);
+i::SmartArrayPointer<char> ReadLineEditor::Prompt(const char* prompt) {
+  char* result = readline(prompt);
+  return i::SmartArrayPointer<char>(result);
 }
 
 
@@ -135,10 +118,10 @@
   static unsigned current_index;
   static Persistent<Array> current_completions;
   if (state == 0) {
+    i::SmartArrayPointer<char> full_text(i::StrNDup(rl_line_buffer, rl_point));
     HandleScope scope;
-    Local<String> full_text = String::New(rl_line_buffer, rl_point);
     Handle<Array> completions =
-      Shell::GetCompletions(String::New(text), full_text);
+      Shell::GetCompletions(String::New(text), String::New(*full_text));
     current_completions = Persistent<Array>::New(completions);
     current_index = 0;
   }
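
The Handle<String> version of Prompt deleted above is built around one idea worth keeping in mind: readline() blocks, so the V8 lock must be released for the duration of the call or other threads can never enter the isolate. A condensed sketch of that pattern, assuming the readline headers this file already includes and the Unlocker API of this V8 era:

    // Sketch of the unlock-around-blocking-I/O pattern from the deleted code.
    v8::Handle<v8::String> PromptUnlocked(const char* prompt) {
      char* line = NULL;
      {  // Unlocker releases the isolate lock for the scope of this block.
        v8::Unlocker unlock(v8::Isolate::GetCurrent());
        line = readline(prompt);  // may block indefinitely on stdin
      }
      if (line == NULL) return v8::Handle<v8::String>();  // EOF
      add_history(line);                                  // GNU readline history
      return v8::String::New(line);
    }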
diff --git a/src/d8.cc b/src/d8.cc
index 45781cf..63a7d15 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -66,7 +66,11 @@
 
 namespace v8 {
 
+
+#ifndef V8_SHARED
 LineEditor *LineEditor::first_ = NULL;
+const char* Shell::kHistoryFileName = ".d8_history";
+const int Shell::kMaxHistoryEntries = 1000;
 
 
 LineEditor::LineEditor(Type type, const char* name)
@@ -92,37 +96,36 @@
 class DumbLineEditor: public LineEditor {
  public:
   DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
-  virtual Handle<String> Prompt(const char* prompt);
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt);
 };
 
 
 static DumbLineEditor dumb_line_editor;
 
 
-Handle<String> DumbLineEditor::Prompt(const char* prompt) {
+i::SmartArrayPointer<char> DumbLineEditor::Prompt(const char* prompt) {
+  static const int kBufferSize = 256;
+  char buffer[kBufferSize];
   printf("%s", prompt);
-  return Shell::ReadFromStdin();
+  char* str = fgets(buffer, kBufferSize, stdin);
+  return i::SmartArrayPointer<char>(str ? i::StrDup(str) : str);
 }
 
 
-#ifndef V8_SHARED
 CounterMap* Shell::counter_map_;
 i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
 i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
 Persistent<Context> Shell::utility_context_;
+LineEditor* Shell::console = NULL;
 #endif  // V8_SHARED
 
-LineEditor* Shell::console = NULL;
 Persistent<Context> Shell::evaluation_context_;
 ShellOptions Shell::options;
 const char* Shell::kPrompt = "d8> ";
 
 
-const int MB = 1024 * 1024;
-
-
 #ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
@@ -143,11 +146,11 @@
                           Handle<Value> name,
                           bool print_result,
                           bool report_exceptions) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+#ifndef V8_SHARED
   bool FLAG_debugger = i::FLAG_debugger;
 #else
   bool FLAG_debugger = false;
-#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#endif  // V8_SHARED
   HandleScope handle_scope;
   TryCatch try_catch;
   options.script_executed = true;
@@ -175,8 +178,7 @@
         // If all went well and the result wasn't undefined then print
         // the returned value.
         v8::String::Utf8Value str(result);
-        size_t count = fwrite(*str, sizeof(**str), str.length(), stdout);
-        (void) count;  // Silence GCC-4.5.x "unused result" warning.
+        fwrite(*str, sizeof(**str), str.length(), stdout);
         printf("\n");
       }
       return true;
@@ -235,7 +237,7 @@
 }
 
 
-Handle<String> Shell::ReadFromStdin() {
+Handle<Value> Shell::ReadLine(const Arguments& args) {
   static const int kBufferSize = 256;
   char buffer[kBufferSize];
   Handle<String> accumulator = String::New("");
@@ -244,12 +246,7 @@
     // Continue reading if the line ends with an escape '\\' or the line has
     // not been fully read into the buffer yet (does not end with '\n').
     // If fgets gets an error, just give up.
-    char* input = NULL;
-    {  // Release lock for blocking input.
-      Unlocker unlock(Isolate::GetCurrent());
-      input = fgets(buffer, kBufferSize, stdin);
-    }
-    if (input == NULL) return Handle<String>();
+    if (fgets(buffer, kBufferSize, stdin) == NULL) return Null();
     length = static_cast<int>(strlen(buffer));
     if (length == 0) {
       return accumulator;
@@ -283,161 +280,51 @@
   return Undefined();
 }
 
-static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
-  if (value_in->IsUint32()) {
-    return value_in->Uint32Value();
-  }
-
-  Local<Value> number = value_in->ToNumber();
-  if (try_catch->HasCaught()) return 0;
-
-  ASSERT(number->IsNumber());
-  Local<Int32> int32 = number->ToInt32();
-  if (try_catch->HasCaught() || int32.IsEmpty()) return 0;
-
-  int32_t raw_value = int32->Int32Value();
-  if (try_catch->HasCaught()) return 0;
-
-  if (raw_value < 0) {
-    ThrowException(String::New("Array length must not be negative."));
-    return 0;
-  }
-
-  static const int kMaxLength = 0x3fffffff;
-#ifndef V8_SHARED
-  ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
-#endif  // V8_SHARED
-  if (raw_value > static_cast<int32_t>(kMaxLength)) {
-    ThrowException(
-        String::New("Array length exceeds maximum length."));
-  }
-  return static_cast<size_t>(raw_value);
-}
-
-
-const char kArrayBufferReferencePropName[] = "_is_array_buffer_";
-const char kArrayBufferMarkerPropName[] = "_array_buffer_ref_";
-
 
 Handle<Value> Shell::CreateExternalArray(const Arguments& args,
                                          ExternalArrayType type,
                                          size_t element_size) {
-  TryCatch try_catch;
-  bool is_array_buffer_construct = element_size == 0;
-  if (is_array_buffer_construct) {
-    type = v8::kExternalByteArray;
-    element_size = 1;
-  }
   ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
          element_size == 8);
-  if (args.Length() == 0) {
+  if (args.Length() != 1) {
     return ThrowException(
-        String::New("Array constructor must have at least one "
-                    "parameter."));
+        String::New("Array constructor needs one parameter."));
   }
-  bool first_arg_is_array_buffer =
-      args[0]->IsObject() &&
-      args[0]->ToObject()->Get(
-          String::New(kArrayBufferMarkerPropName))->IsTrue();
-  // Currently, only the following constructors are supported:
-  //   TypedArray(unsigned long length)
-  //   TypedArray(ArrayBuffer buffer,
-  //              optional unsigned long byteOffset,
-  //              optional unsigned long length)
-  if (args.Length() > 3) {
-    return ThrowException(
-        String::New("Array constructor from ArrayBuffer must "
-                    "have 1-3 parameters."));
+  static const int kMaxLength = 0x3fffffff;
+#ifndef V8_SHARED
+  ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
+#endif  // V8_SHARED
+  size_t length = 0;
+  if (args[0]->IsUint32()) {
+    length = args[0]->Uint32Value();
+  } else {
+    Local<Number> number = args[0]->ToNumber();
+    if (number.IsEmpty() || !number->IsNumber()) {
+      return ThrowException(String::New("Array length must be a number."));
+    }
+    int32_t raw_length = number->ToInt32()->Int32Value();
+    if (raw_length < 0) {
+      return ThrowException(String::New("Array length must not be negative."));
+    }
+    if (raw_length > static_cast<int32_t>(kMaxLength)) {
+      return ThrowException(
+          String::New("Array length exceeds maximum length."));
+    }
+    length = static_cast<size_t>(raw_length);
   }
-
-  Local<Value> length_value = (args.Length() < 3)
-      ? (first_arg_is_array_buffer
-         ? args[0]->ToObject()->Get(String::New("length"))
-         : args[0])
-      : args[2];
-  size_t length = convertToUint(length_value, &try_catch);
-  if (try_catch.HasCaught()) return try_catch.Exception();
-
-  void* data = NULL;
-  size_t offset = 0;
-
+  if (length > static_cast<size_t>(kMaxLength)) {
+    return ThrowException(String::New("Array length exceeds maximum length."));
+  }
+  void* data = calloc(length, element_size);
+  if (data == NULL) {
+    return ThrowException(String::New("Memory allocation failed."));
+  }
   Handle<Object> array = Object::New();
-  if (first_arg_is_array_buffer) {
-    Handle<Object> derived_from = args[0]->ToObject();
-    data = derived_from->GetIndexedPropertiesExternalArrayData();
-
-    size_t array_buffer_length = convertToUint(
-        derived_from->Get(String::New("length")),
-        &try_catch);
-    if (try_catch.HasCaught()) return try_catch.Exception();
-
-    if (data == NULL && array_buffer_length != 0) {
-      return ThrowException(
-          String::New("ArrayBuffer doesn't have data"));
-    }
-
-    if (args.Length() > 1) {
-      offset = convertToUint(args[1], &try_catch);
-      if (try_catch.HasCaught()) return try_catch.Exception();
-
-      // The given byteOffset must be a multiple of the element size of the
-      // specific type, otherwise an exception is raised.
-      if (offset % element_size != 0) {
-        return ThrowException(
-            String::New("offset must be multiple of element_size"));
-      }
-    }
-
-    if (offset > array_buffer_length) {
-      return ThrowException(
-          String::New("byteOffset must be less than ArrayBuffer length."));
-    }
-
-    if (args.Length() == 2) {
-      // If length is not explicitly specified, the length of the ArrayBuffer
-      // minus the byteOffset must be a multiple of the element size of the
-      // specific type, or an exception is raised.
-      length = array_buffer_length - offset;
-    }
-
-    if (args.Length() != 3) {
-      if (length % element_size != 0) {
-        return ThrowException(
-            String::New("ArrayBuffer length minus the byteOffset must be a "
-                        "multiple of the element size"));
-      }
-      length /= element_size;
-    }
-
-    // If a given byteOffset and length references an area beyond the end of
-    // the ArrayBuffer an exception is raised.
-    if (offset + (length * element_size) > array_buffer_length) {
-      return ThrowException(
-          String::New("length references an area beyond the end of the "
-                      "ArrayBuffer"));
-    }
-
-    // Hold a reference to the ArrayBuffer so its buffer doesn't get collected.
-    array->Set(String::New(kArrayBufferReferencePropName), args[0], ReadOnly);
-  }
-
-  if (is_array_buffer_construct) {
-    array->Set(String::New(kArrayBufferMarkerPropName), True(), ReadOnly);
-  }
-
   Persistent<Object> persistent_array = Persistent<Object>::New(array);
   persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
   persistent_array.MarkIndependent();
-  if (data == NULL && length != 0) {
-    data = calloc(length, element_size);
-    if (data == NULL) {
-      return ThrowException(String::New("Memory allocation failed."));
-    }
-  }
-
-  array->SetIndexedPropertiesToExternalArrayData(
-      reinterpret_cast<uint8_t*>(data) + offset, type,
-      static_cast<int>(length));
+  array->SetIndexedPropertiesToExternalArrayData(data, type,
+                                                 static_cast<int>(length));
   array->Set(String::New("length"),
              Int32::New(static_cast<int32_t>(length)), ReadOnly);
   array->Set(String::New("BYTES_PER_ELEMENT"),
@@ -447,22 +334,11 @@
 
 
 void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
-  HandleScope scope;
-  Handle<String> prop_name = String::New(kArrayBufferReferencePropName);
-  Handle<Object> converted_object = object->ToObject();
-  Local<Value> prop_value = converted_object->Get(prop_name);
-  if (data != NULL && !prop_value->IsObject()) {
-    free(data);
-  }
+  free(data);
   object.Dispose();
 }
 
 
-Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
-  return CreateExternalArray(args, v8::kExternalByteArray, 0);
-}
-
-
 Handle<Value> Shell::Int8Array(const Arguments& args) {
   return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
 }
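
The constructor comments removed above encode a specific piece of arithmetic: a typed view over an ArrayBuffer is valid only if byteOffset is element-aligned and, when no explicit length is passed, the remaining bytes divide evenly by the element size. A hedged sketch of just that validation (names are illustrative, not from the patch):

    #include <stddef.h>

    // Sketch of the view-validation math from the removed CreateExternalArray.
    static bool ValidateViewSketch(size_t buffer_len, size_t byte_offset,
                                   size_t element_size, size_t* out_length) {
      if (element_size == 0) return false;
      if (byte_offset % element_size != 0) return false;  // must be aligned
      if (byte_offset > buffer_len) return false;         // offset past the end
      size_t remaining = buffer_len - byte_offset;
      if (remaining % element_size != 0) return false;    // must divide evenly
      *out_length = remaining / element_size;             // elements in the view
      return true;
    }
    // e.g. a 4-byte-element view over a 16-byte buffer at byteOffset 4 gets
    // (16 - 4) / 4 == 3 elements.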
@@ -534,10 +410,6 @@
 
 void Shell::ReportException(v8::TryCatch* try_catch) {
   HandleScope handle_scope;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
-  bool enter_context = !Context::InContext();
-  if (enter_context) utility_context_->Enter();
-#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
   v8::String::Utf8Value exception(try_catch->Exception());
   const char* exception_string = ToCString(exception);
   Handle<Message> message = try_catch->Message();
@@ -572,9 +444,6 @@
     }
   }
   printf("\n");
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
-  if (enter_context) utility_context_->Exit();
-#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
 }
 
 
@@ -612,12 +481,6 @@
   Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
   return val;
 }
-
-
-void Shell::DispatchDebugMessages() {
-  v8::Context::Scope scope(Shell::evaluation_context_);
-  v8::Debug::ProcessDebugMessages();
-}
 #endif  // ENABLE_DEBUGGER_SUPPORT
 #endif  // V8_SHARED
 
@@ -731,7 +594,6 @@
   Context::Scope utility_scope(utility_context_);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
   // Install the debugger object in the utility scope
   i::Debug* debug = i::Isolate::Current()->debug();
   debug->Load();
@@ -806,8 +668,6 @@
   global_template->Set(String::New("print"), FunctionTemplate::New(Print));
   global_template->Set(String::New("write"), FunctionTemplate::New(Write));
   global_template->Set(String::New("read"), FunctionTemplate::New(Read));
-  global_template->Set(String::New("readbinary"),
-                       FunctionTemplate::New(ReadBinary));
   global_template->Set(String::New("readline"),
                        FunctionTemplate::New(ReadLine));
   global_template->Set(String::New("load"), FunctionTemplate::New(Load));
@@ -819,8 +679,6 @@
                        FunctionTemplate::New(DisableProfiler));
 
   // Bind the handlers for external arrays.
-  global_template->Set(String::New("ArrayBuffer"),
-                       FunctionTemplate::New(ArrayBuffer));
   global_template->Set(String::New("Int8Array"),
                        FunctionTemplate::New(Int8Array));
   global_template->Set(String::New("Uint8Array"),
@@ -889,7 +747,6 @@
   // Start the debugger agent if requested.
   if (i::FLAG_debugger_agent) {
     v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
-    v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
   }
 #endif  // ENABLE_DEBUGGER_SUPPORT
 #endif  // V8_SHARED
@@ -903,8 +760,13 @@
 #endif  // V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+
+  v8::TryCatch try_catch;
   Persistent<Context> context = Context::New(NULL, global_template);
-  ASSERT(!context.IsEmpty());
+  if (context.IsEmpty()) {
+    v8::Local<v8::Value> st = try_catch.StackTrace();
+    ASSERT(!context.IsEmpty());
+  }
   Context::Scope scope(context);
 
 #ifndef V8_SHARED
@@ -935,47 +797,22 @@
 
 
 #ifndef V8_SHARED
-struct CounterAndKey {
-  Counter* counter;
-  const char* key;
-};
-
-
-int CompareKeys(const void* a, const void* b) {
-  return strcmp(static_cast<const CounterAndKey*>(a)->key,
-                static_cast<const CounterAndKey*>(b)->key);
-}
-
-
 void Shell::OnExit() {
   if (console != NULL) console->Close();
   if (i::FLAG_dump_counters) {
-    int number_of_counters = 0;
+    printf("+----------------------------------------+-------------+\n");
+    printf("| Name                                   | Value       |\n");
+    printf("+----------------------------------------+-------------+\n");
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
-      number_of_counters++;
-    }
-    CounterAndKey* counters = new CounterAndKey[number_of_counters];
-    int j = 0;
-    for (CounterMap::Iterator i(counter_map_); i.More(); i.Next(), j++) {
-      counters[j].counter = i.CurrentValue();
-      counters[j].key = i.CurrentKey();
-    }
-    qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys);
-    printf("+--------------------------------------------+-------------+\n");
-    printf("| Name                                       | Value       |\n");
-    printf("+--------------------------------------------+-------------+\n");
-    for (j = 0; j < number_of_counters; j++) {
-      Counter* counter = counters[j].counter;
-      const char* key = counters[j].key;
+      Counter* counter = i.CurrentValue();
       if (counter->is_histogram()) {
-        printf("| c:%-40s | %11i |\n", key, counter->count());
-        printf("| t:%-40s | %11i |\n", key, counter->sample_total());
+        printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count());
+        printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total());
       } else {
-        printf("| %-42s | %11i |\n", key, counter->count());
+        printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count());
       }
     }
-    printf("+--------------------------------------------+-------------+\n");
-    delete [] counters;
+    printf("+----------------------------------------+-------------+\n");
   }
   if (counters_file_ != NULL)
     delete counters_file_;
@@ -984,7 +821,7 @@
 
 
 static FILE* FOpen(const char* path, const char* mode) {
-#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
+#if (defined(_WIN32) || defined(_WIN64))
   FILE* result;
   if (fopen_s(&result, path, mode) == 0) {
     return result;
@@ -1026,23 +863,6 @@
 }
 
 
-Handle<Value> Shell::ReadBinary(const Arguments& args) {
-  String::Utf8Value filename(args[0]);
-  int size;
-  if (*filename == NULL) {
-    return ThrowException(String::New("Error loading file"));
-  }
-  char* chars = ReadChars(*filename, &size);
-  if (chars == NULL) {
-    return ThrowException(String::New("Error reading file"));
-  }
-  // We skip checking the string for UTF8 characters and use it raw as
-  // backing store for the external string with 8-bit characters.
-  BinaryResource* resource = new BinaryResource(chars, size);
-  return String::NewExternal(resource);
-}
-
-
 #ifndef V8_SHARED
 static char* ReadToken(char* data, char token) {
   char* next = i::OS::StrChr(data, token);
@@ -1082,15 +902,31 @@
   Context::Scope context_scope(evaluation_context_);
   HandleScope outer_scope;
   Handle<String> name = String::New("(d8)");
+#ifndef V8_SHARED
   console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
+  if (i::FLAG_debugger) {
+    printf("JavaScript debugger enabled\n");
+  }
   console->Open();
   while (true) {
+    i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
+    if (input.is_empty()) break;
+    console->AddHistory(*input);
     HandleScope inner_scope;
-    Handle<String> input = console->Prompt(Shell::kPrompt);
-    if (input.IsEmpty()) break;
-    ExecuteString(input, name, true, true);
+    ExecuteString(String::New(*input), name, true, true);
   }
+#else
+  printf("V8 version %s [D8 light using shared library]\n", V8::GetVersion());
+  static const int kBufferSize = 256;
+  while (true) {
+    char buffer[kBufferSize];
+    printf("%s", Shell::kPrompt);
+    if (fgets(buffer, kBufferSize, stdin) == NULL) break;
+    HandleScope inner_scope;
+    ExecuteString(String::New(buffer), name, true, true);
+  }
+#endif  // V8_SHARED
   printf("\n");
 }
 
@@ -1213,11 +1049,14 @@
 
 #ifndef V8_SHARED
 i::Thread::Options SourceGroup::GetThreadOptions() {
+  i::Thread::Options options;
+  options.name = "IsolateThread";
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
   // The stack size should be at least StackGuard::kLimitSize + some
-  // OS-specific padding for thread startup code.  2Mbytes seems to be enough.
-  return i::Thread::Options("IsolateThread", 2 * MB);
+  // OS-specific padding for thread startup code.
+  options.stack_size = 2 << 20;  // 2 Mb seems to be enough
+  return options;
 }
 
 
@@ -1288,7 +1127,7 @@
       options.use_preemption = true;
       argv[i] = NULL;
 #endif  // V8_SHARED
-    } else if (strcmp(argv[i], "--nopreemption") == 0) {
+    } else if (strcmp(argv[i], "--no-preemption") == 0) {
 #ifdef V8_SHARED
       printf("D8 with shared library does not support multi-threading\n");
       return false;
@@ -1419,30 +1258,15 @@
     Locker lock;
     HandleScope scope;
     Persistent<Context> context = CreateEvaluationContext();
-    if (options.last_run) {
-      // Keep using the same context in the interactive shell.
-      evaluation_context_ = context;
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
-      // If the interactive debugger is enabled make sure to activate
-      // it before running the files passed on the command line.
-      if (i::FLAG_debugger) {
-        InstallUtilityScript();
-      }
-#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
-    }
     {
       Context::Scope cscope(context);
       options.isolate_sources[0].Execute();
     }
-    if (!options.last_run) {
+    if (options.last_run) {
+      // Keep using the same context in the interactive shell
+      evaluation_context_ = context;
+    } else {
       context.Dispose();
-#if !defined(V8_SHARED)
-      if (i::FLAG_send_idle_notification) {
-        const int kLongIdlePauseInMs = 1000;
-        V8::ContextDisposedNotification();
-        V8::IdleNotification(kLongIdlePauseInMs);
-      }
-#endif  // !V8_SHARED
     }
 
 #ifndef V8_SHARED
@@ -1492,15 +1316,6 @@
     }
     printf("======== Full Deoptimization =======\n");
     Testing::DeoptimizeAll();
-#if !defined(V8_SHARED)
-  } else if (i::FLAG_stress_runs > 0) {
-    int stress_runs = i::FLAG_stress_runs;
-    for (int i = 0; i < stress_runs && result == 0; i++) {
-      printf("============ Run %d/%d ============\n", i + 1, stress_runs);
-      options.last_run = (i == stress_runs - 1);
-      result = RunMain(argc, argv);
-    }
-#endif
   } else {
     result = RunMain(argc, argv);
   }
@@ -1521,11 +1336,9 @@
   if (( options.interactive_shell
       || !options.script_executed )
       && !options.test_shell ) {
-#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
-    if (!i::FLAG_debugger) {
-      InstallUtilityScript();
-    }
-#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+#ifndef V8_SHARED
+    InstallUtilityScript();
+#endif  // V8_SHARED
     RunShell();
   }
 
diff --git a/src/d8.gyp b/src/d8.gyp
index a8361e6..70186cf 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -1,4 +1,4 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
+# Copyright 2010 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -41,6 +41,9 @@
       'include_dirs+': [
         '../src',
       ],
+      'defines': [
+        'ENABLE_DEBUGGER_SUPPORT',
+      ],
       'sources': [
         'd8.cc',
       ],
@@ -61,8 +64,8 @@
               'libraries': [ '-lreadline', ],
               'sources': [ 'd8-readline.cc' ],
             }],
-            ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
-               or OS=="openbsd" or OS=="solaris" or OS=="android")', {
+            [ '(OS=="linux" or OS=="mac" or OS=="freebsd" \
+              or OS=="openbsd" or OS=="solaris")', {
               'sources': [ 'd8-posix.cc', ]
             }],
             [ 'OS=="win"', {
diff --git a/src/d8.h b/src/d8.h
index c872f90..15d8d5d 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -116,13 +116,14 @@
 #endif  // V8_SHARED
 
 
+#ifndef V8_SHARED
 class LineEditor {
  public:
   enum Type { DUMB = 0, READLINE = 1 };
   LineEditor(Type type, const char* name);
   virtual ~LineEditor() { }
 
-  virtual Handle<String> Prompt(const char* prompt) = 0;
+  virtual i::SmartArrayPointer<char> Prompt(const char* prompt) = 0;
   virtual bool Open() { return true; }
   virtual bool Close() { return true; }
   virtual void AddHistory(const char* str) { }
@@ -135,6 +136,7 @@
   LineEditor* next_;
   static LineEditor* first_;
 };
+#endif  // V8_SHARED
 
 
 class SourceGroup {
@@ -195,27 +197,6 @@
 };
 
 
-class BinaryResource : public v8::String::ExternalAsciiStringResource {
- public:
-  BinaryResource(const char* string, int length)
-      : data_(string),
-        length_(length) { }
-
-  ~BinaryResource() {
-    delete[] data_;
-    data_ = NULL;
-    length_ = 0;
-  }
-
-  virtual const char* data() const { return data_; }
-  virtual size_t length() const { return length_; }
-
- private:
-  const char* data_;
-  size_t length_;
-};
-
-
 class ShellOptions {
  public:
   ShellOptions() :
@@ -287,13 +268,12 @@
                                size_t buckets);
   static void AddHistogramSample(void* histogram, int sample);
   static void MapCounters(const char* name);
+#endif  // V8_SHARED
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Handle<Object> DebugMessageDetails(Handle<String> message);
   static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
-  static void DispatchDebugMessages();
-#endif  // ENABLE_DEBUGGER_SUPPORT
-#endif  // V8_SHARED
+#endif
 
 #ifdef WIN32
 #undef Yield
@@ -307,13 +287,8 @@
   static Handle<Value> EnableProfiler(const Arguments& args);
   static Handle<Value> DisableProfiler(const Arguments& args);
   static Handle<Value> Read(const Arguments& args);
-  static Handle<Value> ReadBinary(const Arguments& args);
-  static Handle<String> ReadFromStdin();
-  static Handle<Value> ReadLine(const Arguments& args) {
-    return ReadFromStdin();
-  }
+  static Handle<Value> ReadLine(const Arguments& args);
   static Handle<Value> Load(const Arguments& args);
-  static Handle<Value> ArrayBuffer(const Arguments& args);
   static Handle<Value> Int8Array(const Arguments& args);
   static Handle<Value> Uint8Array(const Arguments& args);
   static Handle<Value> Int16Array(const Arguments& args);
@@ -359,8 +334,11 @@
   static Handle<Value> RemoveDirectory(const Arguments& args);
 
   static void AddOSMethods(Handle<ObjectTemplate> os_template);
-
+#ifndef V8_SHARED
+  static const char* kHistoryFileName;
+  static const int kMaxHistoryEntries;
   static LineEditor* console;
+#endif  // V8_SHARED
   static const char* kPrompt;
   static ShellOptions options;
 
diff --git a/src/d8.js b/src/d8.js
index bf26923..3009037 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -25,14 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"use strict";
-
 String.prototype.startsWith = function (str) {
-  if (str.length > this.length) {
+  if (str.length > this.length)
     return false;
-  }
   return this.substr(0, str.length) == str;
-};
+}
 
 function log10(num) {
   return Math.log(num)/Math.log(10);
@@ -55,9 +52,8 @@
   for (var i = 0; i < parts.length; i++) {
     var part = parts[i];
     var next = current[part];
-    if (!next) {
+    if (!next)
       return [];
-    }
     current = next;
   }
   var result = [];
@@ -67,9 +63,8 @@
     var properties = mirror.properties();
     for (var i = 0; i < properties.length; i++) {
       var name = properties[i].name();
-      if (typeof name === 'string' && name.startsWith(last)) {
+      if (typeof name === 'string' && name.startsWith(last))
         result.push(name);
-      }
     }
     current = ToInspectableObject(current.__proto__);
   }
@@ -78,7 +73,7 @@
 
 
 // Global object holding debugger related constants and state.
-var Debug = {};
+const Debug = {};
 
 
 // Debug events which can occur in the V8 JavaScript engine. These originate
@@ -113,24 +108,22 @@
 
 
 // Current debug state.
-var kNoFrame = -1;
+const kNoFrame = -1;
 Debug.State = {
   currentFrame: kNoFrame,
   displaySourceStartLine: -1,
   displaySourceEndLine: -1,
   currentSourceLine: -1
-};
+}
 var trace_compile = false;  // Tracing all compile events?
 var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd = '';
+var last_cmd_line = '';
 //var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
 var lol_next_dump_index = 0;
-var kDefaultLolLinesToPrintAtATime = 10;
-var kMaxLolLinesToPrintAtATime = 1000;
+const kDefaultLolLinesToPrintAtATime = 10;
+const kMaxLolLinesToPrintAtATime = 1000;
 var repeat_cmd_line = '';
 var is_running = true;
-// Global variable used to store whether a handle was requested.
-var lookup_handle = null;
 
 // Copied from debug-delay.js.  This is needed below:
 function ScriptTypeFlag(type) {
@@ -157,7 +150,7 @@
 }
 
 function DebugEventDetails(response) {
-  var details = {text:'', running:false};
+  details = {text:'', running:false}
 
   // Get the running state.
   details.running = response.running();
@@ -224,7 +217,7 @@
 
     case 'afterCompile':
       if (trace_compile) {
-        result = 'Source ' + body.script.name + ' compiled:\n';
+        result = 'Source ' + body.script.name + ' compiled:\n'
         var source = body.script.source;
         if (!(source[source.length - 1] == '\n')) {
           result += source;
@@ -244,7 +237,7 @@
   }
 
   return details;
-}
+};
 
 
 function SourceInfo(body) {
@@ -286,7 +279,7 @@
 
   // Return the source line text with the underline beneath.
   return source_text + '\n' + underline;
-}
+};
 
 
 // Converts a text command to a JSON request.
@@ -296,7 +289,7 @@
     print("sending: '" + result + "'");
   }
   return result;
-}
+};
 
 
 function DebugRequest(cmd_line) {
@@ -521,7 +514,7 @@
 
 DebugRequest.prototype.JSONRequest = function() {
   return this.request_;
-};
+}
 
 
 function RequestPacket(command) {
@@ -543,14 +536,14 @@
     json += ',"arguments":';
     // Encode the arguments part.
     if (this.arguments.toJSONProtocol) {
-      json += this.arguments.toJSONProtocol();
+      json += this.arguments.toJSONProtocol()
     } else {
       json += SimpleObjectToJSON_(this.arguments);
     }
   }
   json += '}';
   return json;
-};
+}
 
 
 DebugRequest.prototype.createRequest = function(command) {
@@ -590,6 +583,7 @@
 
 // Create a JSON request for the evaluation command.
 DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
+  // Global variable used to store whether a handle was requested.
   lookup_handle = null;
 
   if (lol_is_enabled) {
@@ -1316,7 +1310,7 @@
   }
 
   return request;
-};
+}
 
 
 function extractObjId(args) {
@@ -1505,7 +1499,7 @@
   } else {
     throw new Error('Invalid trace arguments.');
   }
-};
+}
 
 // Handle the help command.
 DebugRequest.prototype.helpCommand_ = function(args) {
@@ -1614,7 +1608,7 @@
   print('');
   print('disconnect|exit|quit       - disconnects and quits the debugger');
   print('help                       - prints this help information');
-};
+}
 
 
 function formatHandleReference_(value) {
@@ -1629,7 +1623,7 @@
 function formatObject_(value, include_properties) {
   var result = '';
   result += formatHandleReference_(value);
-  result += ', type: object';
+  result += ', type: object'
   result += ', constructor ';
   var ctor = value.constructorFunctionValue();
   result += formatHandleReference_(ctor);
@@ -1949,7 +1943,7 @@
 
 // Convert a JSON response to text for display in a text based debugger.
 function DebugResponseDetails(response) {
-  var details = { text: '', running: false };
+  details = {text:'', running:false}
 
   try {
     if (!response.success()) {
@@ -2314,7 +2308,7 @@
   }
 
   return details;
-}
+};
 
 
 /**
@@ -2340,7 +2334,7 @@
  */
 ProtocolPackage.prototype.type = function() {
   return this.packet_.type;
-};
+}
 
 
 /**
@@ -2349,7 +2343,7 @@
  */
 ProtocolPackage.prototype.event = function() {
   return this.packet_.event;
-};
+}
 
 
 /**
@@ -2358,7 +2352,7 @@
  */
 ProtocolPackage.prototype.requestSeq = function() {
   return this.packet_.request_seq;
-};
+}
 
 
 /**
@@ -2367,27 +2361,27 @@
  */
 ProtocolPackage.prototype.running = function() {
   return this.packet_.running ? true : false;
-};
+}
 
 
 ProtocolPackage.prototype.success = function() {
   return this.packet_.success ? true : false;
-};
+}
 
 
 ProtocolPackage.prototype.message = function() {
   return this.packet_.message;
-};
+}
 
 
 ProtocolPackage.prototype.command = function() {
   return this.packet_.command;
-};
+}
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-};
+}
 
 
 ProtocolPackage.prototype.bodyValue = function(index) {
@@ -2396,12 +2390,12 @@
   } else {
     return new ProtocolValue(this.packet_.body, this);
   }
-};
+}
 
 
 ProtocolPackage.prototype.body = function() {
   return this.packet_.body;
-};
+}
 
 
 ProtocolPackage.prototype.lookup = function(handle) {
@@ -2411,12 +2405,12 @@
   } else {
     return new ProtocolReference(handle);
   }
-};
+}
 
 
 ProtocolPackage.prototype.raw_json = function() {
   return this.raw_json_;
-};
+}
 
 
 function ProtocolValue(value, packet) {
@@ -2431,7 +2425,7 @@
  */
 ProtocolValue.prototype.type = function() {
   return this.value_.type;
-};
+}
 
 
 /**
@@ -2440,7 +2434,7 @@
  */
 ProtocolValue.prototype.field = function(name) {
   return this.value_[name];
-};
+}
 
 
 /**
@@ -2450,7 +2444,7 @@
 ProtocolValue.prototype.isPrimitive = function() {
   return this.isUndefined() || this.isNull() || this.isBoolean() ||
          this.isNumber() || this.isString();
-};
+}
 
 
 /**
@@ -2459,7 +2453,7 @@
  */
 ProtocolValue.prototype.handle = function() {
   return this.value_.handle;
-};
+}
 
 
 /**
@@ -2468,7 +2462,7 @@
  */
 ProtocolValue.prototype.isUndefined = function() {
   return this.value_.type == 'undefined';
-};
+}
 
 
 /**
@@ -2477,7 +2471,7 @@
  */
 ProtocolValue.prototype.isNull = function() {
   return this.value_.type == 'null';
-};
+}
 
 
 /**
@@ -2486,7 +2480,7 @@
  */
 ProtocolValue.prototype.isBoolean = function() {
   return this.value_.type == 'boolean';
-};
+}
 
 
 /**
@@ -2495,7 +2489,7 @@
  */
 ProtocolValue.prototype.isNumber = function() {
   return this.value_.type == 'number';
-};
+}
 
 
 /**
@@ -2504,7 +2498,7 @@
  */
 ProtocolValue.prototype.isString = function() {
   return this.value_.type == 'string';
-};
+}
 
 
 /**
@@ -2514,7 +2508,7 @@
 ProtocolValue.prototype.isObject = function() {
   return this.value_.type == 'object' || this.value_.type == 'function' ||
          this.value_.type == 'error' || this.value_.type == 'regexp';
-};
+}
 
 
 /**
@@ -2524,7 +2518,7 @@
 ProtocolValue.prototype.constructorFunctionValue = function() {
   var ctor = this.value_.constructorFunction;
   return this.packet_.lookup(ctor.ref);
-};
+}
 
 
 /**
@@ -2534,7 +2528,7 @@
 ProtocolValue.prototype.protoObjectValue = function() {
   var proto = this.value_.protoObject;
   return this.packet_.lookup(proto.ref);
-};
+}
 
 
 /**
@@ -2543,7 +2537,7 @@
  */
 ProtocolValue.prototype.propertyCount = function() {
   return this.value_.properties ? this.value_.properties.length : 0;
-};
+}
 
 
 /**
@@ -2553,7 +2547,7 @@
 ProtocolValue.prototype.propertyName = function(index) {
   var property = this.value_.properties[index];
   return property.name;
-};
+}
 
 
 /**
@@ -2568,7 +2562,7 @@
     }
   }
   return null;
-};
+}
 
 
 /**
@@ -2578,7 +2572,7 @@
 ProtocolValue.prototype.propertyValue = function(index) {
   var property = this.value_.properties[index];
   return this.packet_.lookup(property.ref);
-};
+}
 
 
 /**
@@ -2587,12 +2581,12 @@
  */
 ProtocolValue.prototype.value = function() {
   return this.value_.value;
-};
+}
 
 
 ProtocolValue.prototype.valueString = function() {
   return this.value_.text;
-};
+}
 
 
 function ProtocolReference(handle) {
@@ -2602,7 +2596,7 @@
 
 ProtocolReference.prototype.handle = function() {
   return this.handle_;
-};
+}
 
 
 function MakeJSONPair_(name, value) {
@@ -2632,7 +2626,7 @@
 
 // Mapping of some control characters to avoid the \uXXXX syntax for most
 // commonly used control characters.
-var ctrlCharMap_ = {
+const ctrlCharMap_ = {
   '\b': '\\b',
   '\t': '\\t',
   '\n': '\\n',
@@ -2644,12 +2638,12 @@
 
 
 // Regular expression testing for ", \ and control characters (0x00 - 0x1F).
-var ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
+const ctrlCharTest_ = new RegExp('["\\\\\x00-\x1F]');
 
 
 // Regular expression matching ", \ and control characters (0x00 - 0x1F)
 // globally.
-var ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
+const ctrlCharMatch_ = new RegExp('["\\\\\x00-\x1F]', 'g');
 
 
 /**
@@ -2673,7 +2667,7 @@
         // Convert control character to unicode escape sequence.
         return '\\u00' +
           '0' + // TODO %NumberToRadixString(Math.floor(mapped / 16), 16) +
-          '0'; // TODO %NumberToRadixString(mapped % 16, 16)
+          '0' // TODO %NumberToRadixString(mapped % 16, 16);
       })
     + '"';
   }
@@ -2691,12 +2685,12 @@
  * @return {string} JSON formatted Date value
  */
 function DateToISO8601_(value) {
-  var f = function(n) {
+  function f(n) {
     return n < 10 ? '0' + n : n;
-  };
-  var g = function(n) {
+  }
+  function g(n) {
     return n < 10 ? '00' + n : n < 100 ? '0' + n : n;
-  };
+  }
   return builtins.GetUTCFullYearFrom(value)         + '-' +
           f(builtins.GetUTCMonthFrom(value) + 1)    + '-' +
           f(builtins.GetUTCDateFrom(value))         + 'T' +
@@ -2744,7 +2738,7 @@
           if (property_value === null) {
             property_value_json = 'null';
           } else if (typeof property_value.toJSONProtocol == 'function') {
-            property_value_json = property_value.toJSONProtocol(true);
+            property_value_json = property_value.toJSONProtocol(true)
           } else if (property_value.constructor.name == 'Array'){
             property_value_json = SimpleArrayToJSON_(property_value);
           } else {
@@ -2795,7 +2789,7 @@
     }
     var elem = array[i];
     if (elem.toJSONProtocol) {
-      json += elem.toJSONProtocol(true);
+      json += elem.toJSONProtocol(true)
     } else if (typeof(elem) === 'object')  {
       json += SimpleObjectToJSON_(elem);
     } else if (typeof(elem) === 'boolean')  {
diff --git a/src/data-flow.h b/src/data-flow.h
index 71f56e7..d69d6c7 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -85,18 +85,18 @@
     friend class BitVector;
   };
 
-  BitVector(int length, Zone* zone)
+  explicit BitVector(int length)
       : length_(length),
         data_length_(SizeFor(length)),
-        data_(zone->NewArray<uint32_t>(data_length_)) {
+        data_(ZONE->NewArray<uint32_t>(data_length_)) {
     ASSERT(length > 0);
     Clear();
   }
 
-  BitVector(const BitVector& other, Zone* zone)
+  BitVector(const BitVector& other)
       : length_(other.length()),
         data_length_(SizeFor(length_)),
-        data_(zone->NewArray<uint32_t>(data_length_)) {
+        data_(ZONE->NewArray<uint32_t>(data_length_)) {
     CopyFrom(other);
   }
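
For reference, the data_length_ initializer in both constructors relies on SizeFor rounding the bit count up to whole 32-bit words; SizeFor's body is not shown in this hunk, so this is an inferred one-line sketch of that math:

    // Sketch: one uint32_t of backing store per 32 bits, rounded up.
    static int SizeForSketch(int length) {
      return (length + 31) / 32;  // ceil(length / 32); SizeForSketch(33) == 2
    }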
 
diff --git a/src/date.cc b/src/date.cc
deleted file mode 100644
index a377451..0000000
--- a/src/date.cc
+++ /dev/null
@@ -1,384 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "date.h"
-
-#include "v8.h"
-
-#include "objects.h"
-#include "objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
-static const int kDaysIn4Years = 4 * 365 + 1;
-static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
-static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
-static const int kDays1970to2000 = 30 * 365 + 7;
-static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
-                               kDays1970to2000;
-static const int kYearsOffset = 400000;
-static const char kDaysInMonths[] =
-    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
-
-void DateCache::ResetDateCache() {
-  static const int kMaxStamp = Smi::kMaxValue;
-  stamp_ = Smi::FromInt(stamp_->value() + 1);
-  if (stamp_->value() > kMaxStamp) {
-    stamp_ = Smi::FromInt(0);
-  }
-  ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
-  for (int i = 0; i < kDSTSize; ++i) {
-    ClearSegment(&dst_[i]);
-  }
-  dst_usage_counter_ = 0;
-  before_ = &dst_[0];
-  after_ = &dst_[1];
-  local_offset_ms_ = kInvalidLocalOffsetInMs;
-  ymd_valid_ = false;
-}
-
-
-void DateCache::ClearSegment(DST* segment) {
-  segment->start_sec = kMaxEpochTimeInSec;
-  segment->end_sec = -kMaxEpochTimeInSec;
-  segment->offset_ms = 0;
-  segment->last_used = 0;
-}
-
-
-void DateCache::YearMonthDayFromDays(
-    int days, int* year, int* month, int* day) {
-  if (ymd_valid_) {
-    // Check conservatively if the given 'days' has
-    // the same year and month as the cached 'days'.
-    int new_day = ymd_day_ + (days - ymd_days_);
-    if (new_day >= 1 && new_day <= 28) {
-      ymd_day_ = new_day;
-      ymd_days_ = days;
-      *year = ymd_year_;
-      *month = ymd_month_;
-      *day = new_day;
-      return;
-    }
-  }
-  int save_days = days;
-
-  days += kDaysOffset;
-  *year = 400 * (days / kDaysIn400Years) - kYearsOffset;
-  days %= kDaysIn400Years;
-
-  ASSERT(DaysFromYearMonth(*year, 0) + days == save_days);
-
-  days--;
-  int yd1 = days / kDaysIn100Years;
-  days %= kDaysIn100Years;
-  *year += 100 * yd1;
-
-  days++;
-  int yd2 = days / kDaysIn4Years;
-  days %= kDaysIn4Years;
-  *year += 4 * yd2;
-
-  days--;
-  int yd3 = days / 365;
-  days %= 365;
-  *year += yd3;
-
-
-  bool is_leap = (!yd1 || yd2) && !yd3;
-
-  ASSERT(days >= -1);
-  ASSERT(is_leap || (days >= 0));
-  ASSERT((days < 365) || (is_leap && (days < 366)));
-  ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
-  ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
-  ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
-
-  days += is_leap;
-
-  // Check if the date is after February.
-  if (days >= 31 + 28 + is_leap) {
-    days -= 31 + 28 + is_leap;
-    // Find the date starting from March.
-    for (int i = 2; i < 12; i++) {
-      if (days < kDaysInMonths[i]) {
-        *month = i;
-        *day = days + 1;
-        break;
-      }
-      days -= kDaysInMonths[i];
-    }
-  } else {
-    // Check January and February.
-    if (days < 31) {
-      *month = 0;
-      *day = days + 1;
-    } else {
-      *month = 1;
-      *day = days - 31 + 1;
-    }
-  }
-  ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
-  ymd_valid_ = true;
-  ymd_year_ = *year;
-  ymd_month_ = *month;
-  ymd_day_ = *day;
-  ymd_days_ = save_days;
-}
-
-
-int DateCache::DaysFromYearMonth(int year, int month) {
-  static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
-                                       181, 212, 243, 273, 304, 334};
-  static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
-                                            182, 213, 244, 274, 305, 335};
-
-  year += month / 12;
-  month %= 12;
-  if (month < 0) {
-    year--;
-    month += 12;
-  }
-
-  ASSERT(month >= 0);
-  ASSERT(month < 12);
-
-  // year_delta is an arbitrary number such that:
-  // a) year_delta = -1 (mod 400)
-  // b) year + year_delta > 0 for years in the range defined by
-//    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
-  //    Jan 1 1970. This is required so that we don't run into integer
-  //    division of negative numbers.
-  // c) there shouldn't be an overflow for 32-bit integers in the following
-  //    operations.
-  static const int year_delta = 399999;
-  static const int base_day = 365 * (1970 + year_delta) +
-                              (1970 + year_delta) / 4 -
-                              (1970 + year_delta) / 100 +
-                              (1970 + year_delta) / 400;
-
-  int year1 = year + year_delta;
-  int day_from_year = 365 * year1 +
-                      year1 / 4 -
-                      year1 / 100 +
-                      year1 / 400 -
-                      base_day;
-
-  if ((year % 4 != 0) || (year % 100 == 0 && year % 400 != 0)) {
-    return day_from_year + day_from_month[month];
-  }
-  return day_from_year + day_from_month_leap[month];
-}
-
-
-void DateCache::ExtendTheAfterSegment(int time_sec, int offset_ms) {
-  if (after_->offset_ms == offset_ms &&
-      after_->start_sec <= time_sec + kDefaultDSTDeltaInSec &&
-      time_sec <= after_->end_sec) {
-    // Extend the after_ segment.
-    after_->start_sec = time_sec;
-  } else {
-    // The after_ segment is either invalid or starts too late.
-    if (after_->start_sec <= after_->end_sec) {
-      // If the after_ segment is valid, replace it with a new segment.
-      after_ = LeastRecentlyUsedDST(before_);
-    }
-    after_->start_sec = time_sec;
-    after_->end_sec = time_sec;
-    after_->offset_ms = offset_ms;
-    after_->last_used = ++dst_usage_counter_;
-  }
-}
-
-
-int DateCache::DaylightSavingsOffsetInMs(int64_t time_ms) {
-  int time_sec = (time_ms >= 0 && time_ms <= kMaxEpochTimeInMs)
-      ? static_cast<int>(time_ms / 1000)
-      : static_cast<int>(EquivalentTime(time_ms) / 1000);
-
-  // Invalidate cache if the usage counter is close to overflow.
-  // Note that dst_usage_counter is incremented less than ten times
-  // in this function.
-  if (dst_usage_counter_ >= kMaxInt - 10) {
-    dst_usage_counter_ = 0;
-    for (int i = 0; i < kDSTSize; ++i) {
-      ClearSegment(&dst_[i]);
-    }
-  }
-
-  // Optimistic fast check.
-  if (before_->start_sec <= time_sec &&
-      time_sec <= before_->end_sec) {
-    // Cache hit.
-    before_->last_used = ++dst_usage_counter_;
-    return before_->offset_ms;
-  }
-
-  ProbeDST(time_sec);
-
-  ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec);
-  ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec);
-
-  if (InvalidSegment(before_)) {
-    // Cache miss.
-    before_->start_sec = time_sec;
-    before_->end_sec = time_sec;
-    before_->offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
-    before_->last_used = ++dst_usage_counter_;
-    return before_->offset_ms;
-  }
-
-  if (time_sec <= before_->end_sec) {
-    // Cache hit.
-    before_->last_used = ++dst_usage_counter_;
-    return before_->offset_ms;
-  }
-
-  if (time_sec > before_->end_sec + kDefaultDSTDeltaInSec) {
-    // If the before_ segment ends too early, then just
-    // query for the offset of the time_sec
-    int offset_ms = GetDaylightSavingsOffsetFromOS(time_sec);
-    ExtendTheAfterSegment(time_sec, offset_ms);
-    // This swap helps the optimistic fast check in subsequent invocations.
-    DST* temp = before_;
-    before_ = after_;
-    after_ = temp;
-    return offset_ms;
-  }
-
-  // Now the time_sec is between
-  // before_->end_sec and before_->end_sec + default DST delta.
-  // Update the usage counter of before_ since it is going to be used.
-  before_->last_used = ++dst_usage_counter_;
-
-  // Check if after_ segment is invalid or starts too late.
-  // Note that start_sec of invalid segments is kMaxEpochTimeInSec.
-  if (before_->end_sec + kDefaultDSTDeltaInSec <= after_->start_sec) {
-    int new_after_start_sec = before_->end_sec + kDefaultDSTDeltaInSec;
-    int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
-    ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
-  } else {
-    ASSERT(!InvalidSegment(after_));
-    // Update the usage counter of after_ since it is going to be used.
-    after_->last_used = ++dst_usage_counter_;
-  }
-
-  // Now the time_sec is between before_->end_sec and after_->start_sec.
-  // Only one daylight savings offset change can occur in this interval.
-
-  if (before_->offset_ms == after_->offset_ms) {
-    // Merge two segments if they have the same offset.
-    before_->end_sec = after_->end_sec;
-    ClearSegment(after_);
-    return before_->offset_ms;
-  }
-
-  // Binary search for daylight savings offset change point,
-  // but give up if we don't find it in four iterations.
-  for (int i = 4; i >= 0; --i) {
-    int delta = after_->start_sec - before_->end_sec;
-    int middle_sec = (i == 0) ? time_sec : before_->end_sec + delta / 2;
-    int offset_ms = GetDaylightSavingsOffsetFromOS(middle_sec);
-    if (before_->offset_ms == offset_ms) {
-      before_->end_sec = middle_sec;
-      if (time_sec <= before_->end_sec) {
-        return offset_ms;
-      }
-    } else {
-      ASSERT(after_->offset_ms == offset_ms);
-      after_->start_sec = middle_sec;
-      if (time_sec >= after_->start_sec) {
-        // This swap helps the optimistic fast check in subsequent invocations.
-        DST* temp = before_;
-        before_ = after_;
-        after_ = temp;
-        return offset_ms;
-      }
-    }
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-
-void DateCache::ProbeDST(int time_sec) {
-  DST* before = NULL;
-  DST* after = NULL;
-  ASSERT(before_ != after_);
-
-  for (int i = 0; i < kDSTSize; ++i) {
-    if (dst_[i].start_sec <= time_sec) {
-      if (before == NULL || before->start_sec < dst_[i].start_sec) {
-        before = &dst_[i];
-      }
-    } else if (time_sec < dst_[i].end_sec) {
-      if (after == NULL || after->end_sec > dst_[i].end_sec) {
-        after = &dst_[i];
-      }
-    }
-  }
-
-  // If before or after segments were not found,
-  // then set them to any invalid segment.
-  if (before == NULL) {
-    before = InvalidSegment(before_) ? before_ : LeastRecentlyUsedDST(after);
-  }
-  if (after == NULL) {
-    after = InvalidSegment(after_) && before != after_
-            ? after_ : LeastRecentlyUsedDST(before);
-  }
-
-  ASSERT(before != NULL);
-  ASSERT(after != NULL);
-  ASSERT(before != after);
-  ASSERT(InvalidSegment(before) || before->start_sec <= time_sec);
-  ASSERT(InvalidSegment(after) || time_sec < after->start_sec);
-  ASSERT(InvalidSegment(before) || InvalidSegment(after) ||
-         before->end_sec < after->start_sec);
-
-  before_ = before;
-  after_ = after;
-}
-
-
-DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
-  DST* result = NULL;
-  for (int i = 0; i < kDSTSize; ++i) {
-    if (&dst_[i] == skip) continue;
-    if (result == NULL || result->last_used > dst_[i].last_used) {
-      result = &dst_[i];
-    }
-  }
-  ClearSegment(result);
-  return result;
-}
-
-} }  // namespace v8::internal
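
The replacement policy deleted with this file is a straightforward least-recently-used scan over the fixed table of kDSTSize segments, with one entry pinned so that before_ and after_ can never alias each other. A hedged JavaScript sketch of the same selection, where segments and lastUsed are hypothetical mirrors of the C++ dst_ array and DST struct:

// Pick the least-recently-used segment, never returning 'skip'.
function leastRecentlyUsedDST(segments, skip) {
  var result = null;
  for (var i = 0; i < segments.length; i++) {
    if (segments[i] === skip) continue;
    if (result === null || segments[i].lastUsed < result.lastUsed) {
      result = segments[i];
    }
  }
  return result;  // the caller clears it before reuse, as ClearSegment does
}
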
diff --git a/src/date.h b/src/date.h
deleted file mode 100644
index fcd61db..0000000
--- a/src/date.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_DATE_H_
-#define V8_DATE_H_
-
-#include "allocation.h"
-#include "globals.h"
-#include "platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-class DateCache {
- public:
-  static const int kMsPerMin = 60 * 1000;
-  static const int kSecPerDay = 24 * 60 * 60;
-  static const int64_t kMsPerDay = kSecPerDay * 1000;
-
-  // The largest time that can be passed to OS date-time library functions.
-  static const int kMaxEpochTimeInSec = kMaxInt;
-  static const int64_t kMaxEpochTimeInMs =
-      static_cast<int64_t>(kMaxInt) * 1000;
-
-  // The largest time that can be stored in JSDate.
-  static const int64_t kMaxTimeInMs =
-      static_cast<int64_t>(864000000) * 10000000;
-
-  // Conservative upper bound on time that can be stored in JSDate
-  // before UTC conversion.
-  static const int64_t kMaxTimeBeforeUTCInMs =
-      kMaxTimeInMs + 10 * kMsPerDay;
-
-  // Sentinel that denotes an invalid local offset.
-  static const int kInvalidLocalOffsetInMs = kMaxInt;
-  // Sentinel that denotes an invalid cache stamp.
-  // It is an invariant of DateCache that cache stamp is non-negative.
-  static const int kInvalidStamp = -1;
-
-  DateCache() : stamp_(0) {
-    ResetDateCache();
-  }
-
-  virtual ~DateCache() {}
-
-
-  // Clears cached timezone information and increments the cache stamp.
-  void ResetDateCache();
-
-
-  // Computes floor(time_ms / kMsPerDay).
-  static int DaysFromTime(int64_t time_ms) {
-    if (time_ms < 0) time_ms -= (kMsPerDay - 1);
-    return static_cast<int>(time_ms / kMsPerDay);
-  }
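
The bias applied to negative inputs turns C++ truncating division into floor division; for time_ms = -1 a plain quotient would give day 0, while the biased form gives day -1 as the calendar requires. In JavaScript the same result falls out of Math.floor, shown here only to illustrate the arithmetic:

// Floor division: daysFromTime(-1) is -1, daysFromTime(0) is 0.
function daysFromTime(timeMs) {
  var msPerDay = 24 * 60 * 60 * 1000;
  return Math.floor(timeMs / msPerDay);
}
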
-
-
-  // Computes modulo(time_ms, kMsPerDay) given that
-  // days = floor(time_ms / kMsPerDay).
-  static int TimeInDay(int64_t time_ms, int days) {
-    return static_cast<int>(time_ms - days * kMsPerDay);
-  }
-
-
-  // Given the number of days since the epoch, computes the weekday.
-  // ECMA 262 - 15.9.1.6.
-  int Weekday(int days) {
-    int result = (days + 4) % 7;
-    return result >= 0 ? result : result + 7;
-  }
-
-
-  bool IsLeap(int year) {
-    return year % 4 == 0 && (year % 100 != 0 || year % 400 == 0);
-  }
-
-
-  // ECMA 262 - 15.9.1.7.
-  int LocalOffsetInMs() {
-    if (local_offset_ms_ == kInvalidLocalOffsetInMs)  {
-      local_offset_ms_ = GetLocalOffsetFromOS();
-    }
-    return local_offset_ms_;
-  }
-
-
-  const char* LocalTimezone(int64_t time_ms) {
-    if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
-      time_ms = EquivalentTime(time_ms);
-    }
-    return OS::LocalTimezone(static_cast<double>(time_ms));
-  }
-
-  // ECMA 262 - 15.9.5.26
-  int TimezoneOffset(int64_t time_ms) {
-    int64_t local_ms = ToLocal(time_ms);
-    return static_cast<int>((time_ms - local_ms) / kMsPerMin);
-  }
-
-  // ECMA 262 - 15.9.1.9
-  int64_t ToLocal(int64_t time_ms) {
-    return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
-  }
-
-  // ECMA 262 - 15.9.1.9
-  int64_t ToUTC(int64_t time_ms) {
-    time_ms -= LocalOffsetInMs();
-    return time_ms - DaylightSavingsOffsetInMs(time_ms);
-  }
-
-
-  // Computes a time equivalent to the given time according
-  // to ECMA 262 - 15.9.1.9.
-  // The issue here is that some library calls don't work right for dates
-  // that cannot be represented using a non-negative signed 32-bit integer
-  // (measured in whole seconds based on the 1970 epoch).
-  // We solve this by mapping the time to a year with same leap-year-ness
-  // and same starting day for the year. The ECMAScript specification says
-  // we must do this, but for compatibility with other browsers, we use
-  // the actual year if it is in the range 1970..2037.
-  int64_t EquivalentTime(int64_t time_ms) {
-    int days = DaysFromTime(time_ms);
-    int time_within_day_ms = static_cast<int>(time_ms - days * kMsPerDay);
-    int year, month, day;
-    YearMonthDayFromDays(days, &year, &month, &day);
-    int new_days = DaysFromYearMonth(EquivalentYear(year), month) + day - 1;
-    return static_cast<int64_t>(new_days) * kMsPerDay + time_within_day_ms;
-  }
-
-  // Returns an equivalent year in the range [2008-2035] matching
-  // - leap year,
-  // - week day of first day.
-  // ECMA 262 - 15.9.1.9.
-  int EquivalentYear(int year) {
-    int week_day = Weekday(DaysFromYearMonth(year, 0));
-    int recent_year = (IsLeap(year) ? 1956 : 1967) + (week_day * 12) % 28;
-    // Find the year in the range 2008..2037 that is equivalent mod 28.
-    // Add 3*28 to give a positive argument to the modulus operator.
-    return 2008 + (recent_year + 3 * 28 - 2008) % 28;
-  }
-
-  // Given the number of days since the epoch, computes
-  // the corresponding year, month, and day.
-  void YearMonthDayFromDays(int days, int* year, int* month, int* day);
-
-  // Computes the number of days since the epoch for
-  // the first day of the given month in the given year.
-  int DaysFromYearMonth(int year, int month);
-
-  // Cache stamp is used for invalidating caches in JSDate.
-  // We increment the stamp each time the timezone information changes.
-  // JSDate objects perform a stamp check and invalidate their caches if
-  // their saved stamp is not equal to the current stamp.
-  Smi* stamp() { return stamp_; }
-  void* stamp_address() { return &stamp_; }
-
-  // These functions are virtual so that we can override them when testing.
-  virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
-    double time_ms = static_cast<double>(time_sec * 1000);
-    return static_cast<int>(OS::DaylightSavingsOffset(time_ms));
-  }
-
-  virtual int GetLocalOffsetFromOS() {
-    double offset = OS::LocalTimeOffset();
-    ASSERT(offset < kInvalidLocalOffsetInMs);
-    return static_cast<int>(offset);
-  }
-
- private:
-  // The implementation relies on the fact that no time zones have
-  // more than one daylight savings offset change per 19 days.
-  // In Egypt in 2010 they decided to suspend DST during Ramadan. This
-  // led to a short interval where DST is in effect from September 10 to
-  // September 30.
-  static const int kDefaultDSTDeltaInSec = 19 * kSecPerDay;
-
-  // Size of the Daylight Savings Time cache.
-  static const int kDSTSize = 32;
-
-  // A Daylight Savings Time segment stores an interval of time over
-  // which the daylight savings offset does not change.
-  struct DST {
-    int start_sec;
-    int end_sec;
-    int offset_ms;
-    int last_used;
-  };
-
-  // Computes the daylight savings offset for the given time.
-  // ECMA 262 - 15.9.1.8
-  int DaylightSavingsOffsetInMs(int64_t time_ms);
-
-  // Sets the before_ and the after_ segments from the DST cache such that
-  // the before_ segment starts earlier than the given time and
-  // the after_ segment starts later than the given time.
-  // Both segments might be invalid.
-  // The last_used counters of the before_ and after_ are updated.
-  void ProbeDST(int time_sec);
-
-  // Finds the least recently used segment from the DST cache that is not
-  // equal to the given 'skip' segment.
-  DST* LeastRecentlyUsedDST(DST* skip);
-
-  // Extends the after_ segment with the given point or resets it
-  // if it starts later than the given time + kDefaultDSTDeltaInSec.
-  inline void ExtendTheAfterSegment(int time_sec, int offset_ms);
-
-  // Makes the given segment invalid.
-  inline void ClearSegment(DST* segment);
-
-  bool InvalidSegment(DST* segment) {
-    return segment->start_sec > segment->end_sec;
-  }
-
-  Smi* stamp_;
-
-  // Daylight Savings Time cache.
-  DST dst_[kDSTSize];
-  int dst_usage_counter_;
-  DST* before_;
-  DST* after_;
-
-  int local_offset_ms_;
-
-  // Year/Month/Day cache.
-  bool ymd_valid_;
-  int ymd_days_;
-  int ymd_year_;
-  int ymd_month_;
-  int ymd_day_;
-};
-
-} }   // namespace v8::internal
-
-#endif
diff --git a/src/date.js b/src/date.js
index 75edf6d..ccefce5 100644
--- a/src/date.js
+++ b/src/date.js
@@ -28,22 +28,190 @@
 
 // This file relies on the fact that the following declarations have been made
 // in v8natives.js:
-// var $isFinite = GlobalIsFinite;
+// const $isFinite = GlobalIsFinite;
 
 // -------------------------------------------------------------------
 
 // This file contains date support implemented in JavaScript.
 
+
 // Keep reference to original values of some global properties.  This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-var $Date = global.Date;
+const $Date = global.Date;
 
 // Helper function to throw error.
 function ThrowDateTypeError() {
   throw new $TypeError('this is not a Date object.');
 }
 
+// ECMA 262 - 5.2
+function Modulo(value, remainder) {
+  var mod = value % remainder;
+  // Guard against returning -0.
+  if (mod == 0) return 0;
+  return mod >= 0 ? mod : mod + remainder;
+}
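+
Both quirks the guard addresses are real in JavaScript: the % operator keeps the sign of the dividend and can produce -0. A few observations about the function above, usable as-is:

Modulo(-1, 7);   // 6, whereas -1 % 7 evaluates to -1
Modulo(-0, 7);   // 0, since -0 % 7 is -0 and the guard normalizes it
Modulo(10, 7);   // 3, identical to the built-in operator for positives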
+
+
+function TimeWithinDay(time) {
+  return Modulo(time, msPerDay);
+}
+
+
+// ECMA 262 - 15.9.1.3
+function DaysInYear(year) {
+  if (year % 4 != 0) return 365;
+  if ((year % 100 == 0) && (year % 400 != 0)) return 365;
+  return 366;
+}
+
+
+function DayFromYear(year) {
+  return 365 * (year-1970)
+      + FLOOR((year-1969)/4)
+      - FLOOR((year-1901)/100)
+      + FLOOR((year-1601)/400);
+}
+
+
+function TimeFromYear(year) {
+  return msPerDay * DayFromYear(year);
+}
+
+
+function InLeapYear(time) {
+  return DaysInYear(YearFromTime(time)) - 365;  // Returns 1 or 0.
+}
+
+
+// ECMA 262 - 15.9.1.9
+function EquivalentYear(year) {
+  // Returns an equivalent year in the range [2008-2035] matching
+  // - leap year,
+  // - week day of first day.
+  var time = TimeFromYear(year);
+  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
+      (WeekDay(time) * 12) % 28;
+  // Find the year in the range 2008..2037 that is equivalent mod 28.
+  // Add 3*28 to give a positive argument to the modulus operator.
+  return 2008 + (recent_year + 3*28 - 2008) % 28;
+}
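+
Concretely, the Gregorian calendar repeats every 28 years as long as no century rule intervenes, which is why a year with the same leap flag and the same weekday for January 1 can stand in for any other. Two hand-checked illustrations of the function above (offered as examples, not test vectors):

EquivalentYear(1970);  // 2015: both start on a Thursday, neither is leap
EquivalentYear(2012);  // 2012: a year already in the 2008..2035 range maps to itself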
+
+
+function EquivalentTime(t) {
+  // The issue here is that some library calls don't work right for dates
+  // that cannot be represented using a non-negative signed 32-bit integer
+  // (measured in whole seconds based on the 1970 epoch).
+  // We solve this by mapping the time to a year with same leap-year-ness
+  // and same starting day for the year. The ECMAScript specification says
+  // we must do this, but for compatibility with other browsers, we use
+  // the actual year if it is in the range 1970..2037.
+  if (t >= 0 && t <= 2.1e12) return t;
+
+  var day = MakeDay(EquivalentYear(YearFromTime(t)),
+                    MonthFromTime(t),
+                    DateFromTime(t));
+  return MakeDate(day, TimeWithinDay(t));
+}
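+
A note on the fast path: 2.1e12 ms after the epoch is roughly mid-2036, comfortably inside the non-negative 32-bit-seconds range, so every ordinary date passes through unchanged and only out-of-range times pay for the substitution. An illustration in plain numbers (hedged, not part of the patch):

// -2208988800000 ms is 1900-01-01T00:00:00Z, below the fast-path range.
var t = -2208988800000;
var e = EquivalentTime(t);
// e lands in EquivalentYear(1900), i.e. 2035; the month, day-of-month and
// time of day are preserved, so OS queries against e answer for t.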
+
+
+// local_time_offset is initialized when the DST_offset_cache misses.
+// It must not be used until after a call to DaylightSavingsOffset().
+// In this way, only one check, for a DST cache miss, is needed.
+var local_time_offset;
+
+
+// Because computing the DST offset is an expensive operation,
+// we keep a cache of the last computed DST offset along with a time interval
+// where we know the cache is valid.
+// When the cache is valid, local_time_offset is also valid.
+var DST_offset_cache = {
+  // Cached DST offset.
+  offset: 0,
+  // Time interval where the cached offset is valid.
+  start: 0, end: -1,
+  // Size of next interval expansion.
+  increment: 0,
+  initial_increment: 19 * msPerDay
+};
+
+
+// NOTE: The implementation relies on the fact that no time zones have
+// more than one daylight savings offset change per 19 days.
+//
+// In Egypt in 2010 they decided to suspend DST during Ramadan. This
+// led to a short interval where DST is in effect from September 10 to
+// September 30.
+//
+// If this function is called with NaN it returns NaN.
+function DaylightSavingsOffset(t) {
+  // Load the cache object from the builtins object.
+  var cache = DST_offset_cache;
+
+  // Cache the start and the end in local variables for fast access.
+  var start = cache.start;
+  var end = cache.end;
+
+  if (start <= t) {
+    // If the time fits in the cached interval, return the cached offset.
+    if (t <= end) return cache.offset;
+
+    // If the cache misses, the local_time_offset may not be initialized.
+    if (IS_UNDEFINED(local_time_offset)) {
+      local_time_offset = %DateLocalTimeOffset();
+    }
+
+    // Compute a possible new interval end.
+    var new_end = end + cache.increment;
+
+    if (t <= new_end) {
+      var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
+      if (cache.offset == end_offset) {
+        // If the offset at the end of the new interval still matches
+        // the offset in the cache, we grow the cached time interval
+        // and return the offset.
+        cache.end = new_end;
+        cache.increment = cache.initial_increment;
+        return end_offset;
+      } else {
+        var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+        if (offset == end_offset) {
+          // The offset at the given time is equal to the offset at the
+          // new end of the interval, which means we have just skipped
+          // the point in time where the DST offset change occurred. Update
+          // the interval to reflect this and reset the increment.
+          cache.start = t;
+          cache.end = new_end;
+          cache.increment = cache.initial_increment;
+        } else {
+          // The interval contains a DST offset change and the given time is
+          // before it. Adjust the increment to avoid a linear search for
+          // the offset change point and change the end of the interval.
+          cache.increment /= 3;
+          cache.end = t;
+        }
+        // Update the offset in the cache and return it.
+        cache.offset = offset;
+        return offset;
+      }
+    }
+  }
+
+  // If the cache misses, the local_time_offset may not be initialized.
+  if (IS_UNDEFINED(local_time_offset)) {
+    local_time_offset = %DateLocalTimeOffset();
+  }
+  // Compute the DST offset for the time and shrink the cache interval
+  // to only contain the time. This allows fast repeated DST offset
+  // computations for the same time.
+  var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+  cache.offset = offset;
+  cache.start = cache.end = t;
+  cache.increment = cache.initial_increment;
+  return offset;
+}
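+
The branches above amount to three outcomes: a hit inside [start, end] returns the cached offset; a miss just past end tries to grow the interval by increment, resetting increment to 19 days when the growth succeeds and dividing it by 3 when a transition is detected between t and the attempted new end; any other miss collapses the interval to the single point t. A self-contained sketch of the same policy, where osOffset is a hypothetical stand-in for the %DateDaylightSavingsOffset runtime call:

// Interval-growing DST cache, mirroring DaylightSavingsOffset above.
function makeDstCache(osOffset, initialIncrement) {
  var c = { offset: 0, start: 0, end: -1, increment: 0 };
  return function(t) {
    if (c.start <= t && t <= c.end) return c.offset;  // hit
    var newEnd = c.end + c.increment;
    if (c.start <= t && t <= newEnd) {
      var endOffset = osOffset(newEnd);
      if (c.offset == endOffset) {
        c.end = newEnd;  // no transition: grow the interval
        c.increment = initialIncrement;
        return endOffset;
      }
      var offset = osOffset(t);
      if (offset == endOffset) {
        c.start = t;     // transition was before t: restart the interval at t
        c.end = newEnd;
        c.increment = initialIncrement;
      } else {
        c.increment /= 3;  // transition is after t: shrink the next step
        c.end = t;
      }
      c.offset = offset;
      return offset;
    }
    c.offset = osOffset(t);  // cold miss: pin the interval to t
    c.start = c.end = t;
    c.increment = initialIncrement;
    return c.offset;
  };
}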
+
 
 var timezone_cache_time = $NaN;
 var timezone_cache_timezone;
@@ -53,18 +221,57 @@
   if (t == timezone_cache_time) {
     return timezone_cache_timezone;
   }
-  var timezone = %DateLocalTimezone(t);
+  var timezone = %DateLocalTimezone(EquivalentTime(t));
   timezone_cache_time = t;
   timezone_cache_timezone = timezone;
   return timezone;
 }
 
 
+function WeekDay(time) {
+  return Modulo(DAY(time) + 4, 7);
+}
+
+
+function LocalTime(time) {
+  if (NUMBER_IS_NAN(time)) return time;
+  // DaylightSavingsOffset called before local_time_offset used.
+  return time + DaylightSavingsOffset(time) + local_time_offset;
+}
+
+
+var ltcache = {
+  key: null,
+  val: null
+};
+
+function LocalTimeNoCheck(time) {
+  var ltc = ltcache;
+  if (%_ObjectEquals(time, ltc.key)) return ltc.val;
+
+  // Inline the DST offset cache checks for speed.
+  // The cache is hit, or DaylightSavingsOffset is called,
+  // before local_time_offset is used.
+  var cache = DST_offset_cache;
+  if (cache.start <= time && time <= cache.end) {
+    var dst_offset = cache.offset;
+  } else {
+    var dst_offset = DaylightSavingsOffset(time);
+  }
+  ltc.key = time;
+  return (ltc.val = time + local_time_offset + dst_offset);
+}
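+
ltcache is a single-entry memo keyed on the exact time value, so the common pattern of several local getters running against the same Date in a row does the offset arithmetic only once. An illustration only (the hit depends on the %_ObjectEquals intrinsic treating the two numbers as identical):

var d = new $Date(0);
d.getHours();    // miss: computes the local time and stores it in ltcache
d.getMinutes();  // hit: ltc.key matches, so the stored value is reused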
+
+
 function UTC(time) {
   if (NUMBER_IS_NAN(time)) return time;
   // local_time_offset is needed before the call to DaylightSavingsOffset,
   // so it may be uninitialized.
-  return %DateToUTC(time);
+  if (IS_UNDEFINED(local_time_offset)) {
+    local_time_offset = %DateLocalTimeOffset();
+  }
+  var tmp = time - local_time_offset;
+  return tmp - DaylightSavingsOffset(tmp);
 }
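
One subtlety in the inverse conversion: DaylightSavingsOffset is keyed on (approximately) UTC time, so the fixed zone offset is removed first and DST is then sampled at that intermediate instant. Spelled out as a worked equation with the same names as the function above:

// Forward:  local = time + local_time_offset + DaylightSavingsOffset(time)
// Inverse:  tmp = time - local_time_offset          // approximate UTC instant
//           utc = tmp - DaylightSavingsOffset(tmp)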
 
 
@@ -87,6 +294,48 @@
 }
 
 
+var ymd_from_time_cache = [$NaN, $NaN, $NaN];
+var ymd_from_time_cached_time = $NaN;
+
+function YearFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
+    }
+
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t
+  }
+
+  return ymd_from_time_cache[0];
+}
+
+function MonthFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
+    }
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t
+  }
+
+  return ymd_from_time_cache[1];
+}
+
+function DateFromTime(t) {
+  if (t !== ymd_from_time_cached_time) {
+    if (!$isFinite(t)) {
+      return $NaN;
+    }
+
+    %DateYMDFromTime(t, ymd_from_time_cache);
+    ymd_from_time_cached_time = t
+  }
+
+  return ymd_from_time_cache[2];
+}
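+
The three getters above share one cached [year, month, day] triple keyed by the last time value, so a typical formatting sequence pays for a single %DateYMDFromTime call. An illustration, where t stands for any finite time value (hypothetical, not from the patch):

YearFromTime(t);   // miss: one %DateYMDFromTime call fills the triple
MonthFromTime(t);  // hit: ymd_from_time_cached_time still equals t
DateFromTime(t);   // hit: reads the cached triple directly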
+
+
 // Compute number of days given a year, month, date.
 // Note that month and date can lie outside the normal range.
 //   For example:
@@ -102,12 +351,13 @@
   date = TO_INTEGER_MAP_MINUS_ZERO(date);
 
   if (year < kMinYear || year > kMaxYear ||
-      month < kMinMonth || month > kMaxMonth) {
+      month < kMinMonth || month > kMaxMonth ||
+      date < kMinDate || date > kMaxDate) {
     return $NaN;
   }
 
-  // Now we rely on year and month being SMIs.
-  return %DateMakeDay(year, month) + date - 1;
+  // Now we rely on year, month and date being SMIs.
+  return %DateMakeDay(year, month, date);
 }
 
 
@@ -137,6 +387,9 @@
 var Date_cache = {
   // Cached time value.
   time: $NaN,
+  // Cached year when interpreting the time as a local time. Only
+  // valid when the time matches cached time.
+  year: $NaN,
   // String input for which the cached time is valid.
   string: null
 };
@@ -153,10 +406,11 @@
   var value;
   if (argc == 0) {
     value = %DateCurrentTime();
-    SET_UTC_DATE_VALUE(this, value);
+
   } else if (argc == 1) {
     if (IS_NUMBER(year)) {
-      value = year;
+      value = TimeClip(year);
+
     } else if (IS_STRING(year)) {
       // Probe the Date cache. If we already have a time value for the
       // given time, we re-use that instead of parsing the string again.
@@ -167,6 +421,7 @@
         value = DateParse(year);
         if (!NUMBER_IS_NAN(value)) {
           cache.time = value;
+          cache.year = YearFromTime(LocalTimeNoCheck(value));
           cache.string = year;
         }
       }
@@ -180,9 +435,9 @@
       // which is the default for everything else than Date objects.
       // This makes us behave like KJS and SpiderMonkey.
       var time = ToPrimitive(year, NUMBER_HINT);
-      value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
+      value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time));
     }
-    SET_UTC_DATE_VALUE(this, value);
+
   } else {
     year = ToNumber(year);
     month = ToNumber(month);
@@ -191,14 +446,13 @@
     minutes = argc > 4 ? ToNumber(minutes) : 0;
     seconds = argc > 5 ? ToNumber(seconds) : 0;
     ms = argc > 6 ? ToNumber(ms) : 0;
-    year = (!NUMBER_IS_NAN(year) &&
-            0 <= TO_INTEGER(year) &&
-            TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
+    year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+        ? 1900 + TO_INTEGER(year) : year;
     var day = MakeDay(year, month, date);
     var time = MakeTime(hours, minutes, seconds, ms);
-    value = MakeDate(day, time);
-    SET_LOCAL_DATE_VALUE(this, value);
+    value = TimeClip(UTC(MakeDate(day, time)));
   }
+  %_SetValueOf(this, value);
 });
 
 
@@ -206,8 +460,7 @@
 
 
 var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
-var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
-              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
+var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'];
 
 
 function TwoDigitString(value) {
@@ -215,46 +468,50 @@
 }
 
 
-function DateString(date) {
-  return WeekDays[LOCAL_WEEKDAY(date)] + ' '
-      + Months[LOCAL_MONTH(date)] + ' '
-      + TwoDigitString(LOCAL_DAY(date)) + ' '
-      + LOCAL_YEAR(date);
+function DateString(time) {
+  return WeekDays[WeekDay(time)] + ' '
+      + Months[MonthFromTime(time)] + ' '
+      + TwoDigitString(DateFromTime(time)) + ' '
+      + YearFromTime(time);
 }
 
 
-var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday',
-    'Thursday', 'Friday', 'Saturday'];
-var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June',
-    'July', 'August', 'September', 'October', 'November', 'December'];
+var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
+var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'];
 
 
-function LongDateString(date) {
-  return LongWeekDays[LOCAL_WEEKDAY(date)] + ', '
-      + LongMonths[LOCAL_MONTH(date)] + ' '
-      + TwoDigitString(LOCAL_DAY(date)) + ', '
-      + LOCAL_YEAR(date);
+function LongDateString(time) {
+  return LongWeekDays[WeekDay(time)] + ', '
+      + LongMonths[MonthFromTime(time)] + ' '
+      + TwoDigitString(DateFromTime(time)) + ', '
+      + YearFromTime(time);
 }
 
 
-function TimeString(date) {
-  return TwoDigitString(LOCAL_HOUR(date)) + ':'
-      + TwoDigitString(LOCAL_MIN(date)) + ':'
-      + TwoDigitString(LOCAL_SEC(date));
+function TimeString(time) {
+  return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
+      + TwoDigitString(MIN_FROM_TIME(time)) + ':'
+      + TwoDigitString(SEC_FROM_TIME(time));
 }
 
 
-function TimeStringUTC(date) {
-  return TwoDigitString(UTC_HOUR(date)) + ':'
-      + TwoDigitString(UTC_MIN(date)) + ':'
-      + TwoDigitString(UTC_SEC(date));
-}
+function LocalTimezoneString(time) {
+  var old_timezone = timezone_cache_timezone;
+  var timezone = LocalTimezone(time);
+  if (old_timezone && timezone != old_timezone) {
+    // If the timezone string has changed from the one that we cached,
+    // the local time offset may now be wrong. So we need to update it
+    // and try again.
+    local_time_offset = %DateLocalTimeOffset();
+    // We also need to invalidate the DST cache as the new timezone may have
+    // different DST times.
+    var dst_cache = DST_offset_cache;
+    dst_cache.start = 0;
+    dst_cache.end = -1;
+  }
 
-
-function LocalTimezoneString(date) {
-  var timezone = LocalTimezone(UTC_DATE_VALUE(date));
-
-  var timezoneOffset = -TIMEZONE_OFFSET(date);
+  var timezoneOffset =
+      (DaylightSavingsOffset(time) + local_time_offset) / msPerMinute;
   var sign = (timezoneOffset >= 0) ? 1 : -1;
   var hours = FLOOR((sign * timezoneOffset)/60);
   var min   = FLOOR((sign * timezoneOffset)%60);
@@ -264,8 +521,8 @@
 }
 
 
-function DatePrintString(date) {
-  return DateString(date) + ' ' + TimeString(date);
+function DatePrintString(time) {
+  return DateString(time) + ' ' + TimeString(time);
 }
 
 // -------------------------------------------------------------------
@@ -300,12 +557,11 @@
   minutes = argc > 4 ? ToNumber(minutes) : 0;
   seconds = argc > 5 ? ToNumber(seconds) : 0;
   ms = argc > 6 ? ToNumber(ms) : 0;
-  year = (!NUMBER_IS_NAN(year) &&
-          0 <= TO_INTEGER(year) &&
-          TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year;
+  year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
+      ? 1900 + TO_INTEGER(year) : year;
   var day = MakeDay(year, month, date);
   var time = MakeTime(hours, minutes, seconds, ms);
-  return TimeClip(MakeDate(day, time));
+  return %_SetValueOf(this, TimeClip(MakeDate(day, time)));
 }
 
 
@@ -318,30 +574,27 @@
 
 // ECMA 262 - 15.9.5.2
 function DateToString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this)
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  var time_zone_string = LocalTimezoneString(this)
-  return DatePrintString(this) + time_zone_string;
+  var time_zone_string = LocalTimezoneString(t);  // May update local offset.
+  return DatePrintString(LocalTimeNoCheck(t)) + time_zone_string;
 }
 
 
 // ECMA 262 - 15.9.5.3
 function DateToDateString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return DateString(this);
+  return DateString(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.4
 function DateToTimeString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  var time_zone_string = LocalTimezoneString(this);
-  return TimeString(this) + time_zone_string;
+  var time_zone_string = LocalTimezoneString(t);  // May update local offset.
+  return TimeString(LocalTimeNoCheck(t)) + time_zone_string;
 }
 
 
@@ -353,388 +606,357 @@
 
 // ECMA 262 - 15.9.5.6
 function DateToLocaleDateString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return LongDateString(this);
+  return LongDateString(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.7
 function DateToLocaleTimeString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
-  return TimeString(this);
+  var lt = LocalTimeNoCheck(t);
+  return TimeString(lt);
 }
 
 
 // ECMA 262 - 15.9.5.8
 function DateValueOf() {
-  CHECK_DATE(this);
-  return UTC_DATE_VALUE(this);
+  return DATE_VALUE(this);
 }
 
 
 // ECMA 262 - 15.9.5.9
 function DateGetTime() {
-  CHECK_DATE(this);
-  return UTC_DATE_VALUE(this);
+  return DATE_VALUE(this);
 }
 
 
 // ECMA 262 - 15.9.5.10
 function DateGetFullYear() {
-  CHECK_DATE(this);
-  return LOCAL_YEAR(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  var cache = Date_cache;
+  if (cache.time === t) return cache.year;
+  return YearFromTime(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.11
 function DateGetUTCFullYear() {
-  CHECK_DATE(this);
-  return UTC_YEAR(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return YearFromTime(t);
 }
 
 
 // ECMA 262 - 15.9.5.12
 function DateGetMonth() {
-  CHECK_DATE(this);
-  return LOCAL_MONTH(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MonthFromTime(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.13
 function DateGetUTCMonth() {
-  CHECK_DATE(this);
-  return UTC_MONTH(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MonthFromTime(t);
 }
 
 
 // ECMA 262 - 15.9.5.14
 function DateGetDate() {
-  CHECK_DATE(this);
-  return LOCAL_DAY(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return DateFromTime(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.15
 function DateGetUTCDate() {
-  CHECK_DATE(this);
-  return UTC_DAY(this);
+  var t = DATE_VALUE(this);
+  return NAN_OR_DATE_FROM_TIME(t);
 }
 
 
 // ECMA 262 - 15.9.5.16
 function DateGetDay() {
-  CHECK_DATE(this);
-  return LOCAL_WEEKDAY(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.17
 function DateGetUTCDay() {
-  CHECK_DATE(this);
-  return UTC_WEEKDAY(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return WeekDay(t);
 }
 
 
 // ECMA 262 - 15.9.5.18
 function DateGetHours() {
-  CHECK_DATE(this);
-  return LOCAL_HOUR(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HOUR_FROM_TIME(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.19
 function DateGetUTCHours() {
-  CHECK_DATE(this);
-  return UTC_HOUR(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return HOUR_FROM_TIME(t);
 }
 
 
 // ECMA 262 - 15.9.5.20
 function DateGetMinutes() {
-  CHECK_DATE(this);
-  return LOCAL_MIN(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MIN_FROM_TIME(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.21
 function DateGetUTCMinutes() {
-  CHECK_DATE(this);
-  return UTC_MIN(this);
+  var t = DATE_VALUE(this);
+  return NAN_OR_MIN_FROM_TIME(t);
 }
 
 
 // ECMA 262 - 15.9.5.22
 function DateGetSeconds() {
-  CHECK_DATE(this);
-  return LOCAL_SEC(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return SEC_FROM_TIME(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.23
 function DateGetUTCSeconds() {
-  CHECK_DATE(this);
-  return UTC_SEC(this)
+  var t = DATE_VALUE(this);
+  return NAN_OR_SEC_FROM_TIME(t);
 }
 
 
 // ECMA 262 - 15.9.5.24
 function DateGetMilliseconds() {
-  CHECK_DATE(this);
-  return LOCAL_MS(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return MS_FROM_TIME(LocalTimeNoCheck(t));
 }
 
 
 // ECMA 262 - 15.9.5.25
 function DateGetUTCMilliseconds() {
-  CHECK_DATE(this);
-  return UTC_MS(this);
+  var t = DATE_VALUE(this);
+  return NAN_OR_MS_FROM_TIME(t);
 }
 
 
 // ECMA 262 - 15.9.5.26
 function DateGetTimezoneOffset() {
-  CHECK_DATE(this);
-  return TIMEZONE_OFFSET(this);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return t;
+  return (t - LocalTimeNoCheck(t)) / msPerMinute;
 }
 
 
 // ECMA 262 - 15.9.5.27
 function DateSetTime(ms) {
-  CHECK_DATE(this);
-  SET_UTC_DATE_VALUE(this, ToNumber(ms));
-  return UTC_DATE_VALUE(this);
+  if (!IS_DATE(this)) ThrowDateTypeError();
+  return %_SetValueOf(this, TimeClip(ToNumber(ms)));
 }
 
 
 // ECMA 262 - 15.9.5.28
 function DateSetMilliseconds(ms) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   ms = ToNumber(ms);
-  var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), LOCAL_SEC(this), ms);
-  SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
-  return this;
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
 }
 
 
 // ECMA 262 - 15.9.5.29
 function DateSetUTCMilliseconds(ms) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   ms = ToNumber(ms);
-  var time = MakeTime(UTC_HOUR(this),
-                      UTC_MIN(this),
-                      UTC_SEC(this),
-                      ms);
-  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
 }
 
 
 // ECMA 262 - 15.9.5.30
 function DateSetSeconds(sec, ms) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   sec = ToNumber(sec);
-  ms = %_ArgumentsLength() < 2 ? LOCAL_MS(this) : ToNumber(ms);
-  var time = MakeTime(LOCAL_HOUR(this), LOCAL_MIN(this), sec, ms);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
+  ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
 }
 
 
 // ECMA 262 - 15.9.5.31
 function DateSetUTCSeconds(sec, ms) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   sec = ToNumber(sec);
-  ms = %_ArgumentsLength() < 2 ? UTC_MS(this) : ToNumber(ms);
-  var time = MakeTime(UTC_HOUR(this), UTC_MIN(this), sec, ms);
-  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
+  ms = %_ArgumentsLength() < 2 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
 }
 
 
 // ECMA 262 - 15.9.5.33
 function DateSetMinutes(min, sec, ms) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   min = ToNumber(min);
   var argc = %_ArgumentsLength();
-  sec = argc < 2 ? LOCAL_SEC(this) : ToNumber(sec);
-  ms = argc < 3 ? LOCAL_MS(this) : ToNumber(ms);
-  var time = MakeTime(LOCAL_HOUR(this), min, sec, ms);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
+  sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
+  ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
 }
 
 
 // ECMA 262 - 15.9.5.34
 function DateSetUTCMinutes(min, sec, ms) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   min = ToNumber(min);
   var argc = %_ArgumentsLength();
-  sec = argc < 2 ? UTC_SEC(this) : ToNumber(sec);
-  ms = argc < 3 ? UTC_MS(this) : ToNumber(ms);
-  var time = MakeTime(UTC_HOUR(this), min, sec, ms);
-  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
+  sec = argc < 2 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
+  ms = argc < 3 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
+  var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
 }
 
 
 // ECMA 262 - 15.9.5.35
 function DateSetHours(hour, min, sec, ms) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   hour = ToNumber(hour);
   var argc = %_ArgumentsLength();
-  min = argc < 2 ? LOCAL_MIN(this) : ToNumber(min);
-  sec = argc < 3 ? LOCAL_SEC(this) : ToNumber(sec);
-  ms = argc < 4 ? LOCAL_MS(this) : ToNumber(ms);
+  min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
+  sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
+  ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
   var time = MakeTime(hour, min, sec, ms);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(LOCAL_DAYS(this), time));
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
 }
 
 
 // ECMA 262 - 15.9.5.34
 function DateSetUTCHours(hour, min, sec, ms) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   hour = ToNumber(hour);
   var argc = %_ArgumentsLength();
-  min = argc < 2 ? UTC_MIN(this) : ToNumber(min);
-  sec = argc < 3 ? UTC_SEC(this) : ToNumber(sec);
-  ms = argc < 4 ? UTC_MS(this) : ToNumber(ms);
+  min = argc < 2 ? NAN_OR_MIN_FROM_TIME(t) : ToNumber(min);
+  sec = argc < 3 ? NAN_OR_SEC_FROM_TIME(t) : ToNumber(sec);
+  ms = argc < 4 ? NAN_OR_MS_FROM_TIME(t) : ToNumber(ms);
   var time = MakeTime(hour, min, sec, ms);
-  return SET_UTC_DATE_VALUE(this, MakeDate(UTC_DAYS(this), time));
+  return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
 }
 
 
 // ECMA 262 - 15.9.5.36
 function DateSetDate(date) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   date = ToNumber(date);
-  var day = MakeDay(LOCAL_YEAR(this), LOCAL_MONTH(this), date);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
 
 // ECMA 262 - 15.9.5.37
 function DateSetUTCDate(date) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   date = ToNumber(date);
-  var day = MakeDay(UTC_YEAR(this), UTC_MONTH(this), date);
-  return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
+  var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
 
 
 // ECMA 262 - 15.9.5.38
 function DateSetMonth(month, date) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = LocalTime(DATE_VALUE(this));
   month = ToNumber(month);
-  date = %_ArgumentsLength() < 2 ? LOCAL_DAY(this) : ToNumber(date);
-  var day = MakeDay(LOCAL_YEAR(this), month, date);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, LOCAL_TIME_IN_DAY(this)));
+  date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
+  var day = MakeDay(YearFromTime(t), month, date);
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
 
 // ECMA 262 - 15.9.5.39
 function DateSetUTCMonth(month, date) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   month = ToNumber(month);
-  date = %_ArgumentsLength() < 2 ? UTC_DAY(this) : ToNumber(date);
-  var day = MakeDay(UTC_YEAR(this), month, date);
-  return SET_UTC_DATE_VALUE(this, MakeDate(day, UTC_TIME_IN_DAY(this)));
+  date = %_ArgumentsLength() < 2 ? NAN_OR_DATE_FROM_TIME(t) : ToNumber(date);
+  var day = MakeDay(YearFromTime(t), month, date);
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
 
 
 // ECMA 262 - 15.9.5.40
 function DateSetFullYear(year, month, date) {
-  CHECK_DATE(this);
-  var t = LOCAL_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
+  t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
   year = ToNumber(year);
   var argc = %_ArgumentsLength();
-  var time ;
-  if (NUMBER_IS_NAN(t)) {
-    month = argc < 2 ? 0 : ToNumber(month);
-    date = argc < 3 ? 1 : ToNumber(date);
-    time = 0;
-  } else {
-    month = argc < 2 ? LOCAL_MONTH(this) : ToNumber(month);
-    date = argc < 3 ? LOCAL_DAY(this) : ToNumber(date);
-    time = LOCAL_TIME_IN_DAY(this);
-  }
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
   var day = MakeDay(year, month, date);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
 
 // ECMA 262 - 15.9.5.41
 function DateSetUTCFullYear(year, month, date) {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
-  year = ToNumber(year);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) t = 0;
   var argc = %_ArgumentsLength();
-  var time ;
-  if (NUMBER_IS_NAN(t)) {
-    month = argc < 2 ? 0 : ToNumber(month);
-    date = argc < 3 ? 1 : ToNumber(date);
-    time = 0;
-  } else {
-    month = argc < 2 ? UTC_MONTH(this) : ToNumber(month);
-    date = argc < 3 ? UTC_DAY(this) : ToNumber(date);
-    time = UTC_TIME_IN_DAY(this);
-  }
+  year = ToNumber(year);
+  month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
+  date = argc < 3 ? DateFromTime(t) : ToNumber(date);
   var day = MakeDay(year, month, date);
-  return SET_UTC_DATE_VALUE(this, MakeDate(day, time));
+  return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
 }
 
 
 // ECMA 262 - 15.9.5.42
 function DateToUTCString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
+  var t = DATE_VALUE(this);
   if (NUMBER_IS_NAN(t)) return kInvalidDate;
   // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
-  return WeekDays[UTC_WEEKDAY(this)] + ', '
-      + TwoDigitString(UTC_DAY(this)) + ' '
-      + Months[UTC_MONTH(this)] + ' '
-      + UTC_YEAR(this) + ' '
-      + TimeStringUTC(this) + ' GMT';
+  return WeekDays[WeekDay(t)] + ', '
+      + TwoDigitString(DateFromTime(t)) + ' '
+      + Months[MonthFromTime(t)] + ' '
+      + YearFromTime(t) + ' '
+      + TimeString(t) + ' GMT';
 }
 
 
 // ECMA 262 - B.2.4
 function DateGetYear() {
-  CHECK_DATE(this);
-  return LOCAL_YEAR(this) - 1900;
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return $NaN;
+  return YearFromTime(LocalTimeNoCheck(t)) - 1900;
 }
 
 
 // ECMA 262 - B.2.5
 function DateSetYear(year) {
-  CHECK_DATE(this);
+  var t = LocalTime(DATE_VALUE(this));
+  if (NUMBER_IS_NAN(t)) t = 0;
   year = ToNumber(year);
-  if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN);
+  if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
   year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
       ? 1900 + TO_INTEGER(year) : year;
-  var t = LOCAL_DATE_VALUE(this);
-  var month, date, time;
-  if (NUMBER_IS_NAN(t))  {
-    month = 0;
-    date = 1;
-    time = 0;
-  } else {
-    month = LOCAL_MONTH(this);
-    date = LOCAL_DAY(this);
-    time = LOCAL_TIME_IN_DAY(this);
-  }
-  var day = MakeDay(year, month, date);
-  return SET_LOCAL_DATE_VALUE(this, MakeDate(day, time));
+  var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+  return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
 }
 
 
@@ -756,11 +978,9 @@
 }
 
 
-// ECMA 262 - 15.9.5.43
 function DateToISOString() {
-  CHECK_DATE(this);
-  var t = UTC_DATE_VALUE(this);
-  if (NUMBER_IS_NAN(t)) throw MakeRangeError("invalid_time_value", []);
+  var t = DATE_VALUE(this);
+  if (NUMBER_IS_NAN(t)) return kInvalidDate;
   var year = this.getUTCFullYear();
   var year_string;
   if (year >= 0 && year <= 9999) {
@@ -794,13 +1014,34 @@
 
 
 function ResetDateCache() {
+
+  // Reset the local_time_offset:
+  local_time_offset = %DateLocalTimeOffset();
+
+  // Reset the DST offset cache:
+  var cache = DST_offset_cache;
+  cache.offset = 0;
+  cache.start = 0;
+  cache.end = -1;
+  cache.increment = 0;
+  cache.initial_increment = 19 * msPerDay;
+
   // Reset the timezone cache:
   timezone_cache_time = $NaN;
   timezone_cache_timezone = undefined;
 
+  // Reset the ltcache:
+  ltcache.key = null;
+  ltcache.val = null;
+
+  // Reset the ymd_from_time_cache:
+  ymd_from_time_cache = [$NaN, $NaN, $NaN];
+  ymd_from_time_cached_time = $NaN;
+
   // Reset the date cache:
   cache = Date_cache;
   cache.time = $NaN;
+  cache.year = $NaN;
   cache.string = null;
 }
 
@@ -821,7 +1062,7 @@
 
   // Set up non-enumerable functions of the Date prototype object and
   // set their names.
-  InstallFunctions($Date.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
     "toString", DateToString,
     "toDateString", DateToDateString,
     "toTimeString", DateToTimeString,
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 511663d..591d0b3 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -229,6 +229,8 @@
 
 
 const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
+const int DebuggerAgentUtil::kContentLengthSize =
+    StrLength(kContentLength);
 
 
 SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
@@ -372,11 +374,8 @@
 
   // Calculate the message size in UTF-8 encoding.
   int utf8_len = 0;
-  int previous = unibrow::Utf16::kNoPreviousCharacter;
   for (int i = 0; i < message.length(); i++) {
-    uint16_t character = message[i];
-    utf8_len += unibrow::Utf8::Length(character, previous);
-    previous = character;
+    utf8_len += unibrow::Utf8::Length(message[i]);
   }
 
   // Send the header.
@@ -391,33 +390,17 @@
 
   // Send message body as UTF-8.
   int buffer_position = 0;  // Current buffer position.
-  previous = unibrow::Utf16::kNoPreviousCharacter;
   for (int i = 0; i < message.length(); i++) {
     // Write next UTF-8 encoded character to buffer.
-    uint16_t character = message[i];
     buffer_position +=
-        unibrow::Utf8::Encode(buffer + buffer_position, character, previous);
+        unibrow::Utf8::Encode(buffer + buffer_position, message[i]);
     ASSERT(buffer_position < kBufferSize);
 
     // Send the buffer if it is full or the last character has been encoded.
-    if (kBufferSize - buffer_position <
-          unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit ||
-        i == message.length() - 1) {
-      if (unibrow::Utf16::IsLeadSurrogate(character)) {
-        const int kEncodedSurrogateLength =
-            unibrow::Utf16::kUtf8BytesToCodeASurrogate;
-        ASSERT(buffer_position >= kEncodedSurrogateLength);
-        conn->Send(buffer, buffer_position - kEncodedSurrogateLength);
-        for (int i = 0; i < kEncodedSurrogateLength; i++) {
-          buffer[i] = buffer[buffer_position + i];
-        }
-        buffer_position = kEncodedSurrogateLength;
-      } else {
-        conn->Send(buffer, buffer_position);
-        buffer_position = 0;
-      }
+    if (kBufferSize - buffer_position < 3 || i == message.length() - 1) {
+      conn->Send(buffer, buffer_position);
+      buffer_position = 0;
     }
-    previous = character;
   }
 
   return true;
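
For context on what the revert drops here: the newer code (removed above) threaded the previous UTF-16 code unit through Length() and Encode() so that a surrogate pair counts as one four-byte UTF-8 sequence and is never split across a Send() boundary; the restored code measures each unit on its own, which is only correct for BMP text. A hedged JavaScript sketch of the pair-aware length computation (utf8Length is hypothetical; the removed C++ used the unibrow helpers instead):

// UTF-8 length of a UTF-16 string, counting a surrogate pair as 4 bytes.
function utf8Length(s) {
  var len = 0;
  for (var i = 0; i < s.length; i++) {
    var c = s.charCodeAt(i);
    if (c < 0x80) {
      len += 1;
    } else if (c < 0x800) {
      len += 2;
    } else if (c >= 0xD800 && c <= 0xDBFF && i + 1 < s.length &&
               s.charCodeAt(i + 1) >= 0xDC00 && s.charCodeAt(i + 1) <= 0xDFFF) {
      len += 4;  // lead + trail surrogate encode one supplementary code point
      i++;       // consume the trail surrogate
    } else {
      len += 3;  // BMP character, or an unpaired surrogate kept as 3 bytes
    }
  }
  return len;
}
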
diff --git a/src/debug-agent.h b/src/debug-agent.h
index 6115190..a07fb0f 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -115,6 +115,7 @@
 class DebuggerAgentUtil {
  public:
   static const char* const kContentLength;
+  static const int kContentLengthSize;
 
   static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
   static bool SendConnectMessage(const Socket* conn,
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 802f622..d254ee5 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -26,14 +26,14 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Default number of frames to include in the response to backtrace request.
-var kDefaultBacktraceLength = 10;
+const kDefaultBacktraceLength = 10;
 
-var Debug = {};
+const Debug = {};
 
 // Regular expression to skip "crud" at the beginning of a source line which is
 // not really code. Currently the regular expression matches whitespace and
 // comments.
-var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
+const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
 
 // Debug events which can occur in the V8 JavaScript engine. These originate
 // from the API include file debug.h.
@@ -286,7 +286,7 @@
   copy.condition_ = this.condition_;
   copy.ignoreCount_ = this.ignoreCount_;
   return copy;
-};
+}
 
 
 ScriptBreakPoint.prototype.number = function() {
@@ -335,13 +335,13 @@
     locations.push(this.break_points_[i].actual_location);
   }
   return locations;
-};
+}
 
 
 ScriptBreakPoint.prototype.update_positions = function(line, column) {
   this.line_ = line;
   this.column_ = column;
-};
+}
 
 
 ScriptBreakPoint.prototype.hit_count = function() {
@@ -477,11 +477,9 @@
 // break points set in this script.
 function UpdateScriptBreakPoints(script) {
   for (var i = 0; i < script_break_points.length; i++) {
-    var break_point = script_break_points[i];
-    if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
-         break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
-        break_point.matchesScript(script)) {
-      break_point.set(script);
+    if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
+        script_break_points[i].matchesScript(script)) {
+      script_break_points[i].set(script);
     }
   }
 }
@@ -587,7 +585,7 @@
   var script = %FunctionGetScript(func);
   var script_offset = %FunctionGetScriptSourcePosition(func);
   return script.locationFromLine(opt_line, opt_column, script_offset);
-};
+}
 
 
 // Returns the character position in a script based on a line number and an
@@ -595,7 +593,7 @@
 Debug.findScriptSourcePosition = function(script, opt_line, opt_column) {
   var location = script.locationFromLine(opt_line, opt_column);
   return location ? location.position : null;
-};
+}
 
 
 Debug.findBreakPoint = function(break_point_number, remove) {
@@ -629,7 +627,7 @@
     }
   }
   return [];
-};
+}
 
 Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) {
   if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.');
@@ -679,9 +677,8 @@
 {
   break_point = MakeBreakPoint(position);
   break_point.setCondition(condition);
-  if (!enabled) {
+  if (!enabled)
     break_point.disable();
-  }
   var scripts = this.scripts();
   for (var i = 0; i < scripts.length; i++) {
     if (script_id == scripts[i].id) {
@@ -774,7 +771,7 @@
     }
   }
   return script_break_point;
-};
+}
 
 
 // Sets a breakpoint in a script identified through id or name at the
@@ -802,7 +799,7 @@
   }
 
   return script_break_point.number();
-};
+}
 
 
 Debug.setScriptBreakPointById = function(script_id,
@@ -811,7 +808,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
                                   script_id, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-};
+}
 
 
 Debug.setScriptBreakPointByName = function(script_name,
@@ -820,7 +817,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName,
                                   script_name, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-};
+}
 
 
 Debug.setScriptBreakPointByRegExp = function(script_regexp,
@@ -829,7 +826,7 @@
   return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptRegExp,
                                   script_regexp, opt_line, opt_column,
                                   opt_condition, opt_groupId);
-};
+}
 
 
 Debug.enableScriptBreakPoint = function(break_point_number) {
@@ -844,15 +841,13 @@
 };
 
 
-Debug.changeScriptBreakPointCondition = function(
-    break_point_number, condition) {
+Debug.changeScriptBreakPointCondition = function(break_point_number, condition) {
   var script_break_point = this.findScriptBreakPoint(break_point_number, false);
   script_break_point.setCondition(condition);
 };
 
 
-Debug.changeScriptBreakPointIgnoreCount = function(
-    break_point_number, ignoreCount) {
+Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
   if (ignoreCount < 0) {
     throw new Error('Invalid argument');
   }
@@ -863,12 +858,12 @@
 
 Debug.scriptBreakPoints = function() {
   return script_break_points;
-};
+}
 
 
 Debug.clearStepping = function() {
   %ClearStepping();
-};
+}
 
 Debug.setBreakOnException = function() {
   return %ChangeBreakOnException(Debug.ExceptionBreak.Caught, true);
@@ -945,7 +940,7 @@
   var count = opt_count ? %ToNumber(opt_count) : 1;
 
   return %PrepareStep(this.break_id, action, count);
-};
+}
 
 ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
     opt_additional_context) {
@@ -965,9 +960,8 @@
 ExecutionState.prototype.frame = function(opt_index) {
   // If no index supplied return the selected frame.
   if (opt_index == null) opt_index = this.selected_frame;
-  if (opt_index < 0 || opt_index >= this.frameCount()) {
+  if (opt_index < 0 || opt_index >= this.frameCount())
     throw new Error('Illegal frame index.');
-  }
   return new FrameMirror(this.break_id, opt_index);
 };
 
@@ -1094,12 +1088,12 @@
 
 ExceptionEvent.prototype.exception = function() {
   return this.exception_;
-};
+}
 
 
 ExceptionEvent.prototype.uncaught = function() {
   return this.uncaught_;
-};
+}
 
 
 ExceptionEvent.prototype.func = function() {
@@ -1191,7 +1185,7 @@
   o.body.script = this.script_;
 
   return o.toJSONProtocol();
-};
+}
 
 
 function MakeNewFunctionEvent(func) {
@@ -1247,7 +1241,7 @@
   o.body = {};
   o.body.script = { id: this.id() };
   return o.toJSONProtocol();
-};
+}
 
 
 function MakeScriptObject_(script, include_source) {
@@ -1264,18 +1258,18 @@
     o.source = script.source();
   }
   return o;
-}
+};
 
 
 function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
   this.running_ = opt_is_running || false;
-}
+};
 
 
 DebugCommandProcessor.prototype.processDebugRequest = function (request) {
   return this.processDebugJSONRequest(request);
-};
+}
 
 
 function ProtocolMessage(request) {
@@ -1303,13 +1297,13 @@
     this.options_ = {};
   }
   this.options_[name] = value;
-};
+}
 
 
 ProtocolMessage.prototype.failed = function(message) {
   this.success = false;
   this.message = message;
-};
+}
 
 
 ProtocolMessage.prototype.toJSONProtocol = function() {
@@ -1357,7 +1351,7 @@
   }
   json.running = this.running;
   return JSON.stringify(json);
-};
+}
 
 
 DebugCommandProcessor.prototype.createResponse = function(request) {
@@ -1365,8 +1359,7 @@
 };
 
 
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(
-    json_request) {
+DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) {
   var request;  // Current request.
   var response;  // Generated response.
   try {
@@ -1548,7 +1541,7 @@
       }
     }
 
-    // Set up the VM for stepping.
+    // Setup the VM for stepping.
     this.exec_state_.prepareStep(action, count);
   }
 
@@ -1653,7 +1646,7 @@
 
   // Add the break point number to the response.
   response.body = { type: type,
-                    breakpoint: break_point_number };
+                    breakpoint: break_point_number }
 
   // Add break point information to the response.
   if (break_point instanceof ScriptBreakPoint) {
@@ -1667,8 +1660,7 @@
       response.body.type = 'scriptRegExp';
       response.body.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " +
-                      break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
     }
     response.body.line = break_point.line();
     response.body.column = break_point.column();
@@ -1680,8 +1672,7 @@
 };
 
 
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1718,11 +1709,10 @@
   if (!IS_UNDEFINED(ignoreCount)) {
     Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
   }
-};
+}
 
 
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1753,11 +1743,10 @@
 
   // Add the cleared break point numbers to the response.
   response.body = { breakpoints: cleared_break_points };
-};
+}
 
 
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) {
   // Check for legal request.
   if (!request.arguments) {
     response.failed('Missing arguments');
@@ -1777,12 +1766,11 @@
   Debug.clearBreakPoint(break_point);
 
   // Add the cleared break point number to the response.
-  response.body = { breakpoint: break_point };
-};
+  response.body = { breakpoint: break_point }
+}
 
 
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
   var array = [];
   for (var i = 0; i < script_break_points.length; i++) {
     var break_point = script_break_points[i];
@@ -1797,7 +1785,7 @@
       condition: break_point.condition(),
       ignoreCount: break_point.ignoreCount(),
       actual_locations: break_point.actual_locations()
-    };
+    }
 
     if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
       description.type = 'scriptId';
@@ -1809,8 +1797,7 @@
       description.type = 'scriptRegExp';
       description.script_regexp = break_point.script_regexp_object().source;
     } else {
-      throw new Error("Internal error: Unexpected breakpoint type: " +
-                      break_point.type());
+      throw new Error("Internal error: Unexpected breakpoint type: " + break_point.type());
     }
     array.push(description);
   }
@@ -1819,15 +1806,15 @@
     breakpoints: array,
     breakOnExceptions: Debug.isBreakOnException(),
     breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
-  };
-};
+  }
+}
 
 
 DebugCommandProcessor.prototype.disconnectRequest_ =
     function(request, response) {
   Debug.disableAllBreakPoints();
   this.continueRequest_(request, response);
-};
+}
 
 
 DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
@@ -1872,11 +1859,10 @@
 
   // Add the cleared break point number to the response.
   response.body = { 'type': type, 'enabled': enabled };
-};
+}
 
 
-DebugCommandProcessor.prototype.backtraceRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
   // Get the number of frames.
   var total_frames = this.exec_state_.frameCount();
 
@@ -1884,12 +1870,12 @@
   if (total_frames == 0) {
     response.body = {
       totalFrames: total_frames
-    };
+    }
     return;
   }
 
   // Default frame range to include in backtrace.
-  var from_index = 0;
+  var from_index = 0
   var to_index = kDefaultBacktraceLength;
 
   // Get the range from the arguments.
@@ -1902,7 +1888,7 @@
     }
     if (request.arguments.bottom) {
       var tmp_index = total_frames - from_index;
-      from_index = total_frames - to_index;
+      from_index = total_frames - to_index
       to_index = tmp_index;
     }
     if (from_index < 0 || to_index < 0) {
@@ -1928,7 +1914,7 @@
     toFrame: to_index,
     totalFrames: total_frames,
     frames: frames
-  };
+  }
 };
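
The backtrace handler above selects a half-open [fromFrame, toFrame) window over total_frames, and the bottom flag mirrors that window so it is anchored at the oldest frame rather than the newest. A minimal standalone sketch of the same arithmetic, with hypothetical names rather than the debugger's own:

    #include <algorithm>
    #include <cassert>

    struct FrameRange { int from; int to; };  // half-open [from, to)

    // Mirror the requested window when counting from the bottom (oldest
    // frame) instead of the top, then clamp to the frames that exist.
    FrameRange BacktraceWindow(int total_frames, int from, int to, bool bottom) {
      if (bottom) {
        int tmp = total_frames - from;
        from = total_frames - to;
        to = tmp;
      }
      assert(from >= 0 && to >= 0);  // the request is rejected otherwise
      to = std::min(to, total_frames);
      return FrameRange{from, to};
    }

For example, BacktraceWindow(10, 0, 3, true) yields [7, 10): the three oldest of ten frames.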
 
 
@@ -1952,8 +1938,8 @@
 
 
 DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
-  // Get the frame for which the scope or scopes are requested.
-  // With no frameNumber argument use the currently selected frame.
+  // Get the frame for which the scope or scopes are requested. With no frameNumber
+  // argument use the currently selected frame.
   if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
     frame_index = request.arguments.frameNumber;
     if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
@@ -1963,7 +1949,7 @@
   } else {
     return this.exec_state_.frame();
   }
-};
+}
 
 
 DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
@@ -1986,7 +1972,7 @@
     toScope: total_scopes,
     totalScopes: total_scopes,
     scopes: scopes
-  };
+  }
 };
 
 
@@ -2231,8 +2217,7 @@
     if (!IS_UNDEFINED(request.arguments.types)) {
       types = %ToNumber(request.arguments.types);
       if (isNaN(types) || types < 0) {
-        return response.failed('Invalid types "' +
-                               request.arguments.types + '"');
+        return response.failed('Invalid types "' + request.arguments.types + '"');
       }
     }
 
@@ -2301,7 +2286,7 @@
     var details = %GetThreadDetails(this.exec_state_.break_id, i);
     var thread_info = { current: details[0],
                         id: details[1]
-                      };
+                      }
     threads.push(thread_info);
   }
 
@@ -2309,7 +2294,7 @@
   response.body = {
     totalThreads: total_threads,
     threads: threads
-  };
+  }
 };
 
 
@@ -2321,7 +2306,7 @@
 DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
   response.body = {
     V8Version: %GetV8Version()
-  };
+  }
 };
 
 
@@ -2337,8 +2322,7 @@
 };
 
 
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(
-    request, response) {
+DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) {
   if (!Debug.LiveEdit) {
     return response.failed('LiveEdit feature is not supported');
   }
@@ -2409,7 +2393,7 @@
       response.body.flags.push({ name: name, value: value });
     }
   }
-};
+}
 
 
 DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
@@ -2515,7 +2499,7 @@
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
   return this.running_;
-};
+}
 
 
 DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
@@ -2531,7 +2515,7 @@
     n = n >>> 4;
   }
   return r;
-}
+};
 
 
 /**
@@ -2607,7 +2591,7 @@
     case 'string':
     case 'number':
       json = value;
-      break;
+      break
 
     default:
       json = null;
diff --git a/src/debug.cc b/src/debug.cc
index f8a1ecf..20cd802 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,11 +37,9 @@
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
-#include "full-codegen.h"
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
-#include "isolate-inl.h"
 #include "list.h"
 #include "messages.h"
 #include "natives.h"
@@ -86,9 +84,21 @@
 }
 
 
+static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(
+      isolate,
+      isolate->stub_cache()->ComputeCallDebugBreak(argc, kind),
+      Code);
+}
+
+
 static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
   Isolate* isolate = Isolate::Current();
-  return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
+  CALL_HEAP_FUNCTION(
+      isolate,
+      isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind),
+      Code);
 }
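
Both helpers above are reinstated as CALL_HEAP_FUNCTION wrappers, V8's idiom for heap-allocating calls that can fail when the heap is full. The following is a deliberately simplified sketch of that retry-after-GC shape, not the actual macro; std::optional stands in for MaybeObject and both callbacks are hypothetical:

    #include <optional>

    // A heap-allocating operation either yields a value or signals failure;
    // the wrapper runs a garbage collection and retries once before giving up.
    template <typename T, typename AllocFn, typename GcFn>
    std::optional<T> CallHeapFunction(AllocFn alloc, GcFn collect_garbage) {
      if (std::optional<T> result = alloc()) return result;  // fast path
      collect_garbage();                                     // free some space
      return alloc();                                        // single retry
    }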
 
 
@@ -391,15 +401,15 @@
   // Step in can only be prepared if currently positioned on an IC call,
   // construct call or CallFunction stub call.
   Address target = rinfo()->target_address();
-  Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
-  if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
+  Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+  if (code->is_call_stub() || code->is_keyed_call_stub()) {
     // Step in through IC call is handled by the runtime system. Therefore make
     // sure that any current IC is cleared and the runtime system is
     // called. If the executing code has a debug break at the location change
     // the call in the original code as it is the code there that will be
     // executed in place of the debug break call.
-    Handle<Code> stub = ComputeCallDebugPrepareStepIn(
-        target_code->arguments_count(), target_code->kind());
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
+                                                      code->kind());
     if (IsDebugBreak()) {
       original_rinfo()->set_target_address(stub->entry());
     } else {
@@ -409,7 +419,7 @@
 #ifdef DEBUG
     // All the following stuff is needed only for assertion checks so the code
     // is wrapped in ifdef.
-    Handle<Code> maybe_call_function_stub = target_code;
+    Handle<Code> maybe_call_function_stub = code;
     if (IsDebugBreak()) {
       Address original_target = original_rinfo()->target_address();
       maybe_call_function_stub =
@@ -426,9 +436,8 @@
     // Step in through CallFunction stub should also be prepared by caller of
     // this function (Debug::PrepareStep) which should flood target function
     // with breakpoints.
-    ASSERT(RelocInfo::IsConstructCall(rmode()) ||
-           target_code->is_inline_cache_stub() ||
-           is_call_function_stub);
+    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
+           || is_call_function_stub);
 #endif
   }
 }
@@ -465,11 +474,11 @@
   RelocInfo::Mode mode = rmode();
   if (RelocInfo::IsCodeTarget(mode)) {
     Address target = rinfo()->target_address();
-    Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
 
     // Patch the code to invoke the builtin debug break function matching the
     // calling convention used by the call site.
-    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
   }
 }
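
The patching above swaps a call site's target for a debug-break stub that matches the site's calling convention, while the unpatched copy of the code retains the real target for restoration. A toy model of that patch/restore protocol; the side table and types are illustrative only, not how V8 stores the original target:

    #include <cstdint>
    #include <unordered_map>

    using Address = std::uintptr_t;

    struct CallSite {
      Address target;  // where the call currently lands
    };

    // Original targets saved while a site is patched (keyed by site).
    std::unordered_map<CallSite*, Address> original_targets;

    void SetDebugBreak(CallSite* site, Address debug_break_stub) {
      original_targets[site] = site->target;  // remember the real target
      site->target = debug_break_stub;        // detour into the debugger
    }

    void ClearDebugBreak(CallSite* site) {
      site->target = original_targets[site];  // restore the real target
      original_targets.erase(site);
    }
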
@@ -677,7 +686,7 @@
 }
 
 
-void Debug::SetUp(bool create_heap_objects) {
+void Debug::Setup(bool create_heap_objects) {
   ThreadInit();
   if (create_heap_objects) {
     // Get code to handle debug break on return.
@@ -763,26 +772,19 @@
 
   // Execute the shared function in the debugger context.
   Handle<Context> context = isolate->global_context();
-  bool caught_exception;
+  bool caught_exception = false;
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
 
-  Handle<Object> exception =
-      Execution::TryCall(function, Handle<Object>(context->global()),
-                         0, NULL, &caught_exception);
+  Execution::TryCall(function, Handle<Object>(context->global()),
+                     0, NULL, &caught_exception);
 
   // Check for caught exceptions.
   if (caught_exception) {
-    ASSERT(!isolate->has_pending_exception());
-    MessageLocation computed_location;
-    isolate->ComputeLocation(&computed_location);
     Handle<Object> message = MessageHandler::MakeMessageObject(
-        "error_loading_debugger", &computed_location,
-        Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
-    ASSERT(!isolate->has_pending_exception());
-    isolate->set_pending_exception(*exception);
+        "error_loading_debugger", NULL, Vector<Handle<Object> >::empty(),
+        Handle<String>(), Handle<JSArray>());
     MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
-    isolate->clear_pending_exception();
     return false;
   }
 
@@ -820,9 +822,6 @@
           v8::Handle<ObjectTemplate>(),
           NULL);
 
-  // Fail if no context could be created.
-  if (context.is_null()) return false;
-
   // Use the debugger context.
   SaveContext save(isolate_);
   isolate_->set_context(*context);
@@ -832,8 +831,8 @@
   Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
   RETURN_IF_EMPTY_HANDLE_VALUE(
       isolate_,
-      JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
-                              NONE, kNonStrictMode),
+      SetProperty(global, key, Handle<Object>(global->builtins()),
+                  NONE, kNonStrictMode),
       false);
 
   // Compile the JavaScript for the debugger in the debugger context.
@@ -1104,13 +1103,14 @@
   Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
 
   // Call HandleBreakPointx.
-  bool caught_exception;
-  Handle<Object> argv[] = { break_id, break_point_object };
+  bool caught_exception = false;
+  const int argc = 2;
+  Object** argv[argc] = {
+    break_id.location(),
+    reinterpret_cast<Object**>(break_point_object.location())
+  };
   Handle<Object> result = Execution::TryCall(check_break_point,
-                                             isolate_->js_builtins_object(),
-                                             ARRAY_SIZE(argv),
-                                             argv,
-                                             &caught_exception);
+      isolate_->js_builtins_object(), argc, argv, &caught_exception);
 
   // If exception or non-boolean result, handle as not triggered
   if (caught_exception || !result->IsBoolean()) {
@@ -1151,7 +1151,7 @@
 
   Handle<DebugInfo> debug_info = GetDebugInfo(shared);
   // Source positions start at zero.
-  ASSERT(*source_position >= 0);
+  ASSERT(source_position >= 0);
 
   // Find the break point and change it.
   BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
@@ -1218,7 +1218,7 @@
 
 void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
   PrepareForBreakPoints();
-  // Make sure the function has set up the debug info.
+  // Make sure the function has set up the debug info.
   if (!EnsureDebugInfo(shared)) {
     // Return if we failed to retrieve the debug info.
     return;
@@ -1233,18 +1233,6 @@
 }
 
 
-void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
-  Handle<FixedArray> new_bindings(function->function_bindings());
-  Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
-
-  if (!bindee.is_null() && bindee->IsJSFunction() &&
-      !JSFunction::cast(*bindee)->IsBuiltin()) {
-    Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
-    Debug::FloodWithOneShot(shared_info);
-  }
-}
-
-
 void Debug::FloodHandlerWithOneShot() {
   // Iterate through the JavaScript stack looking for handlers.
   StackFrame::Id id = break_frame_id();
@@ -1464,10 +1452,8 @@
           expressions_count - 2 - call_function_arg_count);
       if (fun->IsJSFunction()) {
         Handle<JSFunction> js_function(JSFunction::cast(fun));
-        if (js_function->shared()->bound()) {
-          Debug::FloodBoundFunctionWithOneShot(js_function);
-        } else if (!js_function->IsBuiltin()) {
-          // Don't step into builtins.
+        // Don't step into builtins.
+        if (!js_function->IsBuiltin()) {
           // It will also compile target function if it's not compiled yet.
           FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
         }
@@ -1557,47 +1543,40 @@
 
 // Find the builtin to use for invoking the debug break
 Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
-  Isolate* isolate = Isolate::Current();
-
   // Find the builtin debug break function matching the calling convention
   // used by the call site.
   if (code->is_inline_cache_stub()) {
     switch (code->kind()) {
       case Code::CALL_IC:
       case Code::KEYED_CALL_IC:
-        return isolate->stub_cache()->ComputeCallDebugBreak(
-            code->arguments_count(), code->kind());
+        return ComputeCallDebugBreak(code->arguments_count(), code->kind());
 
       case Code::LOAD_IC:
-        return isolate->builtins()->LoadIC_DebugBreak();
+        return Isolate::Current()->builtins()->LoadIC_DebugBreak();
 
       case Code::STORE_IC:
-        return isolate->builtins()->StoreIC_DebugBreak();
+        return Isolate::Current()->builtins()->StoreIC_DebugBreak();
 
       case Code::KEYED_LOAD_IC:
-        return isolate->builtins()->KeyedLoadIC_DebugBreak();
+        return Isolate::Current()->builtins()->KeyedLoadIC_DebugBreak();
 
       case Code::KEYED_STORE_IC:
-        return isolate->builtins()->KeyedStoreIC_DebugBreak();
+        return Isolate::Current()->builtins()->KeyedStoreIC_DebugBreak();
 
       default:
         UNREACHABLE();
     }
   }
   if (RelocInfo::IsConstructCall(mode)) {
-    if (code->has_function_cache()) {
-      return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
-    } else {
-      return isolate->builtins()->CallConstructStub_DebugBreak();
-    }
+    Handle<Code> result =
+        Isolate::Current()->builtins()->ConstructCall_DebugBreak();
+    return result;
   }
   if (code->kind() == Code::STUB) {
     ASSERT(code->major_key() == CodeStub::CallFunction);
-    if (code->has_function_cache()) {
-      return isolate->builtins()->CallFunctionStub_Recording_DebugBreak();
-    } else {
-      return isolate->builtins()->CallFunctionStub_DebugBreak();
-    }
+    Handle<Code> result =
+        Isolate::Current()->builtins()->StubNoRegisters_DebugBreak();
+    return result;
   }
 
   UNREACHABLE();
@@ -1663,11 +1642,8 @@
   // Flood the function with one-shot break points if it is called from where
   // step into was requested.
   if (fp == step_in_fp()) {
-    if (function->shared()->bound()) {
-      // Handle Function.prototype.bind
-      Debug::FloodBoundFunctionWithOneShot(function);
-    } else if (!function->IsBuiltin()) {
-      // Don't allow step into functions in the native context.
+    // Don't allow step into functions in the native context.
+    if (!function->IsBuiltin()) {
       if (function->shared()->code() ==
           Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
           function->shared()->code() ==
@@ -1750,289 +1726,11 @@
 }
 
 
-// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization
-// information. Deoptimization information is required in case that an
-// optimized version of this function is still activated on the
-// stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version - that is flags which can
-// change the code generated. The current method of mapping from
-// already compiled full code without debug break slots to full code
-// with debug break slots depends on the generated code being otherwise
-// exactly the same.
-static bool CompileFullCodeForDebugging(Handle<SharedFunctionInfo> shared,
-                                        Handle<Code> current_code) {
-  ASSERT(!current_code->has_debug_break_slots());
-
-  CompilationInfo info(shared);
-  info.MarkCompilingForDebugging(current_code);
-  ASSERT(!info.shared_info()->is_compiled());
-  ASSERT(!info.isolate()->has_pending_exception());
-
-  // Use compile lazy which will end up compiling the full code in the
-  // configuration configured above.
-  bool result = Compiler::CompileLazy(&info);
-  ASSERT(result != Isolate::Current()->has_pending_exception());
-  info.isolate()->clear_pending_exception();
-#if DEBUG
-  if (result) {
-    Handle<Code> new_code(shared->code());
-    ASSERT(new_code->has_debug_break_slots());
-    ASSERT(current_code->is_compiled_optimizable() ==
-           new_code->is_compiled_optimizable());
-  }
-#endif
-  return result;
-}
-
-
-static void CollectActiveFunctionsFromThread(
-    Isolate* isolate,
-    ThreadLocalTop* top,
-    List<Handle<JSFunction> >* active_functions,
-    Object* active_code_marker) {
-  // Find all non-optimized code functions with activation frames
-  // on the stack. This includes functions which have optimized
-  // activations (including inlined functions) on the stack as the
-  // non-optimized code is needed for the lazy deoptimization.
-  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-    if (frame->is_optimized()) {
-      List<JSFunction*> functions(Compiler::kMaxInliningLevels + 1);
-      frame->GetFunctions(&functions);
-      for (int i = 0; i < functions.length(); i++) {
-        JSFunction* function = functions[i];
-        active_functions->Add(Handle<JSFunction>(function));
-        function->shared()->code()->set_gc_metadata(active_code_marker);
-      }
-    } else if (frame->function()->IsJSFunction()) {
-      JSFunction* function = JSFunction::cast(frame->function());
-      ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
-      active_functions->Add(Handle<JSFunction>(function));
-      function->shared()->code()->set_gc_metadata(active_code_marker);
-    }
-  }
-}
-
-
-static void RedirectActivationsToRecompiledCodeOnThread(
-    Isolate* isolate,
-    ThreadLocalTop* top) {
-  for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-    JavaScriptFrame* frame = it.frame();
-
-    if (frame->is_optimized() || !frame->function()->IsJSFunction()) continue;
-
-    JSFunction* function = JSFunction::cast(frame->function());
-
-    ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
-
-    Handle<Code> frame_code(frame->LookupCode());
-    if (frame_code->has_debug_break_slots()) continue;
-
-    Handle<Code> new_code(function->shared()->code());
-    if (new_code->kind() != Code::FUNCTION ||
-        !new_code->has_debug_break_slots()) {
-      continue;
-    }
-
-    intptr_t delta = frame->pc() - frame_code->instruction_start();
-    int debug_break_slot_count = 0;
-    int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
-    for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
-      // Check if the pc in the new code with debug break
-      // slots is before this slot.
-      RelocInfo* info = it.rinfo();
-      int debug_break_slot_bytes =
-          debug_break_slot_count * Assembler::kDebugBreakSlotLength;
-      intptr_t new_delta =
-          info->pc() -
-          new_code->instruction_start() -
-          debug_break_slot_bytes;
-      if (new_delta > delta) {
-        break;
-      }
-
-      // Passed a debug break slot in the full code with debug
-      // break slots.
-      debug_break_slot_count++;
-    }
-    if (frame_code->has_self_optimization_header() &&
-        !new_code->has_self_optimization_header()) {
-      delta -= FullCodeGenerator::self_optimization_header_size();
-    } else {
-      ASSERT(frame_code->has_self_optimization_header() ==
-             new_code->has_self_optimization_header());
-    }
-    int debug_break_slot_bytes =
-        debug_break_slot_count * Assembler::kDebugBreakSlotLength;
-    if (FLAG_trace_deopt) {
-      PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
-             "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
-             "for debugging, "
-             "changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
-             reinterpret_cast<intptr_t>(
-                 frame_code->instruction_start()),
-             reinterpret_cast<intptr_t>(
-                 frame_code->instruction_start()) +
-             frame_code->instruction_size(),
-             frame_code->instruction_size(),
-             reinterpret_cast<intptr_t>(new_code->instruction_start()),
-             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
-             new_code->instruction_size(),
-             new_code->instruction_size(),
-             reinterpret_cast<intptr_t>(frame->pc()),
-             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
-             delta + debug_break_slot_bytes);
-    }
-
-    // Patch the return address to return into the code with
-    // debug break slots.
-    frame->set_pc(
-        new_code->instruction_start() + delta + debug_break_slot_bytes);
-  }
-}
-
-
-class ActiveFunctionsCollector : public ThreadVisitor {
- public:
-  explicit ActiveFunctionsCollector(List<Handle<JSFunction> >* active_functions,
-                                    Object* active_code_marker)
-      : active_functions_(active_functions),
-        active_code_marker_(active_code_marker) { }
-
-  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    CollectActiveFunctionsFromThread(isolate,
-                                     top,
-                                     active_functions_,
-                                     active_code_marker_);
-  }
-
- private:
-  List<Handle<JSFunction> >* active_functions_;
-  Object* active_code_marker_;
-};
-
-
-class ActiveFunctionsRedirector : public ThreadVisitor {
- public:
-  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    RedirectActivationsToRecompiledCodeOnThread(isolate, top);
-  }
-};
-
-
 void Debug::PrepareForBreakPoints() {
   // If preparing for the first break point make sure to deoptimize all
   // functions as debugging does not work with optimized code.
   if (!has_break_points_) {
     Deoptimizer::DeoptimizeAll();
-
-    Handle<Code> lazy_compile =
-        Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
-
-    // Keep the list of activated functions in a handlified list as it
-    // is used both in GC and non-GC code.
-    List<Handle<JSFunction> > active_functions(100);
-
-    {
-      // We are going to iterate heap to find all functions without
-      // debug break slots.
-      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                          "preparing for breakpoints");
-
-      // Ensure no GC in this scope as we are going to use gc_metadata
-      // field in the Code object to mark active functions.
-      AssertNoAllocation no_allocation;
-
-      Object* active_code_marker = isolate_->heap()->the_hole_value();
-
-      CollectActiveFunctionsFromThread(isolate_,
-                                       isolate_->thread_local_top(),
-                                       &active_functions,
-                                       active_code_marker);
-      ActiveFunctionsCollector active_functions_collector(&active_functions,
-                                                          active_code_marker);
-      isolate_->thread_manager()->IterateArchivedThreads(
-          &active_functions_collector);
-
-      // Scan the heap for all non-optimized functions which have no
-      // debug break slots and are not active or inlined into an active
-      // function and mark them for lazy compilation.
-      HeapIterator iterator;
-      HeapObject* obj = NULL;
-      while (((obj = iterator.next()) != NULL)) {
-        if (obj->IsJSFunction()) {
-          JSFunction* function = JSFunction::cast(obj);
-          SharedFunctionInfo* shared = function->shared();
-          if (shared->allows_lazy_compilation() &&
-              shared->script()->IsScript() &&
-              function->code()->kind() == Code::FUNCTION &&
-              !function->code()->has_debug_break_slots() &&
-              shared->code()->gc_metadata() != active_code_marker) {
-            function->set_code(*lazy_compile);
-            function->shared()->set_code(*lazy_compile);
-          }
-        }
-      }
-
-      // Clear gc_metadata field.
-      for (int i = 0; i < active_functions.length(); i++) {
-        Handle<JSFunction> function = active_functions[i];
-        function->shared()->code()->set_gc_metadata(Smi::FromInt(0));
-      }
-    }
-
-    // Now recompile all functions with activation frames and
-    // patch the return address to run in the new compiled code.
-    for (int i = 0; i < active_functions.length(); i++) {
-      Handle<JSFunction> function = active_functions[i];
-
-      if (function->code()->kind() == Code::FUNCTION &&
-          function->code()->has_debug_break_slots()) {
-        // Nothing to do. Function code already had debug break slots.
-        continue;
-      }
-
-      Handle<SharedFunctionInfo> shared(function->shared());
-      // If recompilation is not possible just skip it.
-      if (shared->is_toplevel() ||
-          !shared->allows_lazy_compilation() ||
-          shared->code()->kind() == Code::BUILTIN) {
-        continue;
-      }
-
-      // Make sure that the shared full code is compiled with debug
-      // break slots.
-      if (!shared->code()->has_debug_break_slots()) {
-        // Try to compile the full code with debug break slots. If it
-        // fails just keep the current code.
-        Handle<Code> current_code(function->shared()->code());
-        ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
-        shared->set_code(*lazy_compile);
-        bool prev_force_debugger_active =
-            isolate_->debugger()->force_debugger_active();
-        isolate_->debugger()->set_force_debugger_active(true);
-        ASSERT(current_code->kind() == Code::FUNCTION);
-        CompileFullCodeForDebugging(shared, current_code);
-        isolate_->debugger()->set_force_debugger_active(
-            prev_force_debugger_active);
-        if (!shared->is_compiled()) {
-          shared->set_code(*current_code);
-          continue;
-        }
-      }
-
-      // Keep function code in sync with shared function info.
-      function->set_code(shared->code());
-    }
-
-    RedirectActivationsToRecompiledCodeOnThread(isolate_,
-                                                isolate_->thread_local_top());
-
-    ActiveFunctionsRedirector active_functions_redirector;
-    isolate_->thread_manager()->IterateArchivedThreads(
-          &active_functions_redirector);
   }
 }
 
@@ -2046,9 +1744,7 @@
   }
 
   // Ensure shared is compiled. Return false if this failed.
-  if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
-    return false;
-  }
+  if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
 
   // Create the debug info object.
   Handle<DebugInfo> debug_info = FACTORY->NewDebugInfo(shared);
@@ -2263,11 +1959,9 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which are no longer referenced.  The second also sweeps precisely,
-  // which saves us doing yet another GC to make the heap iterable.
-  heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
-  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "Debug::CreateScriptCache");
+  // scripts which are no longer referenced.
+  heap->CollectAllGarbage(false);
+  heap->CollectAllGarbage(false);
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -2275,8 +1969,6 @@
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
-  AssertNoAllocation no_allocation;
-
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2317,8 +2009,7 @@
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
-                                      "Debug::GetLoadedScripts");
+  isolate_->heap()->CollectAllGarbage(false);
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
@@ -2340,7 +2031,6 @@
       compiling_natives_(false),
       is_loading_debugger_(false),
       never_unload_debugger_(false),
-      force_debugger_active_(false),
       message_handler_(NULL),
       debugger_unload_pending_(false),
       host_dispatch_handler_(NULL),
@@ -2365,8 +2055,7 @@
 
 
 Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
-                                      int argc,
-                                      Handle<Object> argv[],
+                                      int argc, Object*** argv,
                                       bool* caught_exception) {
   ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
 
@@ -2383,9 +2072,7 @@
   Handle<Object> js_object = Execution::TryCall(
       Handle<JSFunction>::cast(constructor),
       Handle<JSObject>(isolate_->debug()->debug_context()->global()),
-      argc,
-      argv,
-      caught_exception);
+      argc, argv, caught_exception);
   return js_object;
 }
 
@@ -2394,11 +2081,10 @@
   // Create the execution state object.
   Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
       isolate_->debug()->break_id());
-  Handle<Object> argv[] = { break_id };
+  const int argc = 1;
+  Object** argv[argc] = { break_id.location() };
   return MakeJSObject(CStrVector("MakeExecutionState"),
-                      ARRAY_SIZE(argv),
-                      argv,
-                      caught_exception);
+                      argc, argv, caught_exception);
 }
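
Throughout this file the revert swaps a Handle<Object> argv[] array counted with ARRAY_SIZE(argv) back to a hand-maintained argc plus an Object*** vector. The macro variant derives the count from the array itself at compile time, so count and contents cannot drift apart. A self-contained illustration; Sum is a hypothetical callee, and the macro shown is the conventional definition rather than a quote of V8's:

    #include <cstddef>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    // Hypothetical callee taking an explicit count plus an argument vector.
    int Sum(int argc, const int* argv) {
      int total = 0;
      for (int i = 0; i < argc; ++i) total += argv[i];
      return total;
    }

    int main() {
      int argv[] = {1, 2, 3};
      // Adding a fourth argument updates the derived count automatically; a
      // hand-written argc would silently go stale.
      return Sum(static_cast<int>(ARRAY_SIZE(argv)), argv);
    }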
 
 
@@ -2406,9 +2092,11 @@
                                         Handle<Object> break_points_hit,
                                         bool* caught_exception) {
   // Create the new break event object.
-  Handle<Object> argv[] = { exec_state, break_points_hit };
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(),
+                          break_points_hit.location() };
   return MakeJSObject(CStrVector("MakeBreakEvent"),
-                      ARRAY_SIZE(argv),
+                      argc,
                       argv,
                       caught_exception);
 }
@@ -2420,24 +2108,23 @@
                                             bool* caught_exception) {
   Factory* factory = isolate_->factory();
   // Create the new exception event object.
-  Handle<Object> argv[] = { exec_state,
-                            exception,
-                            factory->ToBoolean(uncaught) };
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          exception.location(),
+                          uncaught ? factory->true_value().location() :
+                                     factory->false_value().location()};
   return MakeJSObject(CStrVector("MakeExceptionEvent"),
-                      ARRAY_SIZE(argv),
-                      argv,
-                      caught_exception);
+                      argc, argv, caught_exception);
 }
 
 
 Handle<Object> Debugger::MakeNewFunctionEvent(Handle<Object> function,
                                               bool* caught_exception) {
   // Create the new function event object.
-  Handle<Object> argv[] = { function };
+  const int argc = 1;
+  Object** argv[argc] = { function.location() };
   return MakeJSObject(CStrVector("MakeNewFunctionEvent"),
-                      ARRAY_SIZE(argv),
-                      argv,
-                      caught_exception);
+                      argc, argv, caught_exception);
 }
 
 
@@ -2448,11 +2135,14 @@
   // Create the compile event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> script_wrapper = GetScriptWrapper(script);
-  Handle<Object> argv[] = { exec_state,
-                            script_wrapper,
-                            factory->ToBoolean(before) };
+  const int argc = 3;
+  Object** argv[argc] = { exec_state.location(),
+                          script_wrapper.location(),
+                          before ? factory->true_value().location() :
+                                   factory->false_value().location() };
+
   return MakeJSObject(CStrVector("MakeCompileEvent"),
-                      ARRAY_SIZE(argv),
+                      argc,
                       argv,
                       caught_exception);
 }
@@ -2463,10 +2153,11 @@
   // Create the script collected event object.
   Handle<Object> exec_state = MakeExecutionState(caught_exception);
   Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
-  Handle<Object> argv[] = { exec_state, id_object };
+  const int argc = 2;
+  Object** argv[argc] = { exec_state.location(), id_object.location() };
 
   return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
-                      ARRAY_SIZE(argv),
+                      argc,
                       argv,
                       caught_exception);
 }
@@ -2616,13 +2307,12 @@
   Handle<JSValue> wrapper = GetScriptWrapper(script);
 
   // Call UpdateScriptBreakPoints expect no exceptions.
-  bool caught_exception;
-  Handle<Object> argv[] = { wrapper };
+  bool caught_exception = false;
+  const int argc = 1;
+  Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
   Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
-                     Isolate::Current()->js_builtins_object(),
-                     ARRAY_SIZE(argv),
-                     argv,
-                     &caught_exception);
+      Isolate::Current()->js_builtins_object(), argc, argv,
+      &caught_exception);
   if (caught_exception) {
     return;
   }
@@ -2735,8 +2425,7 @@
                                   v8::Debug::ClientData* client_data) {
   Handle<Foreign> callback_obj(Handle<Foreign>::cast(event_listener_));
   v8::Debug::EventCallback2 callback =
-      FUNCTION_CAST<v8::Debug::EventCallback2>(
-          callback_obj->foreign_address());
+      FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->address());
   EventDetailsImpl event_details(
       event,
       Handle<JSObject>::cast(exec_state),
@@ -2754,16 +2443,13 @@
   Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
 
   // Invoke the JavaScript debug event listener.
-  Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
-                            exec_state,
-                            event_data,
-                            event_listener_data_ };
-  bool caught_exception;
-  Execution::TryCall(fun,
-                     isolate_->global(),
-                     ARRAY_SIZE(argv),
-                     argv,
-                     &caught_exception);
+  const int argc = 4;
+  Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
+                          exec_state.location(),
+                          Handle<Object>::cast(event_data).location(),
+                          event_listener_data_.location() };
+  bool caught_exception = false;
+  Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
   // Silently ignore exceptions from debug event listeners.
 }
 
@@ -2954,7 +2640,7 @@
     command.Dispose();
 
     // Return from debug event processing if either the VM is put into the
-    // running state (through a continue command) or auto continue is active
+    // running state (through a continue command) or auto continue is active
     // and there are no more commands queued.
     if (running && !HasCommands()) {
       return;
@@ -3109,9 +2795,7 @@
 bool Debugger::IsDebuggerActive() {
   ScopedLock with(debugger_access_);
 
-  return message_handler_ != NULL ||
-      !event_listener_.is_null() ||
-      force_debugger_active_;
+  return message_handler_ != NULL || !event_listener_.is_null();
 }
 
 
@@ -3134,11 +2818,12 @@
     return isolate_->factory()->undefined_value();
   }
 
-  Handle<Object> argv[] = { exec_state, data };
+  static const int kArgc = 2;
+  Object** argv[kArgc] = { exec_state.location(), data.location() };
   Handle<Object> result = Execution::Call(
       fun,
       Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
-      ARRAY_SIZE(argv),
+      kArgc,
       argv,
       pending_exception);
   return result;
@@ -3164,7 +2849,7 @@
     v8::Debug::DebugBreak();
   }
 
-  if (Socket::SetUp()) {
+  if (Socket::Setup()) {
     if (agent_ == NULL) {
       agent_ = new DebuggerAgent(name, port);
       agent_->Start();
@@ -3206,94 +2891,6 @@
 }
 
 
-EnterDebugger::EnterDebugger()
-    : isolate_(Isolate::Current()),
-      prev_(isolate_->debug()->debugger_entry()),
-      it_(isolate_),
-      has_js_frames_(!it_.done()),
-      save_(isolate_) {
-  Debug* debug = isolate_->debug();
-  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
-  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
-  // Link recursive debugger entry.
-  debug->set_debugger_entry(this);
-
-  // Store the previous break id and frame id.
-  break_id_ = debug->break_id();
-  break_frame_id_ = debug->break_frame_id();
-
-  // Create the new break info. If there are no JavaScript frames there is no
-  // break frame id.
-  if (has_js_frames_) {
-    debug->NewBreak(it_.frame()->id());
-  } else {
-    debug->NewBreak(StackFrame::NO_ID);
-  }
-
-  // Make sure that debugger is loaded and enter the debugger context.
-  load_failed_ = !debug->Load();
-  if (!load_failed_) {
-    // NOTE the member variable save which saves the previous context before
-    // this change.
-    isolate_->set_context(*debug->debug_context());
-  }
-}
-
-
-EnterDebugger::~EnterDebugger() {
-  ASSERT(Isolate::Current() == isolate_);
-  Debug* debug = isolate_->debug();
-
-  // Restore to the previous break state.
-  debug->SetBreak(break_frame_id_, break_id_);
-
-  // Check for leaving the debugger.
-  if (!load_failed_ && prev_ == NULL) {
-    // Clear mirror cache when leaving the debugger. Skip this if there is a
-    // pending exception as clearing the mirror cache calls back into
-    // JavaScript. This can happen if the v8::Debug::Call is used in which
-    // case the exception should end up in the calling code.
-    if (!isolate_->has_pending_exception()) {
-      // Try to avoid any pending debug break breaking in the clear mirror
-      // cache JavaScript code.
-      if (isolate_->stack_guard()->IsDebugBreak()) {
-        debug->set_interrupts_pending(DEBUGBREAK);
-        isolate_->stack_guard()->Continue(DEBUGBREAK);
-      }
-      debug->ClearMirrorCache();
-    }
-
-    // Request preemption and debug break when leaving the last debugger entry
-    // if any of these were recorded while debugging.
-    if (debug->is_interrupt_pending(PREEMPT)) {
-      // This re-scheduling of preemption is to avoid starvation in some
-      // debugging scenarios.
-      debug->clear_interrupt_pending(PREEMPT);
-      isolate_->stack_guard()->Preempt();
-    }
-    if (debug->is_interrupt_pending(DEBUGBREAK)) {
-      debug->clear_interrupt_pending(DEBUGBREAK);
-      isolate_->stack_guard()->DebugBreak();
-    }
-
-    // If there are commands in the queue when leaving the debugger request
-    // that these commands are processed.
-    if (isolate_->debugger()->HasCommands()) {
-      isolate_->stack_guard()->DebugCommand();
-    }
-
-    // If leaving the debugger with the debugger no longer active unload it.
-    if (!isolate_->debugger()->IsDebuggerActive()) {
-      isolate_->debugger()->UnloadDebugger();
-    }
-  }
-
-  // Leaving this debugger entry.
-  debug->set_debugger_entry(prev_);
-}
-
-
 MessageImpl MessageImpl::NewEvent(DebugEvent event,
                                   bool running,
                                   Handle<JSObject> exec_state,
diff --git a/src/debug.h b/src/debug.h
index 474b90b..a5083eb 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -224,7 +224,7 @@
 // DebugInfo.
 class Debug {
  public:
-  void SetUp(bool create_heap_objects);
+  void Setup(bool create_heap_objects);
   bool Load();
   void Unload();
   bool IsLoaded() { return !debug_context_.is_null(); }
@@ -239,7 +239,6 @@
   void ClearBreakPoint(Handle<Object> break_point_object);
   void ClearAllBreakPoints();
   void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
-  void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
   void FloodHandlerWithOneShot();
   void ChangeBreakOnException(ExceptionBreakType type, bool enable);
   bool IsBreakOnException(ExceptionBreakType type);
@@ -403,11 +402,9 @@
   static void GenerateStoreICDebugBreak(MacroAssembler* masm);
   static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
   static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
+  static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
   static void GenerateReturnDebugBreak(MacroAssembler* masm);
-  static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
-  static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
-  static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
-  static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
+  static void GenerateStubNoRegistersDebugBreak(MacroAssembler* masm);
   static void GenerateSlotDebugBreak(MacroAssembler* masm);
   static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
 
@@ -710,8 +707,7 @@
   void DebugRequest(const uint16_t* json_request, int length);
 
   Handle<Object> MakeJSObject(Vector<const char> constructor_name,
-                              int argc,
-                              Handle<Object> argv[],
+                              int argc, Object*** argv,
                               bool* caught_exception);
   Handle<Object> MakeExecutionState(bool* caught_exception);
   Handle<Object> MakeBreakEvent(Handle<Object> exec_state,
@@ -815,15 +811,11 @@
   }
 
   void set_compiling_natives(bool compiling_natives) {
-    compiling_natives_ = compiling_natives;
+    Debugger::compiling_natives_ = compiling_natives;
   }
   bool compiling_natives() const { return compiling_natives_; }
   void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
   bool is_loading_debugger() const { return is_loading_debugger_; }
-  void set_force_debugger_active(bool force_debugger_active) {
-    force_debugger_active_ = force_debugger_active;
-  }
-  bool force_debugger_active() const { return force_debugger_active_; }
 
   bool IsDebuggerActive();
 
@@ -849,7 +841,6 @@
   bool compiling_natives_;  // Are we compiling natives?
   bool is_loading_debugger_;  // Are we loading the debugger?
   bool never_unload_debugger_;  // Can we unload the debugger?
-  bool force_debugger_active_;  // Activate debugger without event listeners.
   v8::Debug::MessageHandler2 message_handler_;
   bool debugger_unload_pending_;  // Was message handler cleared?
   v8::Debug::HostDispatchHandler host_dispatch_handler_;
@@ -880,8 +871,91 @@
 // some reason could not be entered FailedToEnter will return true.
 class EnterDebugger BASE_EMBEDDED {
  public:
-  EnterDebugger();
-  ~EnterDebugger();
+  EnterDebugger()
+      : isolate_(Isolate::Current()),
+        prev_(isolate_->debug()->debugger_entry()),
+        it_(isolate_),
+        has_js_frames_(!it_.done()),
+        save_(isolate_) {
+    Debug* debug = isolate_->debug();
+    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+    // Link recursive debugger entry.
+    debug->set_debugger_entry(this);
+
+    // Store the previous break id and frame id.
+    break_id_ = debug->break_id();
+    break_frame_id_ = debug->break_frame_id();
+
+    // Create the new break info. If there are no JavaScript frames there is no
+    // break frame id.
+    if (has_js_frames_) {
+      debug->NewBreak(it_.frame()->id());
+    } else {
+      debug->NewBreak(StackFrame::NO_ID);
+    }
+
+    // Make sure that debugger is loaded and enter the debugger context.
+    load_failed_ = !debug->Load();
+    if (!load_failed_) {
+      // NOTE the member variable save which saves the previous context before
+      // this change.
+      isolate_->set_context(*debug->debug_context());
+    }
+  }
+
+  ~EnterDebugger() {
+    ASSERT(Isolate::Current() == isolate_);
+    Debug* debug = isolate_->debug();
+
+    // Restore to the previous break state.
+    debug->SetBreak(break_frame_id_, break_id_);
+
+    // Check for leaving the debugger.
+    if (prev_ == NULL) {
+      // Clear mirror cache when leaving the debugger. Skip this if there is a
+      // pending exception as clearing the mirror cache calls back into
+      // JavaScript. This can happen if the v8::Debug::Call is used in which
+      // case the exception should end up in the calling code.
+      if (!isolate_->has_pending_exception()) {
+        // Try to avoid any pending debug break breaking in the clear mirror
+        // cache JavaScript code.
+        if (isolate_->stack_guard()->IsDebugBreak()) {
+          debug->set_interrupts_pending(DEBUGBREAK);
+          isolate_->stack_guard()->Continue(DEBUGBREAK);
+        }
+        debug->ClearMirrorCache();
+      }
+
+      // Request preemption and debug break when leaving the last debugger entry
+      // if any of these were recorded while debugging.
+      if (debug->is_interrupt_pending(PREEMPT)) {
+        // This re-scheduling of preemption is to avoid starvation in some
+        // debugging scenarios.
+        debug->clear_interrupt_pending(PREEMPT);
+        isolate_->stack_guard()->Preempt();
+      }
+      if (debug->is_interrupt_pending(DEBUGBREAK)) {
+        debug->clear_interrupt_pending(DEBUGBREAK);
+        isolate_->stack_guard()->DebugBreak();
+      }
+
+      // If there are commands in the queue when leaving the debugger request
+      // that these commands are processed.
+      if (isolate_->debugger()->HasCommands()) {
+        isolate_->stack_guard()->DebugCommand();
+      }
+
+      // If leaving the debugger with the debugger no longer active unload it.
+      if (!isolate_->debugger()->IsDebuggerActive()) {
+        isolate_->debugger()->UnloadDebugger();
+      }
+    }
+
+    // Leaving this debugger entry.
+    debug->set_debugger_entry(prev_);
+  }
 
   // Check whether the debugger could be entered.
   inline bool FailedToEnter() { return load_failed_; }
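
EnterDebugger's constructor and destructor move back inline into the header here. The class is an RAII guard: construction links a new debugger entry and saves the break state, destruction restores it, and the leave-time work (mirror-cache clearing, pending interrupts, unload checks) runs only when the outermost entry unwinds. A minimal sketch of that nesting discipline, with stand-in types rather than V8's:

    #include <cstdio>

    struct DebuggerState {
      int entry_depth = 0;  // stand-in for the chain of debugger entries
    };

    class EnterDebuggerScope {
     public:
      explicit EnterDebuggerScope(DebuggerState* state) : state_(state) {
        ++state_->entry_depth;  // link this entry
      }
      ~EnterDebuggerScope() {
        if (--state_->entry_depth == 0) {
          // Leave-time work belongs here, and runs exactly once.
          std::puts("leaving outermost debugger entry");
        }
      }
     private:
      DebuggerState* state_;
    };

    int main() {
      DebuggerState state;
      EnterDebuggerScope outer(&state);
      { EnterDebuggerScope inner(&state); }  // nested entry: nothing to do
      return 0;
    }  // outer entry unwinds here

Because every early return destroys the guard, the debugger entry chain can never be left linked by accident.
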
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 2a30ddd..0ada28b 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,13 +52,11 @@
 
 DeoptimizerData::~DeoptimizerData() {
   if (eager_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        eager_deoptimization_entry_code_);
+    eager_deoptimization_entry_code_->Free(EXECUTABLE);
     eager_deoptimization_entry_code_ = NULL;
   }
   if (lazy_deoptimization_entry_code_ != NULL) {
-    Isolate::Current()->memory_allocator()->Free(
-        lazy_deoptimization_entry_code_);
+    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
     lazy_deoptimization_entry_code_ = NULL;
   }
 }
@@ -73,8 +71,6 @@
 #endif
 
 
-// We rely on this function not causing a GC.  It is called from generated code
-// without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -104,27 +100,10 @@
   return result;
 }
 
-
-int Deoptimizer::ConvertJSFrameIndexToFrameIndex(int jsframe_index) {
-  if (jsframe_index == 0) return 0;
-
-  int frame_index = 0;
-  while (jsframe_index >= 0) {
-    FrameDescription* frame = output_[frame_index];
-    if (frame->GetFrameType() == StackFrame::JAVA_SCRIPT) {
-      jsframe_index--;
-    }
-    frame_index++;
-  }
-
-  return frame_index - 1;
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
     JavaScriptFrame* frame,
-    int jsframe_index,
+    int frame_index,
     Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   ASSERT(frame->is_optimized());
@@ -160,48 +139,22 @@
 
   // Create the GC safe output frame information and register it for GC
   // handling.
-  ASSERT_LT(jsframe_index, deoptimizer->jsframe_count());
-
-  // Convert JS frame index into frame index.
-  int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
-
-  bool has_arguments_adaptor =
-      frame_index > 0 &&
-      deoptimizer->output_[frame_index - 1]->GetFrameType() ==
-      StackFrame::ARGUMENTS_ADAPTOR;
-
-  int construct_offset = has_arguments_adaptor ? 2 : 1;
-  bool has_construct_stub =
-      frame_index >= construct_offset &&
-      deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
-      StackFrame::CONSTRUCT;
-
-  DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
-                                                        frame_index,
-                                                        has_arguments_adaptor,
-                                                        has_construct_stub);
+  ASSERT_LT(frame_index, deoptimizer->output_count());
+  DeoptimizedFrameInfo* info =
+      new DeoptimizedFrameInfo(deoptimizer, frame_index);
   isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
 
   // Get the "simulated" top and size for the requested frame.
-  FrameDescription* parameters_frame =
-      deoptimizer->output_[
-          has_arguments_adaptor ? (frame_index - 1) : frame_index];
-
-  uint32_t parameters_size = (info->parameters_count() + 1) * kPointerSize;
-  Address parameters_top = reinterpret_cast<Address>(
-      parameters_frame->GetTop() + (parameters_frame->GetFrameSize() -
-                                    parameters_size));
-
-  uint32_t expressions_size = info->expression_count() * kPointerSize;
-  Address expressions_top = reinterpret_cast<Address>(
-      deoptimizer->output_[frame_index]->GetTop());
+  Address top =
+      reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop());
+  uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize();
 
   // Done with the GC-unsafe frame descriptions. This re-enables allocation.
   deoptimizer->DeleteFrameDescriptions();
 
   // Allocate a heap number for the doubles belonging to this frame.
   deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
-      parameters_top, parameters_size, expressions_top, expressions_size, info);
+      top, size, info);
 
   // Finished using the deoptimizer instance.
   delete deoptimizer;
@@ -307,16 +260,11 @@
   AssertNoAllocation no_allocation;
 
   // Run through the list of all global contexts and deoptimize.
-  Object* context = Isolate::Current()->heap()->global_contexts_list();
-  while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the global field of the context can be undefined.
-    Object* global = Context::cast(context)->get(Context::GLOBAL_INDEX);
-    if (!global->IsUndefined()) {
-      VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
-                                                visitor);
-    }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  Object* global = Isolate::Current()->heap()->global_contexts_list();
+  while (!global->IsUndefined()) {
+    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
+                                              visitor);
+    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
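
The loop above threads through the heap's list of global contexts, an intrusive list linked via each context's NEXT_CONTEXT_LINK slot and terminated by undefined, visiting each context's global object. A toy model of the traversal, with nullptr playing the sentinel and plain structs standing in for V8's types:

    #include <functional>

    struct Context {
      int global_object;  // stand-in for the context's global
      Context* next;      // stand-in for the NEXT_CONTEXT_LINK slot
    };

    // Visit every context's global; nullptr plays the role of the undefined
    // sentinel that terminates the real list.
    void VisitAllGlobals(Context* head, const std::function<void(int)>& visit) {
      for (Context* c = head; c != nullptr; c = c->next) {
        visit(c->global_object);
      }
    }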
 
@@ -356,7 +304,6 @@
       fp_to_sp_delta_(fp_to_sp_delta),
       input_(NULL),
       output_count_(0),
-      jsframe_count_(0),
       output_(NULL),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
@@ -382,26 +329,6 @@
   if (type == EAGER) {
     ASSERT(from == NULL);
     optimized_code_ = function_->code();
-    if (FLAG_trace_deopt && FLAG_code_comments) {
-      // Print instruction associated with this bailout.
-      const char* last_comment = NULL;
-      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
-          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
-        RelocInfo* info = it.rinfo();
-        if (info->rmode() == RelocInfo::COMMENT) {
-          last_comment = reinterpret_cast<const char*>(info->data());
-        }
-        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
-          unsigned id = Deoptimizer::GetDeoptimizationId(
-              info->target_address(), Deoptimizer::EAGER);
-          if (id == bailout_id && last_comment != NULL) {
-            PrintF("            %s\n", last_comment);
-            break;
-          }
-        }
-      }
-    }
   } else if (type == LAZY) {
     optimized_code_ = FindDeoptimizingCodeFromAddress(from);
     ASSERT(optimized_code_ != NULL);
@@ -419,7 +346,9 @@
   ASSERT(HEAP->allow_allocation(false));
   unsigned size = ComputeInputFrameSize();
   input_ = new(size) FrameDescription(size, function);
-  input_->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  input_->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 }
 
 
@@ -443,7 +372,7 @@
 Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
   ASSERT(id >= 0);
   if (id >= kNumberOfEntries) return NULL;
-  MemoryChunk* base = NULL;
+  LargeObjectChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -457,12 +386,12 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
+      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  MemoryChunk* base = NULL;
+  LargeObjectChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
@@ -470,14 +399,14 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->area_start() ||
-      addr >= base->area_start() +
+      addr < base->GetStartAddress() ||
+      addr >= base->GetStartAddress() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->area_start()) % table_entry_size_);
-  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
+      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
+  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
 }
 
 
@@ -519,8 +448,6 @@
 }
 
 
-// We rely on this function not causing a GC.  It is called from generated code
-// without having a real stack frame in place.
 void Deoptimizer::DoComputeOutputFrames() {
   if (bailout_type_ == OSR) {
     DoComputeOsrOutputFrame();
@@ -555,7 +482,6 @@
   // Read the number of output frames and allocate an array for their
   // descriptions.
   int count = iterator.Next();
-  iterator.Next();  // Drop JS frames count.
   ASSERT(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (int i = 0; i < count; ++i) {
@@ -565,24 +491,7 @@
 
   // Translate each output frame.
   for (int i = 0; i < count; ++i) {
-    // Read the ast node id, function, and frame height for this output frame.
-    Translation::Opcode opcode =
-        static_cast<Translation::Opcode>(iterator.Next());
-    switch (opcode) {
-      case Translation::JS_FRAME:
-        DoComputeJSFrame(&iterator, i);
-        jsframe_count_++;
-        break;
-      case Translation::ARGUMENTS_ADAPTOR_FRAME:
-        DoComputeArgumentsAdaptorFrame(&iterator, i);
-        break;
-      case Translation::CONSTRUCT_STUB_FRAME:
-        DoComputeConstructStubFrame(&iterator, i);
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
+    DoComputeFrame(&iterator, i);
   }
 
   // Print some helpful diagnostic information.
@@ -623,52 +532,39 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
-    Address parameters_top,
-    uint32_t parameters_size,
-    Address expressions_top,
-    uint32_t expressions_size,
-    DeoptimizedFrameInfo* info) {
+    Address top, uint32_t size, DeoptimizedFrameInfo* info) {
   ASSERT_EQ(DEBUGGER, bailout_type_);
-  Address parameters_bottom = parameters_top + parameters_size;
-  Address expressions_bottom = expressions_top + expressions_size;
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
     HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
 
     // Check if the heap number to materialize actually belongs to the frame
     // being extracted.
     Address slot = d.slot_address();
-    if (parameters_top <= slot && slot < parameters_bottom) {
+    if (top <= slot && slot < top + size) {
       Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
-      int index = (info->parameters_count() - 1) -
-          static_cast<int>(slot - parameters_top) / kPointerSize;
-
+      // Calculate the index with the bottom of the expression stack
+      // at index 0, and the fixed part (including incoming arguments)
+      // at negative indexes.
+      int index = static_cast<int>(
+          info->expression_count_ - (slot - top) / kPointerSize - 1);
       if (FLAG_trace_deopt) {
         PrintF("Materializing a new heap number %p [%e] in slot %p"
-               "for parameter slot #%d\n",
+               "for stack index %d\n",
                reinterpret_cast<void*>(*num),
                d.value(),
                d.slot_address(),
                index);
       }
-
-      info->SetParameter(index, *num);
-    } else if (expressions_top <= slot && slot < expressions_bottom) {
-      Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-
-      int index = info->expression_count() - 1 -
-          static_cast<int>(slot - expressions_top) / kPointerSize;
-
-      if (FLAG_trace_deopt) {
-        PrintF("Materializing a new heap number %p [%e] in slot %p"
-               "for expression slot #%d\n",
-               reinterpret_cast<void*>(*num),
-               d.value(),
-               d.slot_address(),
-               index);
+      if (index >= 0) {
+        info->SetExpression(index, *num);
+      } else {
+        // Calculate parameter index subtracting one for the receiver.
+        int parameter_index =
+            index +
+            static_cast<int>(size) / kPointerSize -
+            info->expression_count_ - 1;
+        info->SetParameter(parameter_index, *num);
       }
-
-      info->SetExpression(index, *num);
     }
   }
 }
@@ -693,9 +589,7 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::JS_FRAME:
-    case Translation::ARGUMENTS_ADAPTOR_FRAME:
-    case Translation::CONSTRUCT_STUB_FRAME:
+    case Translation::FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();
       return;
@@ -705,13 +599,11 @@
       intptr_t input_value = input_->GetRegister(input_reg);
       if (FLAG_trace_deopt) {
         PrintF(
-            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
+            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
             output_[frame_index]->GetTop() + output_offset,
             output_offset,
             input_value,
             converter.NameOfCPURegister(input_reg));
-        reinterpret_cast<Object*>(input_value)->ShortPrint();
-        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -764,17 +656,15 @@
     case Translation::STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(input_slot_index);
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
       intptr_t input_value = input_->GetFrameSlot(input_offset);
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
                output_offset,
                input_value,
                input_offset);
-        reinterpret_cast<Object*>(input_value)->ShortPrint();
-        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -783,7 +673,7 @@
     case Translation::INT32_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(input_slot_index);
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
       bool is_smi = Smi::IsValid(value);
       if (FLAG_trace_deopt) {
@@ -812,7 +702,7 @@
     case Translation::DOUBLE_STACK_SLOT: {
       int input_slot_index = iterator->Next();
       unsigned input_offset =
-          input_->GetOffsetFromSlotIndex(input_slot_index);
+          input_->GetOffsetFromSlotIndex(this, input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
@@ -845,6 +735,7 @@
     case Translation::ARGUMENTS_OBJECT: {
       // Use the arguments marker value as a sentinel and fill in the arguments
       // object after the deoptimized frame is built.
+      ASSERT(frame_index == 0);  // Only supported for first frame.
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
                output_[frame_index]->GetTop() + output_offset,
@@ -880,9 +771,7 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::JS_FRAME:
-    case Translation::ARGUMENTS_ADAPTOR_FRAME:
-    case Translation::CONSTRUCT_STUB_FRAME:
+    case Translation::FRAME:
     case Translation::DUPLICATE:
       UNREACHABLE();  // Malformed input.
        return false;
@@ -945,14 +834,12 @@
     case Translation::STACK_SLOT: {
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(output_index);
+          output->GetOffsetFromSlotIndex(this, output_index);
       if (FLAG_trace_osr) {
-        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
+        PrintF("    [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
                output_offset,
                input_value,
                *input_offset);
-        reinterpret_cast<Object*>(input_value)->ShortPrint();
-        PrintF("\n");
       }
       output->SetFrameSlot(output_offset, input_value);
       break;
@@ -964,7 +851,7 @@
 
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(output_index);
+          output->GetOffsetFromSlotIndex(this, output_index);
       int int32_value = input_object->IsSmi()
           ? Smi::cast(input_object)->value()
           : DoubleToInt32(input_object->Number());
@@ -996,7 +883,7 @@
 
       int output_index = iterator->Next();
       unsigned output_offset =
-          output->GetOffsetFromSlotIndex(output_index);
+          output->GetOffsetFromSlotIndex(this, output_index);
       double double_value = input_object->Number();
       uint64_t int_value = BitCast<uint64_t, double>(double_value);
       int32_t lower = static_cast<int32_t>(int_value);
@@ -1052,10 +939,7 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    PatchStackCheckCodeAt(unoptimized_code,
-                          pc_after,
-                          check_code,
-                          replacement_code);
+    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -1074,10 +958,7 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    RevertStackCheckCodeAt(unoptimized_code,
-                           pc_after,
-                           check_code,
-                           replacement_code);
+    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -1107,8 +988,8 @@
 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
   // The fixed part of the frame consists of the return address, frame
   // pointer, function, context, and all the incoming arguments.
-  return ComputeIncomingArgumentSize(function) +
-      StandardFrameConstants::kFixedFrameSize;
+  static const unsigned kFixedSlotSize = 4 * kPointerSize;
+  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
 }
 
 
@@ -1144,7 +1025,7 @@
 }
 
 
-MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
+LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
@@ -1158,16 +1039,12 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  MemoryChunk* chunk =
-      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            EXECUTABLE,
-                                                            NULL);
-  ASSERT(chunk->area_size() >= desc.instr_size);
+  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
-  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
+  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
   return chunk;
 }
 
@@ -1216,8 +1093,7 @@
       function_(function),
       top_(kZapUint32),
       pc_(kZapUint32),
-      fp_(kZapUint32),
-      context_(kZapUint32) {
+      fp_(kZapUint32) {
   // Zap all the registers.
   for (int r = 0; r < Register::kNumRegisters; r++) {
     SetRegister(r, kZapUint32);
@@ -1230,62 +1106,49 @@
 }
 
 
-int FrameDescription::ComputeFixedSize() {
-  return StandardFrameConstants::kFixedFrameSize +
-      (ComputeParametersCount() + 1) * kPointerSize;
-}
-
-
-unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
+unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
+                                                  int slot_index) {
   if (slot_index >= 0) {
     // Local or spill slots. Skip the fixed part of the frame
     // including all arguments.
-    unsigned base = GetFrameSize() - ComputeFixedSize();
+    unsigned base =
+        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
     return base - ((slot_index + 1) * kPointerSize);
   } else {
     // Incoming parameter.
-    int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
-    unsigned base = GetFrameSize() - arg_size;
+    unsigned base = GetFrameSize() -
+        deoptimizer->ComputeIncomingArgumentSize(GetFunction());
     return base - ((slot_index + 1) * kPointerSize);
   }
 }
 
 
 int FrameDescription::ComputeParametersCount() {
-  switch (type_) {
-    case StackFrame::JAVA_SCRIPT:
-      return function_->shared()->formal_parameter_count();
-    case StackFrame::ARGUMENTS_ADAPTOR: {
-      // Last slot contains number of incomming arguments as a smi.
-      // Can't use GetExpression(0) because it would cause infinite recursion.
-      return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
-    }
-    default:
-      UNREACHABLE();
-      return 0;
-  }
+  return function_->shared()->formal_parameter_count();
 }
 
 
-Object* FrameDescription::GetParameter(int index) {
+Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
   ASSERT(index >= 0);
   ASSERT(index < ComputeParametersCount());
   // The slot indexes for incoming arguments are negative.
-  unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
+  unsigned offset = GetOffsetFromSlotIndex(deoptimizer,
+                                           index - ComputeParametersCount());
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
 
 
-unsigned FrameDescription::GetExpressionCount() {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
-  unsigned size = GetFrameSize() - ComputeFixedSize();
+unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
+  unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction());
   return size / kPointerSize;
 }
 
 
-Object* FrameDescription::GetExpression(int index) {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
-  unsigned offset = GetOffsetFromSlotIndex(index);
+Object* FrameDescription::GetExpression(Deoptimizer* deoptimizer, int index) {
+  ASSERT_EQ(Code::FUNCTION, kind_);
+  unsigned offset = GetOffsetFromSlotIndex(deoptimizer, index);
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
 
@@ -1331,22 +1194,8 @@
 }
 
 
-void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
-  buffer_->Add(CONSTRUCT_STUB_FRAME);
-  buffer_->Add(literal_id);
-  buffer_->Add(height);
-}
-
-
-void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
-  buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
-  buffer_->Add(literal_id);
-  buffer_->Add(height);
-}
-
-
-void Translation::BeginJSFrame(int node_id, int literal_id, unsigned height) {
-  buffer_->Add(JS_FRAME);
+void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
+  buffer_->Add(FRAME);
   buffer_->Add(node_id);
   buffer_->Add(literal_id);
   buffer_->Add(height);
@@ -1410,6 +1259,7 @@
     case ARGUMENTS_OBJECT:
     case DUPLICATE:
       return 0;
+    case BEGIN:
     case REGISTER:
     case INT32_REGISTER:
     case DOUBLE_REGISTER:
@@ -1418,11 +1268,7 @@
     case DOUBLE_STACK_SLOT:
     case LITERAL:
       return 1;
-    case BEGIN:
-    case ARGUMENTS_ADAPTOR_FRAME:
-    case CONSTRUCT_STUB_FRAME:
-      return 2;
-    case JS_FRAME:
+    case FRAME:
       return 3;
   }
   UNREACHABLE();
@@ -1436,12 +1282,8 @@
   switch (opcode) {
     case BEGIN:
       return "BEGIN";
-    case JS_FRAME:
-      return "JS_FRAME";
-    case ARGUMENTS_ADAPTOR_FRAME:
-      return "ARGUMENTS_ADAPTOR_FRAME";
-    case CONSTRUCT_STUB_FRAME:
-      return "CONSTRUCT_STUB_FRAME";
+    case FRAME:
+      return "FRAME";
     case REGISTER:
       return "REGISTER";
     case INT32_REGISTER:
@@ -1495,9 +1337,7 @@
 
   switch (opcode) {
     case Translation::BEGIN:
-    case Translation::JS_FRAME:
-    case Translation::ARGUMENTS_ADAPTOR_FRAME:
-    case Translation::CONSTRUCT_STUB_FRAME:
+    case Translation::FRAME:
       // Peeled off before getting here.
       break;
 
@@ -1543,27 +1383,9 @@
 }
 
 
-void SlotRef::ComputeSlotsForArguments(Vector<SlotRef>* args_slots,
-                                       TranslationIterator* it,
-                                       DeoptimizationInputData* data,
-                                       JavaScriptFrame* frame) {
-  // Process the translation commands for the arguments.
-
-  // Skip the translation command for the receiver.
-  it->Skip(Translation::NumberOfOperandsFor(
-      static_cast<Translation::Opcode>(it->Next())));
-
-  // Compute slots for arguments.
-  for (int i = 0; i < args_slots->length(); ++i) {
-    (*args_slots)[i] = ComputeSlotForNextArgument(it, data, frame);
-  }
-}
-
-
-Vector<SlotRef> SlotRef::ComputeSlotMappingForArguments(
-    JavaScriptFrame* frame,
-    int inlined_jsframe_index,
-    int formal_parameter_count) {
+void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots) {
   AssertNoAllocation no_gc;
   int deopt_index = AstNode::kNoNumber;
   DeoptimizationInputData* data =
@@ -1572,81 +1394,51 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  it.Next();  // Drop frame count.
-  int jsframe_count = it.Next();
-  USE(jsframe_count);
-  ASSERT(jsframe_count > inlined_jsframe_index);
-  int jsframes_to_skip = inlined_jsframe_index;
+  int frame_count = it.Next();
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+  int frames_to_skip = inlined_frame_index;
   while (true) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    if (opcode == Translation::ARGUMENTS_ADAPTOR_FRAME) {
-      if (jsframes_to_skip == 0) {
-        ASSERT(Translation::NumberOfOperandsFor(opcode) == 2);
-
-        it.Skip(1);  // literal id
-        int height = it.Next();
-
-        // We reached the arguments adaptor frame corresponding to the
-        // inlined function in question.  Number of arguments is height - 1.
-        Vector<SlotRef> args_slots =
-            Vector<SlotRef>::New(height - 1);  // Minus receiver.
-        ComputeSlotsForArguments(&args_slots, &it, data, frame);
-        return args_slots;
-      }
-    } else if (opcode == Translation::JS_FRAME) {
-      if (jsframes_to_skip == 0) {
-        // Skip over operands to advance to the next opcode.
-        it.Skip(Translation::NumberOfOperandsFor(opcode));
-
-        // We reached the frame corresponding to the inlined function
-        // in question.  Process the translation commands for the
-        // arguments.  Number of arguments is equal to the number of
-        // format parameter count.
-        Vector<SlotRef> args_slots =
-            Vector<SlotRef>::New(formal_parameter_count);
-        ComputeSlotsForArguments(&args_slots, &it, data, frame);
-        return args_slots;
-      }
-      jsframes_to_skip--;
-    }
-
     // Skip over operands to advance to the next opcode.
     it.Skip(Translation::NumberOfOperandsFor(opcode));
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function
+        // in question.  Process the translation commands for the
+        // arguments.
+        //
+        // Skip the translation command for the receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+        return;
+      }
+      frames_to_skip--;
+    }
   }
 
   UNREACHABLE();
-  return Vector<SlotRef>();
 }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
-                                           int frame_index,
-                                           bool has_arguments_adaptor,
-                                           bool has_construct_stub) {
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(
+    Deoptimizer* deoptimizer, int frame_index) {
   FrameDescription* output_frame = deoptimizer->output_[frame_index];
-  function_ = output_frame->GetFunction();
-  has_construct_stub_ = has_construct_stub;
-  expression_count_ = output_frame->GetExpressionCount();
-  expression_stack_ = new Object*[expression_count_];
-  // Get the source position using the unoptimized code.
-  Address pc = reinterpret_cast<Address>(output_frame->GetPc());
-  Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
-  source_position_ = code->SourcePosition(pc);
-
-  for (int i = 0; i < expression_count_; i++) {
-    SetExpression(i, output_frame->GetExpression(i));
-  }
-
-  if (has_arguments_adaptor) {
-    output_frame = deoptimizer->output_[frame_index - 1];
-    ASSERT(output_frame->GetFrameType() == StackFrame::ARGUMENTS_ADAPTOR);
-  }
-
+  SetFunction(output_frame->GetFunction());
+  expression_count_ = output_frame->GetExpressionCount(deoptimizer);
   parameters_count_ = output_frame->ComputeParametersCount();
   parameters_ = new Object*[parameters_count_];
   for (int i = 0; i < parameters_count_; i++) {
-    SetParameter(i, output_frame->GetParameter(i));
+    SetParameter(i, output_frame->GetParameter(deoptimizer, i));
+  }
+  expression_stack_ = new Object*[expression_count_];
+  for (int i = 0; i < expression_count_; i++) {
+    SetExpression(i, output_frame->GetExpression(deoptimizer, i));
   }
 }
 
@@ -1656,7 +1448,6 @@
   delete[] parameters_;
 }
 
-
 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
   v->VisitPointer(BitCast<Object**>(&function_));
   v->VisitPointers(parameters_, parameters_ + parameters_count_);
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 6bc4a51..8641261 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -86,8 +86,8 @@
 #endif
 
  private:
-  MemoryChunk* eager_deoptimization_entry_code_;
-  MemoryChunk* lazy_deoptimization_entry_code_;
+  LargeObjectChunk* eager_deoptimization_entry_code_;
+  LargeObjectChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -119,9 +119,6 @@
 
   int output_count() const { return output_count_; }
 
-  // Number of created JS frames. Not all created frames are necessarily JS.
-  int jsframe_count() const { return jsframe_count_; }
-
   static Deoptimizer* New(JSFunction* function,
                           BailoutType type,
                           unsigned bailout_id,
@@ -134,7 +131,7 @@
   // The returned object with information on the optimized frame needs to be
   // freed before another one can be generated.
   static DeoptimizedFrameInfo* DebuggerInspectableFrame(JavaScriptFrame* frame,
-                                                        int jsframe_index,
+                                                        int frame_index,
                                                         Isolate* isolate);
   static void DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
                                              Isolate* isolate);
@@ -176,8 +173,7 @@
 
   // Patch stack guard check at instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
-  static void PatchStackCheckCodeAt(Code* unoptimized_code,
-                                    Address pc_after,
+  static void PatchStackCheckCodeAt(Address pc_after,
                                     Code* check_code,
                                     Code* replacement_code);
 
@@ -189,8 +185,7 @@
 
   // Change all patched stack guard checks in the unoptimized code
   // back to a normal stack guard check.
-  static void RevertStackCheckCodeAt(Code* unoptimized_code,
-                                     Address pc_after,
+  static void RevertStackCheckCodeAt(Address pc_after,
                                      Code* check_code,
                                      Code* replacement_code);
 
@@ -199,11 +194,7 @@
   void MaterializeHeapNumbers();
 #ifdef ENABLE_DEBUGGER_SUPPORT
   void MaterializeHeapNumbersForDebuggerInspectableFrame(
-      Address parameters_top,
-      uint32_t parameters_size,
-      Address expressions_top,
-      uint32_t expressions_size,
-      DeoptimizedFrameInfo* info);
+      Address top, uint32_t size, DeoptimizedFrameInfo* info);
 #endif
 
   static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -259,10 +250,8 @@
     int count_;
   };
 
-  int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
-
  private:
-  static const int kNumberOfEntries = 16384;
+  static const int kNumberOfEntries = 4096;
 
   Deoptimizer(Isolate* isolate,
               JSFunction* function,
@@ -275,11 +264,7 @@
 
   void DoComputeOutputFrames();
   void DoComputeOsrOutputFrame();
-  void DoComputeJSFrame(TranslationIterator* iterator, int frame_index);
-  void DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
-                                      int frame_index);
-  void DoComputeConstructStubFrame(TranslationIterator* iterator,
-                                   int frame_index);
+  void DoComputeFrame(TranslationIterator* iterator, int frame_index);
   void DoTranslateCommand(TranslationIterator* iterator,
                           int frame_index,
                           unsigned output_offset);
@@ -300,7 +285,7 @@
 
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static MemoryChunk* CreateCode(BailoutType type);
+  static LargeObjectChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -327,8 +312,6 @@
   FrameDescription* input_;
   // Number of output frames.
   int output_count_;
-  // Number of output js frames.
-  int jsframe_count_;
   // Array of output frame descriptions.
   FrameDescription** output_;
 
@@ -368,27 +351,14 @@
 
   JSFunction* GetFunction() const { return function_; }
 
-  unsigned GetOffsetFromSlotIndex(int slot_index);
+  unsigned GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, int slot_index);
 
   intptr_t GetFrameSlot(unsigned offset) {
     return *GetFrameSlotPointer(offset);
   }
 
   double GetDoubleFrameSlot(unsigned offset) {
-    intptr_t* ptr = GetFrameSlotPointer(offset);
-#if V8_TARGET_ARCH_MIPS
-    // Prevent gcc from using load-double (mips ldc1) on (possibly)
-    // non-64-bit aligned double. Uses two lwc1 instructions.
-    union conversion {
-      double d;
-      uint32_t u[2];
-    } c;
-    c.u[0] = *reinterpret_cast<uint32_t*>(ptr);
-    c.u[1] = *(reinterpret_cast<uint32_t*>(ptr) + 1);
-    return c.d;
-#else
-    return *reinterpret_cast<double*>(ptr);
-#endif
+    return *reinterpret_cast<double*>(GetFrameSlotPointer(offset));
   }
 
   void SetFrameSlot(unsigned offset, intptr_t value) {
@@ -424,28 +394,27 @@
   intptr_t GetFp() const { return fp_; }
   void SetFp(intptr_t fp) { fp_ = fp; }
 
-  intptr_t GetContext() const { return context_; }
-  void SetContext(intptr_t context) { context_ = context; }
-
   Smi* GetState() const { return state_; }
   void SetState(Smi* state) { state_ = state; }
 
   void SetContinuation(intptr_t pc) { continuation_ = pc; }
 
-  StackFrame::Type GetFrameType() const { return type_; }
-  void SetFrameType(StackFrame::Type type) { type_ = type; }
+#ifdef DEBUG
+  Code::Kind GetKind() const { return kind_; }
+  void SetKind(Code::Kind kind) { kind_ = kind; }
+#endif
 
   // Get the incoming arguments count.
   int ComputeParametersCount();
 
   // Get a parameter value for an unoptimized frame.
-  Object* GetParameter(int index);
+  Object* GetParameter(Deoptimizer* deoptimizer, int index);
 
   // Get the expression stack height for an unoptimized frame.
-  unsigned GetExpressionCount();
+  unsigned GetExpressionCount(Deoptimizer* deoptimizer);
 
   // Get the expression stack value for an unoptimized frame.
-  Object* GetExpression(int index);
+  Object* GetExpression(Deoptimizer* deoptimizer, int index);
 
   static int registers_offset() {
     return OFFSET_OF(FrameDescription, registers_);
@@ -488,8 +457,6 @@
   intptr_t top_;
   intptr_t pc_;
   intptr_t fp_;
-  intptr_t context_;
-  StackFrame::Type type_;
   Smi* state_;
 #ifdef DEBUG
   Code::Kind kind_;
@@ -508,8 +475,6 @@
     return reinterpret_cast<intptr_t*>(
         reinterpret_cast<Address>(this) + frame_content_offset() + offset);
   }
-
-  int ComputeFixedSize();
 };
 
 
@@ -552,9 +517,7 @@
  public:
   enum Opcode {
     BEGIN,
-    JS_FRAME,
-    CONSTRUCT_STUB_FRAME,
-    ARGUMENTS_ADAPTOR_FRAME,
+    FRAME,
     REGISTER,
     INT32_REGISTER,
     DOUBLE_REGISTER,
@@ -569,20 +532,17 @@
     DUPLICATE
   };
 
-  Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count)
+  Translation(TranslationBuffer* buffer, int frame_count)
       : buffer_(buffer),
         index_(buffer->CurrentIndex()) {
     buffer_->Add(BEGIN);
     buffer_->Add(frame_count);
-    buffer_->Add(jsframe_count);
   }
 
   int index() const { return index_; }
 
   // Commands.
-  void BeginJSFrame(int node_id, int literal_id, unsigned height);
-  void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
-  void BeginConstructStubFrame(int literal_id, unsigned height);
+  void BeginFrame(int node_id, int literal_id, unsigned height);
   void StoreRegister(Register reg);
   void StoreInt32Register(Register reg);
   void StoreDoubleRegister(DoubleRegister reg);
@@ -672,10 +632,9 @@
     }
   }
 
-  static Vector<SlotRef> ComputeSlotMappingForArguments(
-      JavaScriptFrame* frame,
-      int inlined_frame_index,
-      int formal_parameter_count);
+  static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                             int inlined_frame_index,
+                                             Vector<SlotRef>* args_slots);
 
  private:
   Address addr_;
@@ -695,12 +654,6 @@
   static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
                                             DeoptimizationInputData* data,
                                             JavaScriptFrame* frame);
-
-  static void ComputeSlotsForArguments(
-      Vector<SlotRef>* args_slots,
-      TranslationIterator* iterator,
-      DeoptimizationInputData* data,
-      JavaScriptFrame* frame);
 };
 
 
@@ -709,14 +662,9 @@
 // needs to inspect a frame that is part of an optimized frame. The
 // internally used FrameDescription objects are not GC safe, so for use
 // by the debugger, frame information is copied to an object of this type.
-// Represents parameters in unadapted form so their number might mismatch
-// formal parameter count.
 class DeoptimizedFrameInfo : public Malloced {
  public:
-  DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
-                       int frame_index,
-                       bool has_arguments_adaptor,
-                       bool has_construct_stub);
+  DeoptimizedFrameInfo(Deoptimizer* deoptimizer, int frame_index);
   virtual ~DeoptimizedFrameInfo();
 
   // GC support.
@@ -733,12 +681,6 @@
     return function_;
   }
 
-  // Check if this frame is preceded by construct stub frame.  The bottom-most
-  // inlined frame might still be called by an uninlined construct stub.
-  bool HasConstructStub() {
-    return has_construct_stub_;
-  }
-
   // Get an incoming argument.
   Object* GetParameter(int index) {
     ASSERT(0 <= index && index < parameters_count());
@@ -751,11 +693,12 @@
     return expression_stack_[index];
   }
 
-  int GetSourcePosition() {
-    return source_position_;
+ private:
+  // Set the frame function.
+  void SetFunction(JSFunction* function) {
+    function_ = function;
   }
 
- private:
   // Set an incoming argument.
   void SetParameter(int index, Object* obj) {
     ASSERT(0 <= index && index < parameters_count());
@@ -769,12 +712,10 @@
   }
 
   JSFunction* function_;
-  bool has_construct_stub_;
   int parameters_count_;
   int expression_count_;
   Object** parameters_;
   Object** expression_stack_;
-  int source_position_;
 
   friend class Deoptimizer;
 };
diff --git a/src/disassembler.cc b/src/disassembler.cc
index e3b40ab..1e67b4c 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -200,7 +200,7 @@
     // Print all the reloc info for this instruction which are not comments.
     for (int i = 0; i < pcs.length(); i++) {
       // Put together the reloc info
-      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
 
       // Indent the printing of the reloc info.
       if (i == 0) {
diff --git a/src/double.h b/src/double.h
index 16a3245..65eded9 100644
--- a/src/double.h
+++ b/src/double.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,8 +34,8 @@
 namespace internal {
 
 // We assume that doubles and uint64_t have the same endianness.
-inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
+static double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
 
 // Helper functions for doubles.
 class Double {
diff --git a/src/dtoa.h b/src/dtoa.h
index 948a079..b3e79af 100644
--- a/src/dtoa.h
+++ b/src/dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,9 +47,9 @@
 // The maximal length of digits a double can have in base 10.
 // Note that DoubleToAscii null-terminates its input. So the given buffer should
 // be at least kBase10MaximalLength + 1 characters long.
-const int kBase10MaximalLength = 17;
+static const int kBase10MaximalLength = 17;
 
-// Converts the given double 'v' to ASCII.
+// Converts the given double 'v' to ascii.
 // The result should be interpreted as buffer * 10^(point-length).
 //
 // The output depends on the given mode:
diff --git a/src/elements.cc b/src/elements.cc
index 1d043a1..0454644 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,85 +31,14 @@
 #include "elements.h"
 #include "utils.h"
 
-
-// Each concrete ElementsAccessor can handle exactly one ElementsKind,
-// several abstract ElementsAccessor classes are used to allow sharing
-// common code.
-//
-// Inheritance hierarchy:
-// - ElementsAccessorBase                        (abstract)
-//   - FastElementsAccessor                      (abstract)
-//     - FastObjectElementsAccessor
-//     - FastDoubleElementsAccessor
-//   - ExternalElementsAccessor                  (abstract)
-//     - ExternalByteElementsAccessor
-//     - ExternalUnsignedByteElementsAccessor
-//     - ExternalShortElementsAccessor
-//     - ExternalUnsignedShortElementsAccessor
-//     - ExternalIntElementsAccessor
-//     - ExternalUnsignedIntElementsAccessor
-//     - ExternalFloatElementsAccessor
-//     - ExternalDoubleElementsAccessor
-//     - PixelElementsAccessor
-//   - DictionaryElementsAccessor
-//   - NonStrictArgumentsElementsAccessor
-
-
 namespace v8 {
 namespace internal {
 
 
-// First argument in list is the accessor class, the second argument is the
-// accessor ElementsKind, and the third is the backing store class.  Use the
-// fast element handler for smi-only arrays.  The implementation is currently
-// identical.  Note that the order must match that of the ElementsKind enum for
-// the |accessor_array[]| below to work.
-#define ELEMENTS_LIST(V)                                                \
-  V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS, FixedArray)     \
-  V(FastObjectElementsAccessor, FAST_ELEMENTS, FixedArray)              \
-  V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS, FixedDoubleArray) \
-  V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS,                    \
-    SeededNumberDictionary)                                             \
-  V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS,  \
-    FixedArray)                                                         \
-  V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS,               \
-    ExternalByteArray)                                                  \
-  V(ExternalUnsignedByteElementsAccessor,                               \
-    EXTERNAL_UNSIGNED_BYTE_ELEMENTS, ExternalUnsignedByteArray)         \
-  V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS,             \
-    ExternalShortArray)                                                 \
-  V(ExternalUnsignedShortElementsAccessor,                              \
-    EXTERNAL_UNSIGNED_SHORT_ELEMENTS, ExternalUnsignedShortArray)       \
-  V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS,                 \
-    ExternalIntArray)                                                   \
-  V(ExternalUnsignedIntElementsAccessor,                                \
-    EXTERNAL_UNSIGNED_INT_ELEMENTS, ExternalUnsignedIntArray)           \
-  V(ExternalFloatElementsAccessor,                                      \
-    EXTERNAL_FLOAT_ELEMENTS, ExternalFloatArray)                        \
-  V(ExternalDoubleElementsAccessor,                                     \
-    EXTERNAL_DOUBLE_ELEMENTS, ExternalDoubleArray)                      \
-  V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS, ExternalPixelArray)
-
-
-template<ElementsKind Kind> class ElementsKindTraits {
- public:
-  typedef FixedArrayBase BackingStore;
-};
-
-#define ELEMENTS_TRAITS(Class, KindParam, Store)               \
-template<> class ElementsKindTraits<KindParam> {               \
-  public:                                                      \
-  static const ElementsKind Kind = KindParam;                  \
-  typedef Store BackingStore;                                  \
-};
-ELEMENTS_LIST(ELEMENTS_TRAITS)
-#undef ELEMENTS_TRAITS
-
-
 ElementsAccessor** ElementsAccessor::elements_accessors_;
 
 
-static bool HasKey(FixedArray* array, Object* key) {
+bool HasKey(FixedArray* array, Object* key) {
   int len0 = array->length();
   for (int i = 0; i < len0; i++) {
     Object* element = array->get(i);
@@ -123,252 +52,6 @@
 }
 
 
-static Failure* ThrowArrayLengthRangeError(Heap* heap) {
-  HandleScope scope(heap->isolate());
-  return heap->isolate()->Throw(
-      *heap->isolate()->factory()->NewRangeError("invalid_array_length",
-          HandleVector<Object>(NULL, 0)));
-}
-
-
-void CopyObjectToObjectElements(FixedArray* from,
-                                ElementsKind from_kind,
-                                uint32_t from_start,
-                                FixedArray* to,
-                                ElementsKind to_kind,
-                                uint32_t to_start,
-                                int raw_copy_size) {
-  ASSERT(to->map() != HEAP->fixed_cow_array_map());
-  ASSERT(from_kind == FAST_ELEMENTS || from_kind == FAST_SMI_ONLY_ELEMENTS);
-  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
-  int copy_size = raw_copy_size;
-  if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
-           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = Min(from->length() - from_start,
-                    to->length() - to_start);
-#ifdef DEBUG
-    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
-    // marked with the hole.
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        ASSERT(to->get(i)->IsTheHole());
-      }
-    }
-#endif
-  }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
-         (copy_size + static_cast<int>(from_start)) <= from->length());
-  if (copy_size == 0) return;
-  Address to_address = to->address() + FixedArray::kHeaderSize;
-  Address from_address = from->address() + FixedArray::kHeaderSize;
-  CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
-            reinterpret_cast<Object**>(from_address) + from_start,
-            copy_size);
-  if (from_kind == FAST_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Heap* heap = from->GetHeap();
-    if (!heap->InNewSpace(to)) {
-      heap->RecordWrites(to->address(),
-                         to->OffsetOfElementAt(to_start),
-                         copy_size);
-    }
-    heap->incremental_marking()->RecordWrites(to);
-  }
-}
-
-
-static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
-                                           uint32_t from_start,
-                                           FixedArray* to,
-                                           ElementsKind to_kind,
-                                           uint32_t to_start,
-                                           int raw_copy_size) {
-  int copy_size = raw_copy_size;
-  Heap* heap = from->GetHeap();
-  if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
-           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = from->max_number_key() + 1 - from_start;
-#ifdef DEBUG
-    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
-    // marked with the hole.
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        ASSERT(to->get(i)->IsTheHole());
-      }
-    }
-#endif
-  }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length());
-  ASSERT(to != from);
-  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
-  if (copy_size == 0) return;
-  for (int i = 0; i < copy_size; i++) {
-    int entry = from->FindEntry(i + from_start);
-    if (entry != SeededNumberDictionary::kNotFound) {
-      Object* value = from->ValueAt(entry);
-      ASSERT(!value->IsTheHole());
-      to->set(i + to_start, value, SKIP_WRITE_BARRIER);
-    } else {
-      to->set_the_hole(i + to_start);
-    }
-  }
-  if (to_kind == FAST_ELEMENTS) {
-    if (!heap->InNewSpace(to)) {
-      heap->RecordWrites(to->address(),
-                         to->OffsetOfElementAt(to_start),
-                         copy_size);
-    }
-    heap->incremental_marking()->RecordWrites(to);
-  }
-}
-
-
-MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
-    FixedDoubleArray* from,
-    uint32_t from_start,
-    FixedArray* to,
-    ElementsKind to_kind,
-    uint32_t to_start,
-    int raw_copy_size) {
-  ASSERT(to_kind == FAST_ELEMENTS || to_kind == FAST_SMI_ONLY_ELEMENTS);
-  int copy_size = raw_copy_size;
-  if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
-           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = Min(from->length() - from_start,
-                    to->length() - to_start);
-#ifdef DEBUG
-    // FAST_ELEMENT arrays cannot be uninitialized. Ensure they are already
-    // marked with the hole.
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        ASSERT(to->get(i)->IsTheHole());
-      }
-    }
-#endif
-  }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
-         (copy_size + static_cast<int>(from_start)) <= from->length());
-  if (copy_size == 0) return from;
-  for (int i = 0; i < copy_size; ++i) {
-    if (to_kind == FAST_SMI_ONLY_ELEMENTS) {
-      UNIMPLEMENTED();
-      return Failure::Exception();
-    } else {
-      MaybeObject* maybe_value = from->get(i + from_start);
-      Object* value;
-      ASSERT(to_kind == FAST_ELEMENTS);
-      // Because FAST_DOUBLE_ELEMENTS -> FAST_ELEMENT allocate HeapObjects
-      // iteratively, the allocate must succeed within a single GC cycle,
-      // otherwise the retry after the GC will also fail. In order to ensure
-      // that no GC is triggered, allocate HeapNumbers from old space if they
-      // can't be taken from new space.
-      if (!maybe_value->ToObject(&value)) {
-        ASSERT(maybe_value->IsRetryAfterGC() || maybe_value->IsOutOfMemory());
-        Heap* heap = from->GetHeap();
-        MaybeObject* maybe_value_object =
-            heap->AllocateHeapNumber(from->get_scalar(i + from_start),
-                                     TENURED);
-        if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
-      }
-      to->set(i + to_start, value, UPDATE_WRITE_BARRIER);
-    }
-  }
-  return to;
-}
-
-
-static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
-                                       uint32_t from_start,
-                                       FixedDoubleArray* to,
-                                       uint32_t to_start,
-                                       int raw_copy_size) {
-  int copy_size = raw_copy_size;
-  if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
-           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = Min(from->length() - from_start,
-                    to->length() - to_start);
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        to->set_the_hole(i);
-      }
-    }
-  }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
-         (copy_size + static_cast<int>(from_start)) <= from->length());
-  if (copy_size == 0) return;
-  Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
-  Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
-  to_address += kDoubleSize * to_start;
-  from_address += kDoubleSize * from_start;
-  int words_per_double = (kDoubleSize / kPointerSize);
-  CopyWords(reinterpret_cast<Object**>(to_address),
-            reinterpret_cast<Object**>(from_address),
-            words_per_double * copy_size);
-}
-
-
-static void CopyObjectToDoubleElements(FixedArray* from,
-                                       uint32_t from_start,
-                                       FixedDoubleArray* to,
-                                       uint32_t to_start,
-                                       int raw_copy_size) {
-  int copy_size = raw_copy_size;
-  if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
-           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = from->length() - from_start;
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        to->set_the_hole(i);
-      }
-    }
-  }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
-         (copy_size + static_cast<int>(from_start)) <= from->length());
-  if (copy_size == 0) return;
-  for (int i = 0; i < copy_size; i++) {
-    Object* hole_or_object = from->get(i + from_start);
-    if (hole_or_object->IsTheHole()) {
-      to->set_the_hole(i + to_start);
-    } else {
-      to->set(i + to_start, hole_or_object->Number());
-    }
-  }
-}
-
-
-static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
-                                           uint32_t from_start,
-                                           FixedDoubleArray* to,
-                                           uint32_t to_start,
-                                           int raw_copy_size) {
-  int copy_size = raw_copy_size;
-  if (copy_size < 0) {
-    ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
-           copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
-    copy_size = from->max_number_key() + 1 - from_start;
-    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
-      for (int i = to_start + copy_size; i < to->length(); ++i) {
-        to->set_the_hole(i);
-      }
-    }
-  }
-  ASSERT(copy_size + static_cast<int>(to_start) <= to->length());
-  if (copy_size == 0) return;
-  for (int i = 0; i < copy_size; i++) {
-    int entry = from->FindEntry(i + from_start);
-    if (entry != SeededNumberDictionary::kNotFound) {
-      to->set(i + to_start, from->ValueAt(entry)->Number());
-    } else {
-      to->set_the_hole(i + to_start);
-    }
-  }
-}
-
-
 // Base class for element handler implementations. Contains the
 // common logic for objects with different ElementsKinds.
 // Subclasses must specialize method for which the element
@@ -386,119 +69,36 @@
 // http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).  We use
 // CRTP to guarantee aggressive compile time optimizations (i.e.  inlining and
 // specialization of SomeElementsAccessor methods).
-template <typename ElementsAccessorSubclass,
-          typename ElementsTraitsParam>
+template <typename ElementsAccessorSubclass, typename BackingStoreClass>
 class ElementsAccessorBase : public ElementsAccessor {
  protected:
-  explicit ElementsAccessorBase(const char* name)
-      : ElementsAccessor(name) { }
-
-  typedef ElementsTraitsParam ElementsTraits;
-  typedef typename ElementsTraitsParam::BackingStore BackingStore;
-
-  virtual ElementsKind kind() const { return ElementsTraits::Kind; }
-
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             BackingStore* backing_store) {
-    MaybeObject* element =
-        ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
-    return !element->IsTheHole();
-  }
-
-  virtual bool HasElement(Object* receiver,
-                          JSObject* holder,
-                          uint32_t key,
-                          FixedArrayBase* backing_store) {
-    if (backing_store == NULL) {
-      backing_store = holder->elements();
-    }
-    return ElementsAccessorSubclass::HasElementImpl(
-        receiver, holder, key, BackingStore::cast(backing_store));
-  }
-
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
+  ElementsAccessorBase() { }
+  virtual MaybeObject* Get(FixedArrayBase* backing_store,
                            uint32_t key,
-                           FixedArrayBase* backing_store) {
-    if (backing_store == NULL) {
-      backing_store = holder->elements();
+                           JSObject* obj,
+                           Object* receiver) {
+    return ElementsAccessorSubclass::Get(
+        BackingStoreClass::cast(backing_store), key, obj, receiver);
+  }
+
+  static MaybeObject* Get(BackingStoreClass* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    if (key < ElementsAccessorSubclass::GetCapacity(backing_store)) {
+      return backing_store->get(key);
     }
-    return ElementsAccessorSubclass::GetImpl(
-        receiver, holder, key, BackingStore::cast(backing_store));
-  }
-
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
-    return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
-           ? backing_store->get(key)
-           : backing_store->GetHeap()->the_hole_value();
-  }
-
-  virtual MaybeObject* SetLength(JSArray* array,
-                                 Object* length) {
-    return ElementsAccessorSubclass::SetLengthImpl(
-        array, length, BackingStore::cast(array->elements()));
-  }
-
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store);
-
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) {
-    return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
-        array,
-        capacity,
-        length);
-  }
-
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       int capacity,
-                                                       int length) {
-    UNIMPLEMENTED();
-    return obj;
+    return backing_store->GetHeap()->the_hole_value();
   }
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
-    UNREACHABLE();
-    return NULL;
-  }
-
-  virtual MaybeObject* CopyElements(JSObject* from_holder,
-                                    uint32_t from_start,
-                                    FixedArrayBase* to,
-                                    ElementsKind to_kind,
-                                    uint32_t to_start,
-                                    int copy_size,
-                                    FixedArrayBase* from) {
-    if (from == NULL) {
-      from = from_holder->elements();
-    }
-    if (from->length() == 0) {
-      return from;
-    }
-    return ElementsAccessorSubclass::CopyElementsImpl(
-        from, from_start, to, to_kind, to_start, copy_size);
-  }
-
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
+  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
                                                FixedArray* to,
-                                               FixedArrayBase* from) {
+                                               JSObject* holder,
+                                               Object* receiver) {
     int len0 = to->length();
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
@@ -507,26 +107,24 @@
       }
     }
 #endif
-    if (from == NULL) {
-      from = holder->elements();
-    }
-    BackingStore* backing_store = BackingStore::cast(from);
-    uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
+    BackingStoreClass* backing_store = BackingStoreClass::cast(from);
+    uint32_t len1 = ElementsAccessorSubclass::GetCapacity(backing_store);
 
     // Optimize if 'other' is empty.
     // We cannot optimize if 'this' is empty, as other may have holes.
     if (len1 == 0) return to;
 
     // Compute how many elements are not in other.
-    uint32_t extra = 0;
+    int extra = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      uint32_t key =
-          ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
-      if (ElementsAccessorSubclass::HasElementImpl(
-              receiver, holder, key, backing_store)) {
+      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
+                                                      y,
+                                                      holder,
+                                                      receiver)) {
+        uint32_t key =
+            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::GetImpl(receiver, holder,
-                                              key, backing_store);
+            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         ASSERT(!value->IsTheHole());
@@ -555,15 +153,16 @@
       }
     }
     // Fill in the extra values.
-    uint32_t index = 0;
+    int index = 0;
     for (uint32_t y = 0; y < len1; y++) {
-      uint32_t key =
-          ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
-      if (ElementsAccessorSubclass::HasElementImpl(
-              receiver, holder, key, backing_store)) {
+      if (ElementsAccessorSubclass::HasElementAtIndex(backing_store,
+                                                      y,
+                                                      holder,
+                                                      receiver)) {
+        uint32_t key =
+            ElementsAccessorSubclass::GetKeyForIndex(backing_store, y);
         MaybeObject* maybe_value =
-            ElementsAccessorSubclass::GetImpl(receiver, holder,
-                                              key, backing_store);
+            ElementsAccessorSubclass::Get(backing_store, key, holder, receiver);
         Object* value;
         if (!maybe_value->ToObject(&value)) return maybe_value;
         if (!value->IsTheHole() && !HasKey(to, value)) {
@@ -577,24 +176,45 @@
   }
 
  protected:
-  static uint32_t GetCapacityImpl(BackingStore* backing_store) {
+  static uint32_t GetCapacity(BackingStoreClass* backing_store) {
     return backing_store->length();
   }
 
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
-    return ElementsAccessorSubclass::GetCapacityImpl(
-        BackingStore::cast(backing_store));
+    return ElementsAccessorSubclass::GetCapacity(
+        BackingStoreClass::cast(backing_store));
   }
 
-  static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
-                                     uint32_t index) {
+  static bool HasElementAtIndex(BackingStoreClass* backing_store,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    uint32_t key =
+        ElementsAccessorSubclass::GetKeyForIndex(backing_store, index);
+    MaybeObject* element = ElementsAccessorSubclass::Get(backing_store,
+                                                         key,
+                                                         holder,
+                                                         receiver);
+    return !element->IsTheHole();
+  }
+
+  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
+                                 uint32_t index,
+                                 JSObject* holder,
+                                 Object* receiver) {
+    return ElementsAccessorSubclass::HasElementAtIndex(
+        BackingStoreClass::cast(backing_store), index, holder, receiver);
+  }
+
+  static uint32_t GetKeyForIndex(BackingStoreClass* backing_store,
+                                 uint32_t index) {
     return index;
   }
 
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
-                                  uint32_t index) {
-    return ElementsAccessorSubclass::GetKeyForIndexImpl(
-        BackingStore::cast(backing_store), index);
+                                  uint32_t index) {
+    return ElementsAccessorSubclass::GetKeyForIndex(
+        BackingStoreClass::cast(backing_store), index);
   }
 
  private:
@@ -602,87 +222,12 @@
 };
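
Note: the ElementsAccessorBase template above (both before and after this
revert) relies on static dispatch in the curiously-recurring-template style:
each virtual entry point downcasts the untyped store and forwards to a
same-named static on the subclass, which the subclass may shadow. A minimal
standalone sketch of that shape, with hypothetical names (not code from this
change):

    #include <cassert>

    // Hypothetical stand-in types; only the dispatch shape matters here.
    struct Store {
      int values[4];
      int length() const { return 4; }
    };

    template <typename Subclass, typename BackingStore>
    class AccessorBase {
     public:
      virtual ~AccessorBase() {}

      // Virtual entry point on the untyped store: downcast once, then
      // forward to the subclass's static overload. Subclasses customize
      // behavior by shadowing the static, avoiding a second virtual call.
      virtual int GetCapacity(void* backing_store) {
        return Subclass::GetCapacity(static_cast<BackingStore*>(backing_store));
      }

      // Default static implementation that a subclass may shadow.
      static int GetCapacity(BackingStore* backing_store) {
        return backing_store->length();
      }
    };

    class StoreAccessor : public AccessorBase<StoreAccessor, Store> {};

    int main() {
      Store store = {{1, 2, 3, 4}};
      StoreAccessor accessor;
      AccessorBase<StoreAccessor, Store>* base = &accessor;
      // The virtual overload is chosen for void*, then forwards statically.
      assert(base->GetCapacity(static_cast<void*>(&store)) == 4);
      return 0;
    }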
 
 
-// Super class for all fast element arrays.
-template<typename FastElementsAccessorSubclass,
-         typename KindTraits,
-         int ElementSize>
 class FastElementsAccessor
-    : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
+    : public ElementsAccessorBase<FastElementsAccessor, FixedArray> {
  public:
-  explicit FastElementsAccessor(const char* name)
-      : ElementsAccessorBase<FastElementsAccessorSubclass,
-                             KindTraits>(name) {}
- protected:
-  friend class ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits>;
-
-  typedef typename KindTraits::BackingStore BackingStore;
-
-  // Adjusts the length of the fast backing store or returns the new length or
-  // undefined in case conversion to a slow backing store should be performed.
-  static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
-                                                JSArray* array,
-                                                Object* length_object,
-                                                uint32_t length) {
-    uint32_t old_capacity = backing_store->length();
-
-    // Check whether the backing store should be shrunk.
-    if (length <= old_capacity) {
-      if (array->HasFastTypeElements()) {
-        MaybeObject* maybe_obj = array->EnsureWritableFastElements();
-        if (!maybe_obj->To(&backing_store)) return maybe_obj;
-      }
-      if (2 * length <= old_capacity) {
-        // If more than half the elements won't be used, trim the array.
-        if (length == 0) {
-          array->initialize_elements();
-        } else {
-          backing_store->set_length(length);
-          Address filler_start = backing_store->address() +
-              BackingStore::OffsetOfElementAt(length);
-          int filler_size = (old_capacity - length) * ElementSize;
-          array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
-        }
-      } else {
-        // Otherwise, fill the unused tail with holes.
-        int old_length = FastD2I(array->length()->Number());
-        for (int i = length; i < old_length; i++) {
-          backing_store->set_the_hole(i);
-        }
-      }
-      return length_object;
-    }
-
-    // Check whether the backing store should be expanded.
-    uint32_t min = JSObject::NewElementsCapacity(old_capacity);
-    uint32_t new_capacity = length > min ? length : min;
-    if (!array->ShouldConvertToSlowElements(new_capacity)) {
-      MaybeObject* result = FastElementsAccessorSubclass::
-          SetFastElementsCapacityAndLength(array, new_capacity, length);
-      if (result->IsFailure()) return result;
-      return length_object;
-    }
-
-    // Request conversion to slow elements.
-    return array->GetHeap()->undefined_value();
-  }
-};
-
-
-class FastObjectElementsAccessor
-    : public FastElementsAccessor<FastObjectElementsAccessor,
-                                  ElementsKindTraits<FAST_ELEMENTS>,
-                                  kPointerSize> {
- public:
-  explicit FastObjectElementsAccessor(const char* name)
-      : FastElementsAccessor<FastObjectElementsAccessor,
-                             ElementsKindTraits<FAST_ELEMENTS>,
-                             kPointerSize>(name) {}
-
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key) {
-    ASSERT(obj->HasFastElements() ||
-           obj->HasFastSmiOnlyElements() ||
-           obj->HasFastArgumentsElements());
+    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
     Heap* heap = obj->GetHeap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
     if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -724,49 +269,7 @@
     return heap->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
-    switch (to_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
-      case FAST_ELEMENTS: {
-        CopyObjectToObjectElements(
-            FixedArray::cast(from), ElementsTraits::Kind, from_start,
-            FixedArray::cast(to), to_kind, to_start, copy_size);
-        return from;
-      }
-      case FAST_DOUBLE_ELEMENTS:
-        CopyObjectToDoubleElements(
-            FixedArray::cast(from), from_start,
-            FixedDoubleArray::cast(to), to_start, copy_size);
-        return from;
-      default:
-        UNREACHABLE();
-    }
-    return to->GetHeap()->undefined_value();
-  }
-
-
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       uint32_t capacity,
-                                                       uint32_t length) {
-    JSObject::SetFastElementsCapacityMode set_capacity_mode =
-        obj->HasFastSmiOnlyElements()
-            ? JSObject::kAllowSmiOnlyElements
-            : JSObject::kDontAllowSmiOnlyElements;
-    return obj->SetFastElementsCapacityAndLength(capacity,
-                                                 length,
-                                                 set_capacity_mode);
-  }
-
  protected:
-  friend class FastElementsAccessor<FastObjectElementsAccessor,
-                                    ElementsKindTraits<FAST_ELEMENTS>,
-                                    kPointerSize>;
-
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
@@ -776,50 +279,11 @@
 
 
 class FastDoubleElementsAccessor
-    : public FastElementsAccessor<FastDoubleElementsAccessor,
-                                  ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
-                                  kDoubleSize> {
- public:
-  explicit FastDoubleElementsAccessor(const char* name)
-      : FastElementsAccessor<FastDoubleElementsAccessor,
-                             ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
-                             kDoubleSize>(name) {}
-
-  static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
-                                                       uint32_t capacity,
-                                                       uint32_t length) {
-    return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
-  }
-
+    : public ElementsAccessorBase<FastDoubleElementsAccessor,
+                                  FixedDoubleArray> {
  protected:
   friend class ElementsAccessorBase<FastDoubleElementsAccessor,
-                                    ElementsKindTraits<FAST_DOUBLE_ELEMENTS> >;
-  friend class FastElementsAccessor<FastDoubleElementsAccessor,
-                                    ElementsKindTraits<FAST_DOUBLE_ELEMENTS>,
-                                    kDoubleSize>;
-
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
-    switch (to_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
-      case FAST_ELEMENTS:
-        return CopyDoubleToObjectElements(
-            FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
-            to_kind, to_start, copy_size);
-      case FAST_DOUBLE_ELEMENTS:
-        CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
-                                   FixedDoubleArray::cast(to),
-                                   to_start, copy_size);
-        return from;
-      default:
-        UNREACHABLE();
-    }
-    return to->GetHeap()->undefined_value();
-  }
+                                    FixedDoubleArray>;
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -833,49 +297,34 @@
     return obj->GetHeap()->true_value();
   }
 
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             FixedDoubleArray* backing_store) {
-    return key < static_cast<uint32_t>(backing_store->length()) &&
-        !backing_store->is_the_hole(key);
+  static bool HasElementAtIndex(FixedDoubleArray* backing_store,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    return !backing_store->is_the_hole(index);
   }
 };
 
 
 // Super class for all external element arrays.
 template<typename ExternalElementsAccessorSubclass,
-         ElementsKind Kind>
+         typename ExternalArray>
 class ExternalElementsAccessor
     : public ElementsAccessorBase<ExternalElementsAccessorSubclass,
-                                  ElementsKindTraits<Kind> > {
- public:
-  explicit ExternalElementsAccessor(const char* name)
-      : ElementsAccessorBase<ExternalElementsAccessorSubclass,
-                             ElementsKindTraits<Kind> >(name) {}
-
+                                  ExternalArray> {
  protected:
-  typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
-
   friend class ElementsAccessorBase<ExternalElementsAccessorSubclass,
-                                    ElementsKindTraits<Kind> >;
+                                    ExternalArray>;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              BackingStore* backing_store) {
-    return
-        key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
-        ? backing_store->get(key)
-        : backing_store->GetHeap()->undefined_value();
-  }
-
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    BackingStore* backing_store) {
-    // External arrays do not support changing their length.
-    UNREACHABLE();
-    return obj;
+  static MaybeObject* Get(ExternalArray* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    if (key < ExternalElementsAccessorSubclass::GetCapacity(backing_store)) {
+      return backing_store->get(key);
+    } else {
+      return backing_store->GetHeap()->undefined_value();
+    }
   }
 
   virtual MaybeObject* Delete(JSObject* obj,
@@ -884,173 +333,67 @@
     // External arrays always ignore deletes.
     return obj->GetHeap()->true_value();
   }
-
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             BackingStore* backing_store) {
-    uint32_t capacity =
-        ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
-    return key < capacity;
-  }
 };
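
Note: the two Get overloads differ in their out-of-range sentinel. The
fast-store Get earlier in this file returns the_hole (so the runtime goes on
to probe the prototype chain), while the external-array Get just above
returns undefined directly, since typed external stores cannot contain
holes. A toy sketch of that contract (sentinels and names hypothetical):

    #include <cassert>

    // Sentinels standing in for the_hole / undefined (hypothetical).
    enum Sentinel { kValue, kTheHole, kUndefined };

    // Fast stores report missing entries as the-hole, and the runtime then
    // continues the lookup along the prototype chain.
    Sentinel FastGet(unsigned key, unsigned capacity) {
      return key < capacity ? kValue : kTheHole;
    }

    // External (typed) stores cannot contain holes, so an out-of-range read
    // surfaces directly as undefined.
    Sentinel ExternalGet(unsigned key, unsigned capacity) {
      return key < capacity ? kValue : kUndefined;
    }

    int main() {
      assert(FastGet(10, 4) == kTheHole);
      assert(ExternalGet(10, 4) == kUndefined);
      return 0;
    }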
 
 
 class ExternalByteElementsAccessor
     : public ExternalElementsAccessor<ExternalByteElementsAccessor,
-                                      EXTERNAL_BYTE_ELEMENTS> {
- public:
-  explicit ExternalByteElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalByteElementsAccessor,
-                                 EXTERNAL_BYTE_ELEMENTS>(name) {}
+                                      ExternalByteArray> {
 };
 
 
 class ExternalUnsignedByteElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
-                                      EXTERNAL_UNSIGNED_BYTE_ELEMENTS> {
- public:
-  explicit ExternalUnsignedByteElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalUnsignedByteElementsAccessor,
-                                 EXTERNAL_UNSIGNED_BYTE_ELEMENTS>(name) {}
+                                      ExternalUnsignedByteArray> {
 };
 
 
 class ExternalShortElementsAccessor
     : public ExternalElementsAccessor<ExternalShortElementsAccessor,
-                                      EXTERNAL_SHORT_ELEMENTS> {
- public:
-  explicit ExternalShortElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalShortElementsAccessor,
-                                 EXTERNAL_SHORT_ELEMENTS>(name) {}
+                                      ExternalShortArray> {
 };
 
 
 class ExternalUnsignedShortElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
-                                      EXTERNAL_UNSIGNED_SHORT_ELEMENTS> {
- public:
-  explicit ExternalUnsignedShortElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalUnsignedShortElementsAccessor,
-                                 EXTERNAL_UNSIGNED_SHORT_ELEMENTS>(name) {}
+                                      ExternalUnsignedShortArray> {
 };
 
 
 class ExternalIntElementsAccessor
     : public ExternalElementsAccessor<ExternalIntElementsAccessor,
-                                      EXTERNAL_INT_ELEMENTS> {
- public:
-  explicit ExternalIntElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalIntElementsAccessor,
-                                 EXTERNAL_INT_ELEMENTS>(name) {}
+                                      ExternalIntArray> {
 };
 
 
 class ExternalUnsignedIntElementsAccessor
     : public ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
-                                      EXTERNAL_UNSIGNED_INT_ELEMENTS> {
- public:
-  explicit ExternalUnsignedIntElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalUnsignedIntElementsAccessor,
-                                 EXTERNAL_UNSIGNED_INT_ELEMENTS>(name) {}
+                                      ExternalUnsignedIntArray> {
 };
 
 
 class ExternalFloatElementsAccessor
     : public ExternalElementsAccessor<ExternalFloatElementsAccessor,
-                                      EXTERNAL_FLOAT_ELEMENTS> {
- public:
-  explicit ExternalFloatElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalFloatElementsAccessor,
-                                 EXTERNAL_FLOAT_ELEMENTS>(name) {}
+                                      ExternalFloatArray> {
 };
 
 
 class ExternalDoubleElementsAccessor
     : public ExternalElementsAccessor<ExternalDoubleElementsAccessor,
-                                      EXTERNAL_DOUBLE_ELEMENTS> {
- public:
-  explicit ExternalDoubleElementsAccessor(const char* name)
-      : ExternalElementsAccessor<ExternalDoubleElementsAccessor,
-                                 EXTERNAL_DOUBLE_ELEMENTS>(name) {}
+                                      ExternalDoubleArray> {
 };
 
 
 class PixelElementsAccessor
     : public ExternalElementsAccessor<PixelElementsAccessor,
-                                      EXTERNAL_PIXEL_ELEMENTS> {
- public:
-  explicit PixelElementsAccessor(const char* name)
-      : ExternalElementsAccessor<PixelElementsAccessor,
-                                 EXTERNAL_PIXEL_ELEMENTS>(name) {}
+                                      ExternalPixelArray> {
 };
 
 
 class DictionaryElementsAccessor
     : public ElementsAccessorBase<DictionaryElementsAccessor,
-                                  ElementsKindTraits<DICTIONARY_ELEMENTS> > {
+                                  SeededNumberDictionary> {
  public:
-  explicit DictionaryElementsAccessor(const char* name)
-      : ElementsAccessorBase<DictionaryElementsAccessor,
-                             ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
-
-  // Adjusts the length of the dictionary backing store and returns the new
-  // length according to ES5 section 15.4.5.2 behavior.
-  static MaybeObject* SetLengthWithoutNormalize(SeededNumberDictionary* dict,
-                                                JSArray* array,
-                                                Object* length_object,
-                                                uint32_t length) {
-    if (length == 0) {
-      // If the length of a slow array is reset to zero, we clear
-      // the array and flush backing storage. This has the added
-      // benefit that the array returns to fast mode.
-      Object* obj;
-      MaybeObject* maybe_obj = array->ResetElements();
-      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    } else {
-      uint32_t new_length = length;
-      uint32_t old_length = static_cast<uint32_t>(array->length()->Number());
-      if (new_length < old_length) {
-        // Find last non-deletable element in range of elements to be
-        // deleted and adjust range accordingly.
-        Heap* heap = array->GetHeap();
-        int capacity = dict->Capacity();
-        for (int i = 0; i < capacity; i++) {
-          Object* key = dict->KeyAt(i);
-          if (key->IsNumber()) {
-            uint32_t number = static_cast<uint32_t>(key->Number());
-            if (new_length <= number && number < old_length) {
-              PropertyDetails details = dict->DetailsAt(i);
-              if (details.IsDontDelete()) new_length = number + 1;
-            }
-          }
-        }
-        if (new_length != length) {
-          MaybeObject* maybe_object = heap->NumberFromUint32(new_length);
-          if (!maybe_object->To(&length_object)) return maybe_object;
-        }
-
-        // Remove elements that should be deleted.
-        int removed_entries = 0;
-        Object* the_hole_value = heap->the_hole_value();
-        for (int i = 0; i < capacity; i++) {
-          Object* key = dict->KeyAt(i);
-          if (key->IsNumber()) {
-            uint32_t number = static_cast<uint32_t>(key->Number());
-            if (new_length <= number && number < old_length) {
-              dict->SetEntry(i, the_hole_value, the_hole_value);
-              removed_entries++;
-            }
-          }
-        }
-
-        // Update the number of elements.
-        dict->ElementsRemoved(removed_entries);
-      }
-    }
-    return length_object;
-  }
-
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key,
                                    JSReceiver::DeleteMode mode) {
@@ -1096,34 +439,9 @@
     return heap->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
-    switch (to_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
-      case FAST_ELEMENTS:
-        CopyDictionaryToObjectElements(
-            SeededNumberDictionary::cast(from), from_start,
-            FixedArray::cast(to), to_kind, to_start, copy_size);
-        return from;
-      case FAST_DOUBLE_ELEMENTS:
-        CopyDictionaryToDoubleElements(
-            SeededNumberDictionary::cast(from), from_start,
-            FixedDoubleArray::cast(to), to_start, copy_size);
-        return from;
-      default:
-        UNREACHABLE();
-    }
-    return to->GetHeap()->undefined_value();
-  }
-
-
  protected:
   friend class ElementsAccessorBase<DictionaryElementsAccessor,
-                                    ElementsKindTraits<DICTIONARY_ELEMENTS> >;
+                                    SeededNumberDictionary>;
 
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
@@ -1131,10 +449,10 @@
     return DeleteCommon(obj, key, mode);
   }
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              SeededNumberDictionary* backing_store) {
+  static MaybeObject* Get(SeededNumberDictionary* backing_store,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
     int entry = backing_store->FindEntry(key);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* element = backing_store->ValueAt(entry);
@@ -1151,40 +469,26 @@
     return obj->GetHeap()->the_hole_value();
   }
 
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             SeededNumberDictionary* backing_store) {
-    return backing_store->FindEntry(key) !=
-        SeededNumberDictionary::kNotFound;
-  }
-
-  static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
-                                     uint32_t index) {
+  static uint32_t GetKeyForIndex(SeededNumberDictionary* dict,
+                                 uint32_t index) {
     Object* key = dict->KeyAt(index);
     return Smi::cast(key)->value();
   }
 };
 
 
-class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
-    NonStrictArgumentsElementsAccessor,
-    ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> > {
- public:
-  explicit NonStrictArgumentsElementsAccessor(const char* name)
-      : ElementsAccessorBase<
-          NonStrictArgumentsElementsAccessor,
-          ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >(name) {}
+class NonStrictArgumentsElementsAccessor
+    : public ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
+                                  FixedArray> {
  protected:
-  friend class ElementsAccessorBase<
-      NonStrictArgumentsElementsAccessor,
-      ElementsKindTraits<NON_STRICT_ARGUMENTS_ELEMENTS> >;
+  friend class ElementsAccessorBase<NonStrictArgumentsElementsAccessor,
+                                    FixedArray>;
 
-  static MaybeObject* GetImpl(Object* receiver,
-                              JSObject* obj,
-                              uint32_t key,
-                              FixedArray* parameter_map) {
-    Object* probe = GetParameterMapArg(obj, parameter_map, key);
+  static MaybeObject* Get(FixedArray* parameter_map,
+                          uint32_t key,
+                          JSObject* obj,
+                          Object* receiver) {
+    Object* probe = GetParameterMapArg(parameter_map, key);
     if (!probe->IsTheHole()) {
       Context* context = Context::cast(parameter_map->get(0));
       int context_index = Smi::cast(probe)->value();
@@ -1193,37 +497,19 @@
     } else {
       // Object is not mapped, defer to the arguments.
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
-          receiver, obj, key, arguments);
-      Object* result;
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-      // Elements of the arguments object in slow mode might be slow aliases.
-      if (result->IsAliasedArgumentsEntry()) {
-        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
-        Context* context = Context::cast(parameter_map->get(0));
-        int context_index = entry->aliased_context_slot();
-        ASSERT(!context->get(context_index)->IsTheHole());
-        return context->get(context_index);
-      } else {
-        return result;
-      }
+      return ElementsAccessor::ForArray(arguments)->Get(arguments,
+                                                        key,
+                                                        obj,
+                                                        receiver);
     }
   }
 
-  static MaybeObject* SetLengthImpl(JSObject* obj,
-                                    Object* length,
-                                    FixedArray* parameter_map) {
-    // TODO(mstarzinger): This was never implemented but will be used once we
-    // correctly implement [[DefineOwnProperty]] on arrays.
-    UNIMPLEMENTED();
-    return obj;
-  }
-
   virtual MaybeObject* Delete(JSObject* obj,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) {
     FixedArray* parameter_map = FixedArray::cast(obj->elements());
-    Object* probe = GetParameterMapArg(obj, parameter_map, key);
+    Object* probe = GetParameterMapArg(parameter_map, key);
     if (!probe->IsTheHole()) {
       // TODO(kmillikin): We could check if this was the last aliased
       // parameter, and revert to normal elements in that case.  That
@@ -1234,57 +520,41 @@
       if (arguments->IsDictionary()) {
         return DictionaryElementsAccessor::DeleteCommon(obj, key, mode);
       } else {
-        return FastObjectElementsAccessor::DeleteCommon(obj, key);
+        return FastElementsAccessor::DeleteCommon(obj, key);
       }
     }
     return obj->GetHeap()->true_value();
   }
 
-  static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
-                                       uint32_t from_start,
-                                       FixedArrayBase* to,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
-                                       int copy_size) {
-    FixedArray* parameter_map = FixedArray::cast(from);
-    FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-    ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
-    return accessor->CopyElements(NULL, from_start, to, to_kind,
-                                  to_start, copy_size, arguments);
-  }
-
-  static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
+  static uint32_t GetCapacity(FixedArray* parameter_map) {
     FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
     return Max(static_cast<uint32_t>(parameter_map->length() - 2),
                ForArray(arguments)->GetCapacity(arguments));
   }
 
-  static uint32_t GetKeyForIndexImpl(FixedArray* dict,
-                                     uint32_t index) {
+  static uint32_t GetKeyForIndex(FixedArray* dict,
+                                 uint32_t index) {
     return index;
   }
 
-  static bool HasElementImpl(Object* receiver,
-                             JSObject* holder,
-                             uint32_t key,
-                             FixedArray* parameter_map) {
-    Object* probe = GetParameterMapArg(holder, parameter_map, key);
+  static bool HasElementAtIndex(FixedArray* parameter_map,
+                                uint32_t index,
+                                JSObject* holder,
+                                Object* receiver) {
+    Object* probe = GetParameterMapArg(parameter_map, index);
     if (!probe->IsTheHole()) {
       return true;
     } else {
       FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
       ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
-      return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
+      return !accessor->Get(arguments, index, holder, receiver)->IsTheHole();
     }
   }
 
  private:
-  static Object* GetParameterMapArg(JSObject* holder,
-                                    FixedArray* parameter_map,
+  static Object* GetParameterMapArg(FixedArray* parameter_map,
                                     uint32_t key) {
-    uint32_t length = holder->IsJSArray()
-        ? Smi::cast(JSArray::cast(holder)->length())->value()
-        : parameter_map->length();
+    uint32_t length = parameter_map->length();
     return key < (length - 2)
         ? parameter_map->get(key + 2)
         : parameter_map->GetHeap()->the_hole_value();
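
Note: GetParameterMapArg encodes the non-strict arguments layout: slot 0
holds the function's context, slot 1 the arguments backing store, and slots
2..length-1 hold either a context slot index for an aliased parameter or the
hole. A runnable toy model of that lookup (names hypothetical, not V8 code):

    #include <cassert>
    #include <vector>

    // Toy model of the parameter map read by GetParameterMapArg above:
    //   slot 0:  the function's context
    //   slot 1:  the unmapped arguments backing store
    //   slot 2+: one entry per parameter, either a context slot index (the
    //            argument aliases a local) or a hole marker.
    const int kHole = -1;

    int GetParameterMapArg(const std::vector<int>& parameter_map,
                           unsigned key) {
      unsigned length = parameter_map.size();
      // Mirrors: key < (length - 2) ? parameter_map->get(key + 2) : the hole.
      return key < length - 2 ? parameter_map[key + 2] : kHole;
    }

    int main() {
      std::vector<int> parameter_map;
      parameter_map.push_back(0);      // slot 0: context (placeholder)
      parameter_map.push_back(0);      // slot 1: arguments (placeholder)
      parameter_map.push_back(4);      // parameter 0 aliases context slot 4
      parameter_map.push_back(kHole);  // parameter 1: look in arguments

      assert(GetParameterMapArg(parameter_map, 0) == 4);
      assert(GetParameterMapArg(parameter_map, 1) == kHole);
      assert(GetParameterMapArg(parameter_map, 9) == kHole);  // out of range
      return 0;
    }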
@@ -1327,87 +597,39 @@
 
 void ElementsAccessor::InitializeOncePerProcess() {
   static struct ConcreteElementsAccessors {
-#define ACCESSOR_STRUCT(Class, Kind, Store) Class* Kind##_handler;
-    ELEMENTS_LIST(ACCESSOR_STRUCT)
-#undef ACCESSOR_STRUCT
-  } element_accessors = {
-#define ACCESSOR_INIT(Class, Kind, Store) new Class(#Kind),
-    ELEMENTS_LIST(ACCESSOR_INIT)
-#undef ACCESSOR_INIT
-  };
+    FastElementsAccessor fast_elements_handler;
+    FastDoubleElementsAccessor fast_double_elements_handler;
+    DictionaryElementsAccessor dictionary_elements_handler;
+    NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
+    ExternalByteElementsAccessor byte_elements_handler;
+    ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
+    ExternalShortElementsAccessor short_elements_handler;
+    ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
+    ExternalIntElementsAccessor int_elements_handler;
+    ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
+    ExternalFloatElementsAccessor float_elements_handler;
+    ExternalDoubleElementsAccessor double_elements_handler;
+    PixelElementsAccessor pixel_elements_handler;
+  } element_accessors;
 
   static ElementsAccessor* accessor_array[] = {
-#define ACCESSOR_ARRAY(Class, Kind, Store) element_accessors.Kind##_handler,
-    ELEMENTS_LIST(ACCESSOR_ARRAY)
-#undef ACCESSOR_ARRAY
+    &element_accessors.fast_elements_handler,
+    &element_accessors.fast_double_elements_handler,
+    &element_accessors.dictionary_elements_handler,
+    &element_accessors.non_strict_arguments_elements_handler,
+    &element_accessors.byte_elements_handler,
+    &element_accessors.unsigned_byte_elements_handler,
+    &element_accessors.short_elements_handler,
+    &element_accessors.unsigned_short_elements_handler,
+    &element_accessors.int_elements_handler,
+    &element_accessors.unsigned_int_elements_handler,
+    &element_accessors.float_elements_handler,
+    &element_accessors.double_elements_handler,
+    &element_accessors.pixel_elements_handler
   };
 
-  STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
-                kElementsKindCount);
-
   elements_accessors_ = accessor_array;
 }
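
Note: with the explicit initializer above, accessor_array must list its
handlers in ElementsKind order by hand, because ForKind (see elements.h
below) indexes the table directly; this revert also drops the STATIC_ASSERT
that checked the count. A toy sketch of why the ordering is load-bearing
(enum and names hypothetical):

    #include <cassert>

    // Hypothetical miniature of the kind -> accessor table.
    enum Kind { FAST, DICTIONARY, KIND_COUNT };

    struct Accessor { Kind kind; };

    static Accessor fast_accessor = { FAST };
    static Accessor dictionary_accessor = { DICTIONARY };

    // Entries must stay in enum order: ForKind is a plain array index, and
    // nothing else ties a slot to its kind.
    static Accessor* accessors[KIND_COUNT] = {
      &fast_accessor,
      &dictionary_accessor
    };

    Accessor* ForKind(Kind kind) {
      assert(kind < KIND_COUNT);
      return accessors[kind];
    }

    int main() {
      assert(ForKind(DICTIONARY)->kind == DICTIONARY);
      return 0;
    }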
 
 
-template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
-                                  ElementsKindTraits>::
-    SetLengthImpl(JSObject* obj,
-                  Object* length,
-                  typename ElementsKindTraits::BackingStore* backing_store) {
-  JSArray* array = JSArray::cast(obj);
-
-  // Fast case: The new length fits into a Smi.
-  MaybeObject* maybe_smi_length = length->ToSmi();
-  Object* smi_length = Smi::FromInt(0);
-  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
-    const int value = Smi::cast(smi_length)->value();
-    if (value >= 0) {
-      Object* new_length;
-      MaybeObject* result = ElementsAccessorSubclass::
-          SetLengthWithoutNormalize(backing_store, array, smi_length, value);
-      if (!result->ToObject(&new_length)) return result;
-      ASSERT(new_length->IsSmi() || new_length->IsUndefined());
-      if (new_length->IsSmi()) {
-        array->set_length(Smi::cast(new_length));
-        return array;
-      }
-    } else {
-      return ThrowArrayLengthRangeError(array->GetHeap());
-    }
-  }
-
-  // Slow case: The new length does not fit into a Smi or conversion
-  // to slow elements is needed for other reasons.
-  if (length->IsNumber()) {
-    uint32_t value;
-    if (length->ToArrayIndex(&value)) {
-      SeededNumberDictionary* dictionary;
-      MaybeObject* maybe_object = array->NormalizeElements();
-      if (!maybe_object->To(&dictionary)) return maybe_object;
-      Object* new_length;
-      MaybeObject* result = DictionaryElementsAccessor::
-          SetLengthWithoutNormalize(dictionary, array, length, value);
-      if (!result->ToObject(&new_length)) return result;
-      ASSERT(new_length->IsNumber());
-      array->set_length(new_length);
-      return array;
-    } else {
-      return ThrowArrayLengthRangeError(array->GetHeap());
-    }
-  }
-
-  // Fall-back case: The new length is not a number so make the array
-  // size one and set only element to length.
-  FixedArray* new_backing_store;
-  MaybeObject* maybe_obj = array->GetHeap()->AllocateFixedArray(1);
-  if (!maybe_obj->To(&new_backing_store)) return maybe_obj;
-  new_backing_store->set(0, length);
-  { MaybeObject* result = array->SetContent(new_backing_store);
-    if (result->IsFailure()) return result;
-  }
-  return array;
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/elements.h b/src/elements.h
index ff97c08..851c8c3 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,6 @@
 #define V8_ELEMENTS_H_
 
 #include "objects.h"
-#include "heap.h"
-#include "isolate.h"
 
 namespace v8 {
 namespace internal {
@@ -39,88 +37,21 @@
 // ElementsKinds.
 class ElementsAccessor {
  public:
-  explicit ElementsAccessor(const char* name) : name_(name) { }
+  ElementsAccessor() { }
   virtual ~ElementsAccessor() { }
-
-  virtual ElementsKind kind() const = 0;
-  const char* name() const { return name_; }
-
-  // Returns true if a holder contains an element with the specified key
-  // without iterating up the prototype chain.  The caller can optionally pass
-  // in the backing store to use for the check, which must be compatible with
-  // the ElementsKind of the ElementsAccessor. If backing_store is NULL, the
-  // holder->elements() is used as the backing store.
-  virtual bool HasElement(Object* receiver,
-                          JSObject* holder,
-                          uint32_t key,
-                          FixedArrayBase* backing_store = NULL) = 0;
-
-  // Returns the element with the specified key or undefined if there is no such
-  // element. This method doesn't iterate up the prototype chain.  The caller
-  // can optionally pass in the backing store to use for the check, which must
-  // be compatible with the ElementsKind of the ElementsAccessor. If
-  // backing_store is NULL, the holder->elements() is used as the backing store.
-  virtual MaybeObject* Get(Object* receiver,
-                           JSObject* holder,
+  virtual MaybeObject* Get(FixedArrayBase* backing_store,
                            uint32_t key,
-                           FixedArrayBase* backing_store = NULL) = 0;
+                           JSObject* holder,
+                           Object* receiver) = 0;
 
-  // Modifies the length data property as specified for JSArrays and resizes the
-  // underlying backing store accordingly. The method honors the semantics of
-  // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
-  // have non-deletable elements can only be shrunk to the size of highest
-  // element that is non-deletable.
-  virtual MaybeObject* SetLength(JSArray* holder,
-                                 Object* new_length) = 0;
-
-  // Modifies both the length and capacity of a JSArray, resizing the underlying
-  // backing store as necessary. This method does NOT honor the semantics of
-  // EcmaScript 5.1 15.4.5.2, arrays can be shrunk beyond non-deletable
-  // elements. This method should only be called for array expansion OR by
-  // runtime JavaScript code that use InternalArrays and don't care about
-  // EcmaScript 5.1 semantics.
-  virtual MaybeObject* SetCapacityAndLength(JSArray* array,
-                                            int capacity,
-                                            int length) = 0;
-
-  // Deletes an element in an object, returning a new elements backing store.
   virtual MaybeObject* Delete(JSObject* holder,
                               uint32_t key,
                               JSReceiver::DeleteMode mode) = 0;
 
-  // If kCopyToEnd is specified as the copy_size to CopyElements, it copies all
-  // of elements from source after source_start to the destination array.
-  static const int kCopyToEnd = -1;
-  // If kCopyToEndAndInitializeToHole is specified as the copy_size to
-  // CopyElements, it copies all of elements from source after source_start to
-  // destination array, padding any remaining uninitialized elements in the
-  // destination array with the hole.
-  static const int kCopyToEndAndInitializeToHole = -2;
-
-  // Copy elements from one backing store to another. Typically, callers specify
-  // the source JSObject or JSArray in source_holder. If the holder's backing
-  // store is available, it can be passed in source and source_holder is
-  // ignored.
-  virtual MaybeObject* CopyElements(JSObject* source_holder,
-                                    uint32_t source_start,
-                                    FixedArrayBase* destination,
-                                    ElementsKind destination_kind,
-                                    uint32_t destination_start,
-                                    int copy_size,
-                                    FixedArrayBase* source = NULL) = 0;
-
-  MaybeObject* CopyElements(JSObject* from_holder,
-                            FixedArrayBase* to,
-                            ElementsKind to_kind,
-                            FixedArrayBase* from = NULL) {
-    return CopyElements(from_holder, 0, to, to_kind, 0,
-                        kCopyToEndAndInitializeToHole, from);
-  }
-
-  virtual MaybeObject* AddElementsToFixedArray(Object* receiver,
-                                               JSObject* holder,
+  virtual MaybeObject* AddElementsToFixedArray(FixedArrayBase* from,
                                                FixedArray* to,
-                                               FixedArrayBase* from = NULL) = 0;
+                                               JSObject* holder,
+                                               Object* receiver) = 0;
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -137,34 +68,28 @@
 
   virtual uint32_t GetCapacity(FixedArrayBase* backing_store) = 0;
 
-  // Element handlers distinguish between indexes and keys when they manipulate
+  virtual bool HasElementAtIndex(FixedArrayBase* backing_store,
+                                 uint32_t index,
+                                 JSObject* holder,
+                                 Object* receiver) = 0;
+
+  // Element handlers distinguish between indexes and keys when they manipulate
   // elements.  Indexes refer to elements in terms of their location in the
-  // underlying storage's backing store representation, and are between 0 and
+  // underlying storage's backing store representation, and are between 0 and
   // GetCapacity.  Keys refer to elements in terms of the value that would be
-  // specified in JavaScript to access the element. In most implementations,
-  // keys are equivalent to indexes, and GetKeyForIndex returns the same value
-  // it is passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps
-  // the index to a key using the KeyAt method on the NumberDictionary.
+  // specified in JavaScript to access the element. In most implementations, keys
+  // are equivalent to indexes, and GetKeyForIndex returns the same value it is
+  // passed. In the NumberDictionary ElementsAccessor, GetKeyForIndex maps the
+  // index to a key using the KeyAt method on the NumberDictionary.
   virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
                                   uint32_t index) = 0;
 
  private:
   static ElementsAccessor** elements_accessors_;
-  const char* name_;
 
   DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
 };
 
-
-void CopyObjectToObjectElements(FixedArray* from_obj,
-                                ElementsKind from_kind,
-                                uint32_t from_start,
-                                FixedArray* to_obj,
-                                ElementsKind to_kind,
-                                uint32_t to_start,
-                                int copy_size);
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ELEMENTS_H_
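
Note: the index/key distinction documented in the header above only bites
for dictionary stores, where a slot's position and its JavaScript key
diverge. A rough analogue with std::map standing in for
SeededNumberDictionary (hypothetical illustration, not V8 code):

    #include <cassert>
    #include <stdint.h>
    #include <map>

    int main() {
      // A sparse array such as  a[7] = 1; a[5000] = 42;  held
      // dictionary-style.
      std::map<uint32_t, int> dict;
      dict[7] = 1;
      dict[5000] = 42;

      // Indexes walk the store's own slots; GetKeyForIndex maps each slot
      // back to the JavaScript key (here, simply the map's iteration order).
      uint32_t index = 0;
      for (std::map<uint32_t, int>::const_iterator it = dict.begin();
           it != dict.end(); ++it, ++index) {
        if (index == 0) assert(it->first == 7);     // slot 0 holds key 7
        if (index == 1) assert(it->first == 5000);  // slot 1 holds key 5000
      }
      return 0;
    }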
diff --git a/src/execution.cc b/src/execution.cc
index 5618975..f36d4e4 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,6 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
-#include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
@@ -66,13 +65,13 @@
 }
 
 
-static Handle<Object> Invoke(bool is_construct,
-                             Handle<JSFunction> function,
+static Handle<Object> Invoke(bool construct,
+                             Handle<JSFunction> func,
                              Handle<Object> receiver,
                              int argc,
-                             Handle<Object> args[],
+                             Object*** args,
                              bool* has_pending_exception) {
-  Isolate* isolate = function->GetIsolate();
+  Isolate* isolate = func->GetIsolate();
 
   // Entering JavaScript.
   VMState state(isolate, JS);
@@ -80,15 +79,21 @@
   // Placeholder for return value.
   MaybeObject* value = reinterpret_cast<Object*>(kZapValue);
 
-  typedef Object* (*JSEntryFunction)(byte* entry,
-                                     Object* function,
-                                     Object* receiver,
-                                     int argc,
-                                     Object*** args);
+  typedef Object* (*JSEntryFunction)(
+    byte* entry,
+    Object* function,
+    Object* receiver,
+    int argc,
+    Object*** args);
 
-  Handle<Code> code = is_construct
-      ? isolate->factory()->js_construct_entry_code()
-      : isolate->factory()->js_entry_code();
+  Handle<Code> code;
+  if (construct) {
+    JSConstructEntryStub stub;
+    code = stub.GetCode();
+  } else {
+    JSEntryStub stub;
+    code = stub.GetCode();
+  }
 
   // Convert calls on global objects to be calls on the global
   // receiver instead to avoid having a 'this' pointer which refers
@@ -100,22 +105,21 @@
 
   // Make sure that the global object of the context we're about to
   // make the current one is indeed a global object.
-  ASSERT(function->context()->global()->IsGlobalObject());
+  ASSERT(func->context()->global()->IsGlobalObject());
 
   {
     // Save and restore context around invocation and block the
     // allocation of handles without explicit handle scopes.
     SaveContext save(isolate);
     NoHandleAllocation na;
-    JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
+    JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    byte* function_entry = function->code()->entry();
-    JSFunction* func = *function;
-    Object* recv = *receiver;
-    Object*** argv = reinterpret_cast<Object***>(args);
-    value =
-        CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
+    byte* entry_address = func->code()->entry();
+    JSFunction* function = *func;
+    Object* receiver_pointer = *receiver;
+    value = CALL_GENERATED_CODE(entry, entry_address, function,
+                                receiver_pointer, argc, args);
   }
 
 #ifdef DEBUG
@@ -144,11 +148,9 @@
 Handle<Object> Execution::Call(Handle<Object> callable,
                                Handle<Object> receiver,
                                int argc,
-                               Handle<Object> argv[],
+                               Object*** args,
                                bool* pending_exception,
                                bool convert_receiver) {
-  *pending_exception = false;
-
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(callable, pending_exception);
     if (*pending_exception) return callable;
@@ -157,7 +159,7 @@
 
   // In non-strict mode, convert receiver.
   if (convert_receiver && !receiver->IsJSReceiver() &&
-      !func->shared()->native() && func->shared()->is_classic_mode()) {
+      !func->shared()->native() && !func->shared()->strict_mode()) {
     if (receiver->IsUndefined() || receiver->IsNull()) {
       Object* global = func->context()->global()->global_receiver();
       // Under some circumstances, 'global' can be the JSBuiltinsObject
@@ -170,15 +172,13 @@
     if (*pending_exception) return callable;
   }
 
-  return Invoke(false, func, receiver, argc, argv, pending_exception);
+  return Invoke(false, func, receiver, argc, args, pending_exception);
 }
 
 
-Handle<Object> Execution::New(Handle<JSFunction> func,
-                              int argc,
-                              Handle<Object> argv[],
-                              bool* pending_exception) {
-  return Invoke(true, func, Isolate::Current()->global(), argc, argv,
+Handle<Object> Execution::New(Handle<JSFunction> func, int argc,
+                              Object*** args, bool* pending_exception) {
+  return Invoke(true, func, Isolate::Current()->global(), argc, args,
                 pending_exception);
 }
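
Note: after this revert, callers hand arguments to Call/New/TryCall as an
array of handle locations (Object***) rather than Handle<Object>[]. A
runnable toy model of that plumbing (all types here are hypothetical
stand-ins for the V8 ones):

    #include <cassert>

    // Toy model of the reverted calling convention: a Handle wraps an
    // Object** "location", and callers pass an array of those locations
    // (i.e. Object***).
    struct Object { int value; };

    class Handle {
     public:
      explicit Handle(Object** location) : location_(location) {}
      Object** location() const { return location_; }
     private:
      Object** location_;
    };

    // Mirrors the shape of Invoke/Call after the revert.
    int SumArgs(int argc, Object*** args) {
      int sum = 0;
      for (int i = 0; i < argc; i++) sum += (*args[i])->value;
      return sum;
    }

    int main() {
      Object a = { 1 };
      Object b = { 2 };
      Object* a_ptr = &a;
      Object* b_ptr = &b;
      Handle ha(&a_ptr);
      Handle hb(&b_ptr);

      // The call-site pattern used throughout execution.cc after the revert:
      Object** argv[2] = { ha.location(), hb.location() };
      assert(SumArgs(2, argv) == 3);
      return 0;
    }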
 
@@ -186,7 +186,7 @@
 Handle<Object> Execution::TryCall(Handle<JSFunction> func,
                                   Handle<Object> receiver,
                                   int argc,
-                                  Handle<Object> args[],
+                                  Object*** args,
                                   bool* caught_exception) {
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose.  Also, to avoid
@@ -195,7 +195,6 @@
   v8::TryCatch catcher;
   catcher.SetVerbose(false);
   catcher.SetCaptureMessage(false);
-  *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
                                  caught_exception);
@@ -356,7 +355,7 @@
 
 void StackGuard::SetStackLimit(uintptr_t limit) {
   ExecutionAccess access(isolate_);
   // If the current limits are special (e.g. due to a pending interrupt) then
   // leave them alone.
   uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(isolate_, limit);
   if (thread_local_.jslimit_ == thread_local_.real_jslimit_) {
@@ -376,15 +375,9 @@
 }
 
 
-bool StackGuard::ShouldPostponeInterrupts() {
-  ExecutionAccess access(isolate_);
-  return should_postpone_interrupts(access);
-}
-
-
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
+  return thread_local_.interrupt_flags_ & INTERRUPT;
 }
 
 
@@ -410,7 +403,7 @@
 
 bool StackGuard::IsTerminateExecution() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
+  return thread_local_.interrupt_flags_ & TERMINATE;
 }
 
 
@@ -423,7 +416,7 @@
 
 bool StackGuard::IsRuntimeProfilerTick() {
   ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
+  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
 }
 
 
@@ -440,22 +433,6 @@
 }
 
 
-bool StackGuard::IsGCRequest() {
-  ExecutionAccess access(isolate_);
-  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
-}
-
-
-void StackGuard::RequestGC() {
-  ExecutionAccess access(isolate_);
-  thread_local_.interrupt_flags_ |= GC_REQUEST;
-  if (thread_local_.postpone_interrupts_nesting_ == 0) {
-    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
-    isolate_->heap()->SetStackLimits();
-  }
-}
-
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -578,15 +555,14 @@
 
 // --- C a l l s   t o   n a t i v e s ---
 
-#define RETURN_NATIVE_CALL(name, args, has_pending_exception)           \
-  do {                                                                  \
-    Isolate* isolate = Isolate::Current();                              \
-    Handle<Object> argv[] = args;                                       \
-    ASSERT(has_pending_exception != NULL);                              \
-    return Call(isolate->name##_fun(),                                  \
-                isolate->js_builtins_object(),                          \
-                ARRAY_SIZE(argv), argv,                                 \
-                has_pending_exception);                                 \
+#define RETURN_NATIVE_CALL(name, argc, argv, has_pending_exception)            \
+  do {                                                                         \
+    Isolate* isolate = Isolate::Current();                                     \
+    Object** args[argc] = argv;                                                \
+    ASSERT(has_pending_exception != NULL);                                     \
+    return Call(isolate->name##_fun(),                                         \
+                isolate->js_builtins_object(), argc, args,                     \
+                has_pending_exception);                                        \
   } while (false)
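
Note: for reference, the reverted macro takes an explicit argc plus a braced
list of handle locations, so a use such as
RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc) expands roughly to:

    do {
      Isolate* isolate = Isolate::Current();
      Object** args[1] = { obj.location() };
      ASSERT(exc != NULL);
      return Call(isolate->to_number_fun(),
                  isolate->js_builtins_object(), 1, args,
                  exc);
    } while (false);

The braced argv survives preprocessing only because every use site in this
file passes a single element; a comma inside the braces would be split into
separate macro arguments.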
 
 
@@ -607,44 +583,44 @@
 
 
 Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_number, { obj }, exc);
+  RETURN_NATIVE_CALL(to_number, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_string, { obj }, exc);
+  RETURN_NATIVE_CALL(to_string, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToDetailString(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_detail_string, { obj }, exc);
+  RETURN_NATIVE_CALL(to_detail_string, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToObject(Handle<Object> obj, bool* exc) {
   if (obj->IsSpecObject()) return obj;
-  RETURN_NATIVE_CALL(to_object, { obj }, exc);
+  RETURN_NATIVE_CALL(to_object, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToInteger(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_integer, { obj }, exc);
+  RETURN_NATIVE_CALL(to_integer, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToUint32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_uint32, { obj }, exc);
+  RETURN_NATIVE_CALL(to_uint32, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::ToInt32(Handle<Object> obj, bool* exc) {
-  RETURN_NATIVE_CALL(to_int32, { obj }, exc);
+  RETURN_NATIVE_CALL(to_int32, 1, { obj.location() }, exc);
 }
 
 
 Handle<Object> Execution::NewDate(double time, bool* exc) {
   Handle<Object> time_obj = FACTORY->NewNumber(time);
-  RETURN_NATIVE_CALL(create_date, { time_obj }, exc);
+  RETURN_NATIVE_CALL(create_date, 1, { time_obj.location() }, exc);
 }
 
 
@@ -681,7 +657,7 @@
 
   bool caught_exception;
   Handle<Object> index_object = factory->NewNumberFromInt(int_index);
-  Handle<Object> index_arg[] = { index_object };
+  Object** index_arg[] = { index_object.location() };
   Handle<Object> result = TryCall(Handle<JSFunction>::cast(char_at),
                                   string,
                                   ARRAY_SIZE(index_arg),
@@ -695,8 +671,7 @@
 
 
 Handle<JSFunction> Execution::InstantiateFunction(
-    Handle<FunctionTemplateInfo> data,
-    bool* exc) {
+    Handle<FunctionTemplateInfo> data, bool* exc) {
   Isolate* isolate = data->GetIsolate();
   // Fast case: see if the function has already been instantiated
   int serial_number = Smi::cast(data->serial_number())->value();
@@ -705,12 +680,10 @@
           GetElementNoExceptionThrown(serial_number);
   if (elm->IsJSFunction()) return Handle<JSFunction>(JSFunction::cast(elm));
   // The function has not yet been instantiated in this context; do it.
-  Handle<Object> args[] = { data };
-  Handle<Object> result = Call(isolate->instantiate_fun(),
-                               isolate->js_builtins_object(),
-                               ARRAY_SIZE(args),
-                               args,
-                               exc);
+  Object** args[1] = { Handle<Object>::cast(data).location() };
+  Handle<Object> result =
+      Call(isolate->instantiate_fun(),
+           isolate->js_builtins_object(), 1, args, exc);
   if (*exc) return Handle<JSFunction>::null();
   return Handle<JSFunction>::cast(result);
 }
@@ -737,12 +710,10 @@
     ASSERT(!*exc);
     return Handle<JSObject>(JSObject::cast(result));
   } else {
-    Handle<Object> args[] = { data };
-    Handle<Object> result = Call(isolate->instantiate_fun(),
-                                 isolate->js_builtins_object(),
-                                 ARRAY_SIZE(args),
-                                 args,
-                                 exc);
+    Object** args[1] = { Handle<Object>::cast(data).location() };
+    Handle<Object> result =
+        Call(isolate->instantiate_fun(),
+             isolate->js_builtins_object(), 1, args, exc);
     if (*exc) return Handle<JSObject>::null();
     return Handle<JSObject>::cast(result);
   }
@@ -753,12 +724,9 @@
                                   Handle<Object> instance_template,
                                   bool* exc) {
   Isolate* isolate = Isolate::Current();
-  Handle<Object> args[] = { instance, instance_template };
+  Object** args[2] = { instance.location(), instance_template.location() };
   Execution::Call(isolate->configure_instance_fun(),
-                  isolate->js_builtins_object(),
-                  ARRAY_SIZE(args),
-                  args,
-                  exc);
+                  isolate->js_builtins_object(), 2, args, exc);
 }
 
 
@@ -767,13 +735,16 @@
                                             Handle<Object> pos,
                                             Handle<Object> is_global) {
   Isolate* isolate = fun->GetIsolate();
-  Handle<Object> args[] = { recv, fun, pos, is_global };
-  bool caught_exception;
-  Handle<Object> result = TryCall(isolate->get_stack_trace_line_fun(),
-                                  isolate->js_builtins_object(),
-                                  ARRAY_SIZE(args),
-                                  args,
-                                  &caught_exception);
+  const int argc = 4;
+  Object** args[argc] = { recv.location(),
+                          Handle<Object>::cast(fun).location(),
+                          pos.location(),
+                          is_global.location() };
+  bool caught_exception = false;
+  Handle<Object> result =
+      TryCall(isolate->get_stack_trace_line_fun(),
+              isolate->js_builtins_object(), argc, args,
+              &caught_exception);
   if (caught_exception || !result->IsString()) {
       return isolate->factory()->empty_symbol();
   }
@@ -826,11 +797,6 @@
     return isolate->heap()->undefined_value();
   }
 
-  StackLimitCheck check(isolate);
-  if (check.HasOverflowed()) {
-    return isolate->heap()->undefined_value();
-  }
-
   {
     JavaScriptFrameIterator it(isolate);
     ASSERT(!it.done());
@@ -856,22 +822,17 @@
   // Clear the debug break request flag.
   isolate->stack_guard()->Continue(DEBUGBREAK);
 
-  ProcessDebugMessages(debug_command_only);
+  ProcessDebugMesssages(debug_command_only);
 
   // Return to continue execution.
   return isolate->heap()->undefined_value();
 }
 
-void Execution::ProcessDebugMessages(bool debug_command_only) {
+void Execution::ProcessDebugMesssages(bool debug_command_only) {
   Isolate* isolate = Isolate::Current();
   // Clear the debug command request flag.
   isolate->stack_guard()->Continue(DEBUGCOMMAND);
 
-  StackLimitCheck check(isolate);
-  if (check.HasOverflowed()) {
-    return;
-  }
-
   HandleScope scope(isolate);
   // Enter the debugger. Just continue if we fail to enter the debugger.
   EnterDebugger debugger;
@@ -888,22 +849,11 @@
 
 #endif
 
-MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
+MaybeObject* Execution::HandleStackGuardInterrupt() {
+  Isolate* isolate = Isolate::Current();
   StackGuard* stack_guard = isolate->stack_guard();
-  if (stack_guard->ShouldPostponeInterrupts()) {
-    return isolate->heap()->undefined_value();
-  }
-
-  if (stack_guard->IsGCRequest()) {
-    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
-                                       "StackGuard GC request");
-    stack_guard->Continue(GC_REQUEST);
-  }
-
   isolate->counters()->stack_interrupts()->Increment();
-  // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
-  if (FLAG_count_based_interrupts ||
-      stack_guard->IsRuntimeProfilerTick()) {
+  if (stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
     stack_guard->Continue(RUNTIME_PROFILER_TICK);
     isolate->runtime_profiler()->OptimizeNow();
@@ -925,5 +875,4 @@
   return isolate->heap()->undefined_value();
 }
 
-
 } }  // namespace v8::internal
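Caller-side, the convention restored by this revert looks roughly like the sketch below; my_fun is a hypothetical callee, while the .location() pattern mirrors the hunks above.

    // Hypothetical caller (my_fun is assumed): the reverted API takes an
    // array of Object** (handle locations) instead of Handle<Object> argv[].
    bool pending_exception = false;
    Handle<Object> arg0 = isolate->factory()->NewNumberFromInt(42);
    Object** args[1] = { arg0.location() };  // decays to Object*** at the call
    Handle<Object> result = Execution::Call(my_fun,
                                            isolate->js_builtins_object(),
                                            1, args, &pending_exception);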
diff --git a/src/execution.h b/src/execution.h
index 01e4b9d..5cd7141 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,14 +41,9 @@
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5,
-  GC_REQUEST = 1 << 6
+  RUNTIME_PROFILER_TICK = 1 << 5
 };
 
-
-class Isolate;
-
-
 class Execution : public AllStatic {
  public:
   // Call a function, the caller supplies a receiver and an array
@@ -65,7 +60,7 @@
   static Handle<Object> Call(Handle<Object> callable,
                              Handle<Object> receiver,
                              int argc,
-                             Handle<Object> argv[],
+                             Object*** args,
                              bool* pending_exception,
                              bool convert_receiver = false);
 
@@ -78,7 +73,7 @@
   //
   static Handle<Object> New(Handle<JSFunction> func,
                             int argc,
-                            Handle<Object> argv[],
+                            Object*** args,
                             bool* pending_exception);
 
   // Call a function, just like Call(), but make sure to silently catch
@@ -88,7 +83,7 @@
   static Handle<Object> TryCall(Handle<JSFunction> func,
                                 Handle<Object> receiver,
                                 int argc,
-                                Handle<Object> argv[],
+                                Object*** args,
                                 bool* caught_exception);
 
   // ECMA-262 9.2
@@ -140,13 +135,12 @@
                                           Handle<Object> is_global);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   static Object* DebugBreakHelper();
-  static void ProcessDebugMessages(bool debug_command_only);
+  static void ProcessDebugMesssages(bool debug_command_only);
 #endif
 
   // If the stack guard is triggered, but it is not an actual
   // stack overflow, then handle the interruption accordingly.
-  MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt(
-      Isolate* isolate);
+  MUST_USE_RESULT static MaybeObject* HandleStackGuardInterrupt();
 
   // Get a function delegate (or undefined) for the given non-function
   // object. Used for support calling objects as functions.
@@ -163,6 +157,7 @@
 
 
 class ExecutionAccess;
+class Isolate;
 
 
 // StackGuard contains the handling of the limits that are used to limit the
@@ -201,8 +196,6 @@
   bool IsDebugCommand();
   void DebugCommand();
 #endif
-  bool IsGCRequest();
-  void RequestGC();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
@@ -226,7 +219,6 @@
   Address address_of_real_jslimit() {
     return reinterpret_cast<Address>(&thread_local_.real_jslimit_);
   }
-  bool ShouldPostponeInterrupts();
 
  private:
   StackGuard();
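The InterruptFlag bits above are set and cleared individually; a minimal sketch of the intended check-and-clear pattern, using the accessors from this header (control flow illustrative):

    StackGuard* guard = isolate->stack_guard();
    if (guard->IsRuntimeProfilerTick()) {      // is bit 1 << 5 set?
      guard->Continue(RUNTIME_PROFILER_TICK);  // clear exactly that bit
      // ... service the profiler tick ...
    }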
diff --git a/src/extensions/experimental/break-iterator.cc b/src/extensions/experimental/break-iterator.cc
new file mode 100644
index 0000000..e695a3e
--- /dev/null
+++ b/src/extensions/experimental/break-iterator.cc
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/break-iterator.h"
+
+#include <string.h>
+
+#include "unicode/brkiter.h"
+#include "unicode/locid.h"
+#include "unicode/rbbi.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> BreakIterator::break_iterator_template_;
+
+icu::BreakIterator* BreakIterator::UnpackBreakIterator(
+    v8::Handle<v8::Object> obj) {
+  if (break_iterator_template_->HasInstance(obj)) {
+    return static_cast<icu::BreakIterator*>(
+        obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+icu::UnicodeString* BreakIterator::ResetAdoptedText(
+    v8::Handle<v8::Object> obj, v8::Handle<v8::Value> value) {
+  // Get the previous value from the internal field.
+  icu::UnicodeString* text = static_cast<icu::UnicodeString*>(
+      obj->GetPointerFromInternalField(1));
+  delete text;
+
+  // Assign new value to the internal pointer.
+  v8::String::Value text_value(value);
+  text = new icu::UnicodeString(
+      reinterpret_cast<const UChar*>(*text_value), text_value.length());
+  obj->SetPointerInInternalField(1, text);
+
+  // Return new unicode string pointer.
+  return text;
+}
+
+void BreakIterator::DeleteBreakIterator(v8::Persistent<v8::Value> object,
+                                        void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a break iterator.
+  delete UnpackBreakIterator(persistent_object);
+
+  delete static_cast<icu::UnicodeString*>(
+      persistent_object->GetPointerFromInternalField(1));
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("BreakIterator method called on an object "
+                      "that is not a BreakIterator.")));
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorAdoptText(
+    const v8::Arguments& args) {
+  if (args.Length() != 1 || !args[0]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Text input is required.")));
+  }
+
+  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+  if (!break_iterator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  break_iterator->setText(*ResetAdoptedText(args.Holder(), args[0]));
+
+  return v8::Undefined();
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorFirst(
+    const v8::Arguments& args) {
+  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+  if (!break_iterator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  return v8::Int32::New(break_iterator->first());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorNext(
+    const v8::Arguments& args) {
+  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+  if (!break_iterator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  return v8::Int32::New(break_iterator->next());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorCurrent(
+    const v8::Arguments& args) {
+  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+  if (!break_iterator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  return v8::Int32::New(break_iterator->current());
+}
+
+v8::Handle<v8::Value> BreakIterator::BreakIteratorBreakType(
+    const v8::Arguments& args) {
+  icu::BreakIterator* break_iterator = UnpackBreakIterator(args.Holder());
+  if (!break_iterator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
+  icu::RuleBasedBreakIterator* rule_based_iterator =
+      static_cast<icu::RuleBasedBreakIterator*>(break_iterator);
+  int32_t status = rule_based_iterator->getRuleStatus();
+  // Keep return values in sync with JavaScript BreakType enum.
+  if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
+    return v8::Int32::New(UBRK_WORD_NONE);
+  } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
+    return v8::Int32::New(UBRK_WORD_NUMBER);
+  } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
+    return v8::Int32::New(UBRK_WORD_LETTER);
+  } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
+    return v8::Int32::New(UBRK_WORD_KANA);
+  } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
+    return v8::Int32::New(UBRK_WORD_IDEO);
+  } else {
+    return v8::Int32::New(-1);
+  }
+}
+
+v8::Handle<v8::Value> BreakIterator::JSBreakIterator(
+    const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and iterator type are required.")));
+  }
+
+  v8::String::Utf8Value locale(args[0]);
+  icu::Locale icu_locale(*locale);
+
+  UErrorCode status = U_ZERO_ERROR;
+  icu::BreakIterator* break_iterator = NULL;
+  v8::String::Utf8Value type(args[1]);
+  if (!strcmp(*type, "character")) {
+    break_iterator =
+        icu::BreakIterator::createCharacterInstance(icu_locale, status);
+  } else if (!strcmp(*type, "word")) {
+    break_iterator =
+        icu::BreakIterator::createWordInstance(icu_locale, status);
+  } else if (!strcmp(*type, "sentence")) {
+    break_iterator =
+        icu::BreakIterator::createSentenceInstance(icu_locale, status);
+  } else if (!strcmp(*type, "line")) {
+    break_iterator =
+        icu::BreakIterator::createLineInstance(icu_locale, status);
+  } else {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Invalid iterator type.")));
+  }
+
+  if (U_FAILURE(status)) {
+    delete break_iterator;
+    return v8::ThrowException(v8::Exception::Error(
+        v8::String::New("Failed to create break iterator.")));
+  }
+
+  if (break_iterator_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+    raw_template->SetClassName(v8::String::New("v8Locale.v8BreakIterator"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal fields for icu break iterator and adopted text.
+    object_template->SetInternalFieldCount(2);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("adoptText"),
+               v8::FunctionTemplate::New(BreakIteratorAdoptText));
+    proto->Set(v8::String::New("first"),
+               v8::FunctionTemplate::New(BreakIteratorFirst));
+    proto->Set(v8::String::New("next"),
+               v8::FunctionTemplate::New(BreakIteratorNext));
+    proto->Set(v8::String::New("current"),
+               v8::FunctionTemplate::New(BreakIteratorCurrent));
+    proto->Set(v8::String::New("breakType"),
+               v8::FunctionTemplate::New(BreakIteratorBreakType));
+
+    break_iterator_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      break_iterator_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set break iterator as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, break_iterator);
+  // Make sure that the pointer to adopted text is NULL.
+  wrapper->SetPointerInInternalField(1, NULL);
+
+  // Make object handle weak so we can delete iterator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteBreakIterator);
+
+  return wrapper;
+}
+
+} }  // namespace v8::internal
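All of the wrapper classes in this extension share the same lifecycle: stash an owned C++ pointer in an internal field, make the persistent handle weak, and free both in the weak callback. A condensed sketch with a generic Payload standing in for icu::BreakIterator (assumes the template's instance template reserved one internal field):

    struct Payload { /* stands in for icu::BreakIterator */ };

    static void DeletePayload(v8::Persistent<v8::Value> object, void* param) {
      v8::Persistent<v8::Object> obj =
          v8::Persistent<v8::Object>::Cast(object);
      delete static_cast<Payload*>(obj->GetPointerFromInternalField(0));
      obj.Dispose();  // release the persistent handle itself
    }

    static v8::Handle<v8::Object> Wrap(v8::Local<v8::FunctionTemplate> tmpl) {
      v8::Local<v8::Object> local = tmpl->GetFunction()->NewInstance();
      v8::Persistent<v8::Object> wrapper =
          v8::Persistent<v8::Object>::New(local);
      wrapper->SetPointerInInternalField(0, new Payload());  // owned C++ object
      wrapper.MakeWeak(NULL, DeletePayload);  // GC invokes DeletePayload
      return wrapper;
    }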
diff --git a/src/extensions/experimental/break-iterator.h b/src/extensions/experimental/break-iterator.h
new file mode 100644
index 0000000..73b9bbd
--- /dev/null
+++ b/src/extensions/experimental/break-iterator.h
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class BreakIterator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class BreakIterator {
+ public:
+  static v8::Handle<v8::Value> JSBreakIterator(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks break iterator object from corresponding JavaScript object.
+  static icu::BreakIterator* UnpackBreakIterator(v8::Handle<v8::Object> obj);
+
+  // Deletes the old value and sets the adopted text in
+  // corresponding JavaScript object.
+  static icu::UnicodeString* ResetAdoptedText(v8::Handle<v8::Object> obj,
+                                              v8::Handle<v8::Value> text_value);
+
+  // Release memory we allocated for the BreakIterator once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteBreakIterator(v8::Persistent<v8::Value> object,
+                                  void* param);
+
+  // Assigns new text to the iterator.
+  static v8::Handle<v8::Value> BreakIteratorAdoptText(
+      const v8::Arguments& args);
+
+  // Moves iterator to the beginning of the string and returns new position.
+  static v8::Handle<v8::Value> BreakIteratorFirst(const v8::Arguments& args);
+
+  // Moves iterator to the next position and returns it.
+  static v8::Handle<v8::Value> BreakIteratorNext(const v8::Arguments& args);
+
+  // Returns the iterator's current position.
+  static v8::Handle<v8::Value> BreakIteratorCurrent(
+      const v8::Arguments& args);
+
+  // Returns type of the item from current position.
+  // This call is only valid for word break iterators. Others just return 0.
+  static v8::Handle<v8::Value> BreakIteratorBreakType(
+      const v8::Arguments& args);
+
+ private:
+  BreakIterator() {}
+
+  static v8::Persistent<v8::FunctionTemplate> break_iterator_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_BREAK_ITERATOR_H_
diff --git a/src/extensions/experimental/collator.cc b/src/extensions/experimental/collator.cc
new file mode 100644
index 0000000..5cf2192
--- /dev/null
+++ b/src/extensions/experimental/collator.cc
@@ -0,0 +1,222 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/collator.h"
+
+#include "unicode/coll.h"
+#include "unicode/locid.h"
+#include "unicode/ucol.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> Collator::collator_template_;
+
+icu::Collator* Collator::UnpackCollator(v8::Handle<v8::Object> obj) {
+  if (collator_template_->HasInstance(obj)) {
+    return static_cast<icu::Collator*>(obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void Collator::DeleteCollator(v8::Persistent<v8::Value> object, void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a collator.
+  delete UnpackCollator(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("Collator method called on an object "
+                      "that is not a Collator.")));
+}
+
+// Extract a boolean option named in |option| and set it to |result|.
+// Return true if it's specified. Otherwise, return false.
+static bool ExtractBooleanOption(const v8::Local<v8::Object>& options,
+                                 const char* option,
+                                 bool* result) {
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = options->Get(v8::String::New(option));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull()) {
+    if (value->IsBoolean()) {
+      *result = value->BooleanValue();
+      return true;
+    }
+  }
+  return false;
+}
+
+// When there's an ICU error, throw a JavaScript error with |message|.
+static v8::Handle<v8::Value> ThrowExceptionForICUError(const char* message) {
+  return v8::ThrowException(v8::Exception::Error(v8::String::New(message)));
+}
+
+v8::Handle<v8::Value> Collator::CollatorCompare(const v8::Arguments& args) {
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Two string arguments are required.")));
+  }
+
+  icu::Collator* collator = UnpackCollator(args.Holder());
+  if (!collator) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  v8::String::Value string_value1(args[0]);
+  v8::String::Value string_value2(args[1]);
+  const UChar* string1 = reinterpret_cast<const UChar*>(*string_value1);
+  const UChar* string2 = reinterpret_cast<const UChar*>(*string_value2);
+  UErrorCode status = U_ZERO_ERROR;
+  UCollationResult result = collator->compare(
+      string1, string_value1.length(), string2, string_value2.length(), status);
+
+  if (U_FAILURE(status)) {
+    return ThrowExceptionForICUError(
+        "Unexpected failure in Collator.compare.");
+  }
+
+  return v8::Int32::New(result);
+}
+
+v8::Handle<v8::Value> Collator::JSCollator(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and collation options are required.")));
+  }
+
+  v8::String::AsciiValue locale(args[0]);
+  icu::Locale icu_locale(*locale);
+
+  icu::Collator* collator = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  collator = icu::Collator::createInstance(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete collator;
+    return ThrowExceptionForICUError("Failed to create collator.");
+  }
+
+  v8::Local<v8::Object> options(args[1]->ToObject());
+
+  // Below, we change collation options that are explicitly specified
+  // by a caller in JavaScript. Otherwise, we leave them alone because
+  // we don't want to change the locale-dependent default value.
+  // The three options below are very likely to have the same default
+  // across locales, but I haven't checked them all. Others we may add
+  // in the future almost certainly have locale-dependent defaults (e.g.
+  // caseFirst is upperFirst for Danish but off for most other locales).
+
+  bool ignore_case, ignore_accents, numeric;
+
+  if (ExtractBooleanOption(options, "ignoreCase", &ignore_case)) {
+    // We need to explicitly set the level to secondary to get case ignored.
+    // At the default tertiary strength, case still matters even when
+    // UCOL_CASE_LEVEL == UCOL_OFF.
+    if (ignore_case) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    }
+    collator->setAttribute(UCOL_CASE_LEVEL, ignore_case ? UCOL_OFF : UCOL_ON,
+                           status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set ignoreCase.");
+    }
+  }
+
+  // Accents are taken into account with strength secondary or higher.
+  if (ExtractBooleanOption(options, "ignoreAccents", &ignore_accents)) {
+    if (!ignore_accents) {
+      collator->setStrength(icu::Collator::SECONDARY);
+    } else {
+      collator->setStrength(icu::Collator::PRIMARY);
+    }
+  }
+
+  if (ExtractBooleanOption(options, "numeric", &numeric)) {
+    collator->setAttribute(UCOL_NUMERIC_COLLATION,
+                           numeric ? UCOL_ON : UCOL_OFF, status);
+    if (U_FAILURE(status)) {
+      delete collator;
+      return ThrowExceptionForICUError("Failed to set numeric sort option.");
+    }
+  }
+
+  if (collator_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+    raw_template->SetClassName(v8::String::New("v8Locale.Collator"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal fields for icu collator.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("compare"),
+               v8::FunctionTemplate::New(CollatorCompare));
+
+    collator_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      collator_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set collator as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, collator);
+
+  // Make object handle weak so we can delete iterator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteCollator);
+
+  return wrapper;
+}
+
+} }  // namespace v8::internal
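Stripped of the V8 plumbing, the option handling above maps onto the ICU API as in this sketch (locale and inputs illustrative; status checks elided):

    UErrorCode status = U_ZERO_ERROR;
    icu::Collator* coll =
        icu::Collator::createInstance(icu::Locale("da"), status);
    coll->setStrength(icu::Collator::SECONDARY);            // ignoreCase: true
    coll->setAttribute(UCOL_CASE_LEVEL, UCOL_OFF, status);
    coll->setAttribute(UCOL_NUMERIC_COLLATION, UCOL_ON, status);  // numeric
    UCollationResult r = coll->compare(
        icu::UnicodeString("a9"), icu::UnicodeString("a10"), status);
    // With numeric collation on, "a9" sorts before "a10": r == UCOL_LESS.
    delete coll;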
diff --git a/src/extensions/experimental/collator.h b/src/extensions/experimental/collator.h
new file mode 100644
index 0000000..ca7e4dc
--- /dev/null
+++ b/src/extensions/experimental/collator.h
@@ -0,0 +1,68 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class Collator;
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class Collator {
+ public:
+  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks collator object from corresponding JavaScript object.
+  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the Collator once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
+
+  // Compares two strings and returns -1, 0 or 1 depending on
+  // whether string1 is smaller than, equal to or larger than string2.
+  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
+
+ private:
+  Collator() {}
+
+  static v8::Persistent<v8::FunctionTemplate> collator_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
diff --git a/src/extensions/experimental/datetime-format.cc b/src/extensions/experimental/datetime-format.cc
new file mode 100644
index 0000000..94a29ac
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.cc
@@ -0,0 +1,384 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/datetime-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dtfmtsym.h"
+#include "unicode/dtptngen.h"
+#include "unicode/locid.h"
+#include "unicode/smpdtfmt.h"
+
+namespace v8 {
+namespace internal {
+
+v8::Persistent<v8::FunctionTemplate> DateTimeFormat::datetime_format_template_;
+
+static icu::DateFormat* CreateDateTimeFormat(v8::Handle<v8::String>,
+                                             v8::Handle<v8::Object>);
+static v8::Handle<v8::Value> GetSymbols(
+    const v8::Arguments&,
+    const icu::UnicodeString*, int32_t,
+    const icu::UnicodeString*, int32_t,
+    const icu::UnicodeString*, int32_t);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+static icu::DateFormat::EStyle GetDateTimeStyle(const icu::UnicodeString&);
+
+icu::SimpleDateFormat* DateTimeFormat::UnpackDateTimeFormat(
+    v8::Handle<v8::Object> obj) {
+  if (datetime_format_template_->HasInstance(obj)) {
+    return static_cast<icu::SimpleDateFormat*>(
+        obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void DateTimeFormat::DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+                                          void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a date time formatter.
+  delete UnpackDateTimeFormat(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> DateTimeFormat::Format(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  double millis = 0.0;
+  if (args.Length() != 1 || !args[0]->IsDate()) {
+    // Create a new date.
+    v8::TryCatch try_catch;
+    v8::Local<v8::Script> date_script =
+        v8::Script::Compile(v8::String::New("eval('new Date()')"));
+    millis = date_script->Run()->NumberValue();
+    if (try_catch.HasCaught()) {
+      return try_catch.ReThrow();
+    }
+  } else {
+    millis = v8::Date::Cast(*args[0])->NumberValue();
+  }
+
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  icu::UnicodeString result;
+  date_format->format(millis, result);
+
+  return v8::String::New(
+      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetMonths(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getMonths(
+      narrow_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::NARROW);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getMonths(
+      abbrev_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::ABBREVIATED);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getMonths(
+      wide_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::WIDE);
+
+  return GetSymbols(
+      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetWeekdays(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getWeekdays(
+      narrow_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::NARROW);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getWeekdays(
+      abbrev_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::ABBREVIATED);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getWeekdays(
+      wide_count,
+      icu::DateFormatSymbols::STANDALONE,
+      icu::DateFormatSymbols::WIDE);
+
+  // getXXXWeekdays always returns 8 elements - ICU stable API.
+  // We can't use ASSERT_EQ(8, narrow_count) because ASSERT is internal to v8.
+  if (narrow_count != 8 || abbrev_count != 8 || wide_count != 8) {
+    return v8::ThrowException(v8::Exception::Error(
+        v8::String::New("Failed to get weekday information.")));
+  }
+
+  // ICU documentation says we should ignore element 0 of the returned array.
+  return GetSymbols(args, narrow + 1, narrow_count - 1, abbrev + 1,
+                    abbrev_count - 1, wide + 1, wide_count - 1);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetEras(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  int32_t narrow_count;
+  const icu::UnicodeString* narrow = symbols->getNarrowEras(narrow_count);
+  int32_t abbrev_count;
+  const icu::UnicodeString* abbrev = symbols->getEras(abbrev_count);
+  int32_t wide_count;
+  const icu::UnicodeString* wide = symbols->getEraNames(wide_count);
+
+  return GetSymbols(
+      args, narrow, narrow_count, abbrev, abbrev_count, wide, wide_count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::GetAmPm(const v8::Arguments& args) {
+  icu::SimpleDateFormat* date_format = UnpackDateTimeFormat(args.Holder());
+  if (!date_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  const icu::DateFormatSymbols* symbols = date_format->getDateFormatSymbols();
+
+  // In this case narrow == abbreviated == wide
+  int32_t count;
+  const icu::UnicodeString* wide = symbols->getAmPmStrings(count);
+
+  return GetSymbols(args, wide, count, wide, count, wide, count);
+}
+
+v8::Handle<v8::Value> DateTimeFormat::JSDateTimeFormat(
+    const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale and date/time options are required.")));
+  }
+
+  icu::SimpleDateFormat* date_format = static_cast<icu::SimpleDateFormat*>(
+      CreateDateTimeFormat(args[0]->ToString(), args[1]->ToObject()));
+
+  if (datetime_format_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+    raw_template->SetClassName(v8::String::New("v8Locale.DateTimeFormat"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal field for icu date time formatter.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("format"),
+               v8::FunctionTemplate::New(Format));
+    proto->Set(v8::String::New("getMonths"),
+               v8::FunctionTemplate::New(GetMonths));
+    proto->Set(v8::String::New("getWeekdays"),
+               v8::FunctionTemplate::New(GetWeekdays));
+    proto->Set(v8::String::New("getEras"),
+               v8::FunctionTemplate::New(GetEras));
+    proto->Set(v8::String::New("getAmPm"),
+               v8::FunctionTemplate::New(GetAmPm));
+
+    datetime_format_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      datetime_format_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set date time formatter as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, date_format);
+
+  // Set resolved pattern in options.pattern.
+  icu::UnicodeString pattern;
+  date_format->toPattern(pattern);
+  v8::Local<v8::Object> options = v8::Object::New();
+  options->Set(v8::String::New("pattern"),
+               v8::String::New(reinterpret_cast<const uint16_t*>(
+                   pattern.getBuffer()), pattern.length()));
+  wrapper->Set(v8::String::New("options"), options);
+
+  // Make object handle weak so we can delete iterator once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteDateTimeFormat);
+
+  return wrapper;
+}
+
+// Returns SimpleDateFormat.
+static icu::DateFormat* CreateDateTimeFormat(
+    v8::Handle<v8::String> locale, v8::Handle<v8::Object> settings) {
+  v8::HandleScope handle_scope;
+
+  v8::String::AsciiValue ascii_locale(locale);
+  icu::Locale icu_locale(*ascii_locale);
+
+  // Make formatter from skeleton.
+  icu::SimpleDateFormat* date_format = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString skeleton;
+  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &skeleton)) {
+    // Note: the generator is an ICU object, not a V8 handle; own it with a
+    // plain pointer and release it once the pattern has been extracted.
+    icu::DateTimePatternGenerator* generator =
+        icu::DateTimePatternGenerator::createInstance(icu_locale, status);
+    icu::UnicodeString pattern =
+        generator->getBestPattern(skeleton, status);
+    delete generator;
+
+    date_format = new icu::SimpleDateFormat(pattern, icu_locale, status);
+    if (U_SUCCESS(status)) {
+      return date_format;
+    } else {
+      delete date_format;
+    }
+  }
+
+  // Extract date style and time style from settings.
+  icu::UnicodeString date_style;
+  icu::DateFormat::EStyle icu_date_style = icu::DateFormat::kNone;
+  if (I18NUtils::ExtractStringSetting(settings, "dateStyle", &date_style)) {
+    icu_date_style = GetDateTimeStyle(date_style);
+  }
+
+  icu::UnicodeString time_style;
+  icu::DateFormat::EStyle icu_time_style = icu::DateFormat::kNone;
+  if (I18NUtils::ExtractStringSetting(settings, "timeStyle", &time_style)) {
+    icu_time_style = GetDateTimeStyle(time_style);
+  }
+
+  // Try all combinations of date/time styles.
+  if (icu_date_style == icu::DateFormat::kNone &&
+      icu_time_style == icu::DateFormat::kNone) {
+    // Return default short date, short time format.
+    return icu::DateFormat::createDateTimeInstance(
+        icu::DateFormat::kShort, icu::DateFormat::kShort, icu_locale);
+  } else if (icu_date_style != icu::DateFormat::kNone &&
+             icu_time_style != icu::DateFormat::kNone) {
+    return icu::DateFormat::createDateTimeInstance(
+        icu_date_style, icu_time_style, icu_locale);
+  } else if (icu_date_style != icu::DateFormat::kNone) {
+    return icu::DateFormat::createDateInstance(icu_date_style, icu_locale);
+  } else {
+    // icu_time_style != icu::DateFormat::kNone
+    return icu::DateFormat::createTimeInstance(icu_time_style, icu_locale);
+  }
+}
+
+// Creates a v8::Array of narrow, abbrev or wide symbols.
+static v8::Handle<v8::Value> GetSymbols(const v8::Arguments& args,
+                                        const icu::UnicodeString* narrow,
+                                        int32_t narrow_count,
+                                        const icu::UnicodeString* abbrev,
+                                        int32_t abbrev_count,
+                                        const icu::UnicodeString* wide,
+                                        int32_t wide_count) {
+  v8::HandleScope handle_scope;
+
+  // Make wide width default.
+  const icu::UnicodeString* result = wide;
+  int32_t count = wide_count;
+
+  if (args.Length() == 1 && args[0]->IsString()) {
+    v8::String::AsciiValue ascii_value(args[0]);
+    if (strcmp(*ascii_value, "abbreviated") == 0) {
+      result = abbrev;
+      count = abbrev_count;
+    } else if (strcmp(*ascii_value, "narrow") == 0) {
+      result = narrow;
+      count = narrow_count;
+    }
+  }
+
+  v8::Handle<v8::Array> symbols = v8::Array::New();
+  for (int32_t i = 0; i < count; ++i) {
+    symbols->Set(i, v8::String::New(
+        reinterpret_cast<const uint16_t*>(result[i].getBuffer()),
+        result[i].length()));
+  }
+
+  return handle_scope.Close(symbols);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("DateTimeFormat method called on an object "
+                      "that is not a DateTimeFormat.")));
+}
+
+// Returns icu date/time style.
+static icu::DateFormat::EStyle GetDateTimeStyle(
+    const icu::UnicodeString& type) {
+  if (type == UNICODE_STRING_SIMPLE("medium")) {
+    return icu::DateFormat::kMedium;
+  } else if (type == UNICODE_STRING_SIMPLE("long")) {
+    return icu::DateFormat::kLong;
+  } else if (type == UNICODE_STRING_SIMPLE("full")) {
+    return icu::DateFormat::kFull;
+  }
+
+  return icu::DateFormat::kShort;
+}
+
+} }  // namespace v8::internal
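The skeleton path in CreateDateTimeFormat reduces to the following standalone ICU sketch (locale and skeleton illustrative; error handling elided):

    UErrorCode status = U_ZERO_ERROR;
    icu::Locale locale("en_US");
    icu::DateTimePatternGenerator* generator =
        icu::DateTimePatternGenerator::createInstance(locale, status);
    icu::UnicodeString pattern =
        generator->getBestPattern(UNICODE_STRING_SIMPLE("yMMMd"), status);
    delete generator;
    // For en_US the skeleton "yMMMd" typically resolves to "MMM d, y".
    icu::SimpleDateFormat format(pattern, locale, status);
    icu::UnicodeString result;
    format.format(1300000000000.0, result);  // UDate: millis since epoch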
diff --git a/src/extensions/experimental/datetime-format.h b/src/extensions/experimental/datetime-format.h
new file mode 100644
index 0000000..a6a228c
--- /dev/null
+++ b/src/extensions/experimental/datetime-format.h
@@ -0,0 +1,83 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class SimpleDateFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class DateTimeFormat {
+ public:
+  static v8::Handle<v8::Value> JSDateTimeFormat(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks date format object from corresponding JavaScript object.
+  static icu::SimpleDateFormat* UnpackDateTimeFormat(
+      v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the DateFormat once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteDateTimeFormat(v8::Persistent<v8::Value> object,
+                                   void* param);
+
+  // Formats date and returns corresponding string.
+  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+  // All date time symbol methods below return stand-alone names in
+  // either narrow, abbreviated or wide width.
+
+  // Get list of months.
+  static v8::Handle<v8::Value> GetMonths(const v8::Arguments& args);
+
+  // Get list of weekdays.
+  static v8::Handle<v8::Value> GetWeekdays(const v8::Arguments& args);
+
+  // Get list of eras.
+  static v8::Handle<v8::Value> GetEras(const v8::Arguments& args);
+
+  // Get list of day periods.
+  static v8::Handle<v8::Value> GetAmPm(const v8::Arguments& args);
+
+ private:
+  DateTimeFormat();
+
+  static v8::Persistent<v8::FunctionTemplate> datetime_format_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_DATETIME_FORMAT_H_
diff --git a/src/extensions/experimental/experimental.gyp b/src/extensions/experimental/experimental.gyp
new file mode 100644
index 0000000..24fb683
--- /dev/null
+++ b/src/extensions/experimental/experimental.gyp
@@ -0,0 +1,105 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+  'variables': {
+    # TODO(cira): Find out how to pass this value for arbitrary embedder.
+    # Chromium sets it in common.gypi and does force include of that file for
+    # all sub projects.
+    'icu_src_dir%': '../../../../third_party/icu',
+  },
+  'targets': [
+    {
+      'target_name': 'i18n_api',
+      'type': 'static_library',
+      'sources': [
+        'break-iterator.cc',
+        'break-iterator.h',
+        'collator.cc',
+        'collator.h',
+        'datetime-format.cc',
+        'datetime-format.h',
+        'i18n-extension.cc',
+        'i18n-extension.h',
+        'i18n-locale.cc',
+        'i18n-locale.h',
+        'i18n-natives.h',
+        'i18n-utils.cc',
+        'i18n-utils.h',
+        'language-matcher.cc',
+        'language-matcher.h',
+        'number-format.cc',
+        'number-format.h',
+        '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+      ],
+      'include_dirs': [
+        '<(icu_src_dir)/public/common',
+        # v8/ is root for all includes.
+        '../../..'
+      ],
+      'dependencies': [
+        '<(icu_src_dir)/icu.gyp:*',
+        'js2c_i18n#host',
+        '../../../tools/gyp/v8.gyp:v8',
+      ],
+      'direct_dependent_settings': {
+        # Adds -Iv8 for embedders.
+        'include_dirs': [
+          '../../..'
+        ],
+      },
+    },
+    {
+      'target_name': 'js2c_i18n',
+      'type': 'none',
+      'toolsets': ['host'],
+      'variables': {
+        'js_files': [
+          'i18n.js'
+        ],
+      },
+      'actions': [
+        {
+          'action_name': 'js2c_i18n',
+          'inputs': [
+            'i18n-js2c.py',
+            '<@(js_files)',
+          ],
+          'outputs': [
+            '<(SHARED_INTERMEDIATE_DIR)/i18n-js.cc',
+          ],
+          'action': [
+            'python',
+            'i18n-js2c.py',
+            '<@(_outputs)',
+            '<@(js_files)'
+          ],
+        },
+      ],
+    },
+  ],  # targets
+}
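The js2c_i18n action compiles i18n.js into a generated C++ source; i18n-js2c.py and its output are not shown in this diff, but assuming the generator simply embeds the script behind I18Natives::GetScriptSource() (the accessor used by i18n-extension.cc below), the generated file would plausibly look like:

    // Hypothetical sketch of the generated i18n-js.cc; the real contents
    // come from i18n-js2c.py and are not part of this diff.
    #include "src/extensions/experimental/i18n-natives.h"

    namespace v8 {
    namespace internal {

    // i18n.js embedded as a C string literal by the generator.
    static const char kI18NScript[] = "/* contents of i18n.js */";

    const char* I18Natives::GetScriptSource() {
      return kI18NScript;
    }

    } }  // namespace v8::internal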
diff --git a/src/extensions/experimental/i18n-extension.cc b/src/extensions/experimental/i18n-extension.cc
new file mode 100644
index 0000000..c5afcf0
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.cc
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-extension.h"
+
+#include "src/extensions/experimental/break-iterator.h"
+#include "src/extensions/experimental/collator.h"
+#include "src/extensions/experimental/datetime-format.h"
+#include "src/extensions/experimental/i18n-locale.h"
+#include "src/extensions/experimental/i18n-natives.h"
+#include "src/extensions/experimental/number-format.h"
+
+namespace v8 {
+namespace internal {
+
+I18NExtension* I18NExtension::extension_ = NULL;
+
+I18NExtension::I18NExtension()
+    : v8::Extension("v8/i18n", I18Natives::GetScriptSource()) {
+}
+
+v8::Handle<v8::FunctionTemplate> I18NExtension::GetNativeFunction(
+    v8::Handle<v8::String> name) {
+  if (name->Equals(v8::String::New("NativeJSLocale"))) {
+    return v8::FunctionTemplate::New(I18NLocale::JSLocale);
+  } else if (name->Equals(v8::String::New("NativeJSBreakIterator"))) {
+    return v8::FunctionTemplate::New(BreakIterator::JSBreakIterator);
+  } else if (name->Equals(v8::String::New("NativeJSCollator"))) {
+    return v8::FunctionTemplate::New(Collator::JSCollator);
+  } else if (name->Equals(v8::String::New("NativeJSDateTimeFormat"))) {
+    return v8::FunctionTemplate::New(DateTimeFormat::JSDateTimeFormat);
+  } else if (name->Equals(v8::String::New("NativeJSNumberFormat"))) {
+    return v8::FunctionTemplate::New(NumberFormat::JSNumberFormat);
+  }
+
+  return v8::Handle<v8::FunctionTemplate>();
+}
+
+I18NExtension* I18NExtension::get() {
+  if (!extension_) {
+    extension_ = new I18NExtension();
+  }
+  return extension_;
+}
+
+void I18NExtension::Register() {
+  static v8::DeclareExtension i18n_extension_declaration(I18NExtension::get());
+}
+
+} }  // namespace v8::internal
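
A note on the mapping above: GetNativeFunction binds the names declared in the natives script to their C++ callbacks. Inside a script compiled with the "v8/i18n" extension the pattern looks like this (a minimal sketch, following the calls made in i18n.js below):

    native function NativeJSCollator();
    var collator = NativeJSCollator('de_DE', {});
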
diff --git a/src/extensions/experimental/i18n-extension.h b/src/extensions/experimental/i18n-extension.h
new file mode 100644
index 0000000..5401f25
--- /dev/null
+++ b/src/extensions/experimental/i18n-extension.h
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+class I18NExtension : public v8::Extension {
+ public:
+  I18NExtension();
+
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+      v8::Handle<v8::String> name);
+
+  // V8 code prefers Register, while Chrome and WebKit use get()-style methods.
+  static void Register();
+  static I18NExtension* get();
+
+ private:
+  static I18NExtension* extension_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_EXTENSION_H_
diff --git a/src/extensions/experimental/i18n-js2c.py b/src/extensions/experimental/i18n-js2c.py
new file mode 100644
index 0000000..9c3128b
--- /dev/null
+++ b/src/extensions/experimental/i18n-js2c.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting I18N JavaScript source code into C-style
+# char arrays. It is used for embedding JavaScript code in the V8
+# library.
+# This is a pared-down copy of v8/tools/js2c.py that avoids use of
+# v8/src/natives.h and produces a different .cc template.
+
+import re, sys
+
+
+def ToCArray(lines):
+  result = []
+  for ch in lines:
+    value = ord(ch)
+    assert value < 128
+    result.append(str(value))
+  result.append("0")
+  return ", ".join(result)
+
+
+def RemoveCommentsAndTrailingWhitespace(lines):
+  lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+  lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+  lines = re.sub(r'\s+\n+', '\n', lines) # trailing whitespace
+  return lines
+
+
+def ReadFile(filename):
+  fin = open(filename, "rt")
+  try:
+    lines = fin.read()
+  finally:
+    fin.close()
+  return lines
+
+
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+
+
+def Validate(lines, filename):
+  lines = RemoveCommentsAndTrailingWhitespace(lines)
+  # Because of the simplified context setup, eval and with are not
+  # allowed in the natives files.
+  eval_match = EVAL_PATTERN.search(lines)
+  if eval_match:
+    raise Exception("Eval disallowed in natives: %s" % filename)
+  with_match = WITH_PATTERN.search(lines)
+  if with_match:
+    raise Exception("With statements disallowed in natives: %s" % filename)
+
+
+HEADER_TEMPLATE = """\
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+// This file was generated from .js source files by gyp.  If you
+// want to make changes to this file you should change either the
+// JavaScript source files or the i18n-js2c.py script.
+
+#include "src/extensions/experimental/i18n-natives.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+const char* I18Natives::GetScriptSource() {
+  // JavaScript source gets injected here.
+  static const char i18n_source[] = {%s};
+
+  return i18n_source;
+}
+
+}  // internal
+}  // v8
+"""
+
+
+def JS2C(source, target):
+  filename = str(source)
+
+  lines = ReadFile(filename)
+  Validate(lines, filename)
+  data = ToCArray(lines)
+
+  # Emit result
+  output = open(target, "w")
+  output.write(HEADER_TEMPLATE % data)
+  output.close()
+
+
+def main():
+  target = sys.argv[1]
+  source = sys.argv[2]
+  JS2C(source, target)
+
+
+if __name__ == "__main__":
+  main()
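
The script is invoked as python i18n-js2c.py <output.cc> <input.js>, matching the gyp action above. As a worked example of ToCArray, the two-character input "v8" becomes the NUL-terminated array 118, 56, 0, which is what gets embedded into i18n_source[] in the generated file.
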
diff --git a/src/extensions/experimental/i18n-locale.cc b/src/extensions/experimental/i18n-locale.cc
new file mode 100644
index 0000000..46a5f87
--- /dev/null
+++ b/src/extensions/experimental/i18n-locale.cc
@@ -0,0 +1,111 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-locale.h"
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "src/extensions/experimental/language-matcher.h"
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const char* const I18NLocale::kLocaleID = "localeID";
+const char* const I18NLocale::kRegionID = "regionID";
+const char* const I18NLocale::kICULocaleID = "icuLocaleID";
+
+v8::Handle<v8::Value> I18NLocale::JSLocale(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 1 || !args[0]->IsObject()) {
+    return v8::Undefined();
+  }
+
+  v8::Local<v8::Object> settings = args[0]->ToObject();
+
+  // Get best match for locale.
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> locale_id = settings->Get(v8::String::New(kLocaleID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  LocaleIDMatch result;
+  if (locale_id->IsArray()) {
+    LanguageMatcher::GetBestMatchForPriorityList(
+        v8::Handle<v8::Array>::Cast(locale_id), &result);
+  } else if (locale_id->IsString()) {
+    LanguageMatcher::GetBestMatchForString(locale_id->ToString(), &result);
+  } else {
+    LanguageMatcher::GetBestMatchForString(v8::String::New(""), &result);
+  }
+
+  // Get best match for region.
+  char region_id[ULOC_COUNTRY_CAPACITY];
+  I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+
+  v8::Handle<v8::Value> region = settings->Get(v8::String::New(kRegionID));
+  if (try_catch.HasCaught()) {
+    return v8::Undefined();
+  }
+
+  if (!GetBestMatchForRegionID(result.icu_id, region, region_id)) {
+    // Set region id to empty string because region couldn't be inferred.
+    I18NUtils::StrNCopy(region_id, ULOC_COUNTRY_CAPACITY, "");
+  }
+
+  // Build JavaScript object that contains bcp and icu locale ID and region ID.
+  v8::Handle<v8::Object> locale = v8::Object::New();
+  locale->Set(v8::String::New(kLocaleID), v8::String::New(result.bcp47_id));
+  locale->Set(v8::String::New(kICULocaleID), v8::String::New(result.icu_id));
+  locale->Set(v8::String::New(kRegionID), v8::String::New(region_id));
+
+  return handle_scope.Close(locale);
+}
+
+bool I18NLocale::GetBestMatchForRegionID(
+    const char* locale_id, v8::Handle<v8::Value> region_id, char* result) {
+  if (region_id->IsString() && region_id->ToString()->Length() != 0) {
+    icu::Locale user_locale(
+        icu::Locale("und", *v8::String::Utf8Value(region_id->ToString())));
+    I18NUtils::StrNCopy(
+        result, ULOC_COUNTRY_CAPACITY, user_locale.getCountry());
+    return true;
+  }
+  // Maximize locale_id to infer the region (e.g. expand "de" to "de_Latn_DE"
+  // and grab "DE" from the result).
+  UErrorCode status = U_ZERO_ERROR;
+  char maximized_locale[ULOC_FULLNAME_CAPACITY];
+  uloc_addLikelySubtags(
+      locale_id, maximized_locale, ULOC_FULLNAME_CAPACITY, &status);
+  uloc_getCountry(maximized_locale, result, ULOC_COUNTRY_CAPACITY, &status);
+
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
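
In script terms, JSLocale backs the v8Locale constructor defined in i18n.js below. A minimal sketch of the round trip, assuming ICU resolves plain "de" to region "DE" via likely subtags:

    var loc = new v8Locale({localeID: 'de'});
    // loc.options.localeID === 'de' and loc.options.regionID === 'DE';
    // the resolved ICU locale ID is kept internally for later calls.
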
diff --git a/src/extensions/experimental/i18n-locale.h b/src/extensions/experimental/i18n-locale.h
new file mode 100644
index 0000000..607818c
--- /dev/null
+++ b/src/extensions/experimental/i18n-locale.h
@@ -0,0 +1,60 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
+
+#include "include/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class I18NLocale {
+ public:
+  I18NLocale() {}
+
+  // Implementations of window.Locale methods.
+  static v8::Handle<v8::Value> JSLocale(const v8::Arguments& args);
+
+  // Infers the region id from the locale id, or uses the user-specified
+  // region id. The result is canonicalized.
+  // Returns the status of the ICU operations (locale maximization and the
+  // region lookup).
+  static bool GetBestMatchForRegionID(
+      const char* locale_id, v8::Handle<v8::Value> regions, char* result);
+
+ private:
+  // Key name for localeID parameter.
+  static const char* const kLocaleID;
+  // Key name for regionID parameter.
+  static const char* const kRegionID;
+  // Key name for the icuLocaleID result.
+  static const char* const kICULocaleID;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_LOCALE_H_
diff --git a/src/extensions/experimental/i18n-natives.h b/src/extensions/experimental/i18n-natives.h
new file mode 100644
index 0000000..37362d0
--- /dev/null
+++ b/src/extensions/experimental/i18n-natives.h
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
+
+namespace v8 {
+namespace internal {
+
+class I18Natives {
+ public:
+  // Gets the script source from the generated file.
+  // The source is a statically allocated string.
+  static const char* GetScriptSource();
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_NATIVES_H_
diff --git a/src/extensions/experimental/i18n-utils.cc b/src/extensions/experimental/i18n-utils.cc
new file mode 100644
index 0000000..dc2be1a
--- /dev/null
+++ b/src/extensions/experimental/i18n-utils.cc
@@ -0,0 +1,87 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/i18n-utils.h"
+
+#include <string.h>
+
+#include "unicode/unistr.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+void I18NUtils::StrNCopy(char* dest, int length, const char* src) {
+  if (!dest || !src) return;
+
+  strncpy(dest, src, length);
+  dest[length - 1] = '\0';
+}
+
+// static
+bool I18NUtils::ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+                                     const char* setting,
+                                     icu::UnicodeString* result) {
+  if (!setting || !result) return false;
+
+  v8::HandleScope handle_scope;
+  v8::TryCatch try_catch;
+  v8::Handle<v8::Value> value = settings->Get(v8::String::New(setting));
+  if (try_catch.HasCaught()) {
+    return false;
+  }
+  // No need to check if |value| is empty because it's taken care of
+  // by TryCatch above.
+  if (!value->IsUndefined() && !value->IsNull() && value->IsString()) {
+    v8::String::Utf8Value utf8_value(value);
+    if (*utf8_value == NULL) return false;
+    result->setTo(icu::UnicodeString::fromUTF8(*utf8_value));
+    return true;
+  }
+  return false;
+}
+
+// static
+void I18NUtils::AsciiToUChar(const char* source,
+                             int32_t source_length,
+                             UChar* target,
+                             int32_t target_length) {
+  int32_t length =
+      source_length < target_length ? source_length : target_length;
+
+  if (length <= 0) {
+    return;
+  }
+
+  for (int32_t i = 0; i < length - 1; ++i) {
+    target[i] = static_cast<UChar>(source[i]);
+  }
+
+  target[length - 1] = 0x0u;
+}
+
+} }  // namespace v8::internal
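
A worked example of the truncation semantics above: StrNCopy(dest, 3, "abcdef") copies "ab" and writes the terminating NUL into dest[2], so the destination always holds at most length - 1 payload bytes.
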
diff --git a/src/extensions/experimental/i18n-utils.h b/src/extensions/experimental/i18n-utils.h
new file mode 100644
index 0000000..7c31528
--- /dev/null
+++ b/src/extensions/experimental/i18n-utils.h
@@ -0,0 +1,69 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class UnicodeString;
+}
+
+namespace v8 {
+namespace internal {
+
+class I18NUtils {
+ public:
+  // Safe string copy. Null-terminates the destination. Copies at most
+  // (length - 1) payload bytes.
+  // We can't use snprintf since it's not supported on all relevant platforms.
+  // We can't use OS::SNPrintF since it's for internal code only.
+  static void StrNCopy(char* dest, int length, const char* src);
+
+  // Extracts the string setting named |setting| from |settings| and stores it
+  // in |result|. Returns true if the setting was specified, false otherwise.
+  static bool ExtractStringSetting(const v8::Handle<v8::Object>& settings,
+                                   const char* setting,
+                                   icu::UnicodeString* result);
+
+  // Converts ASCII array into UChar array.
+  // Target is always \0 terminated.
+  static void AsciiToUChar(const char* source,
+                           int32_t source_length,
+                           UChar* target,
+                           int32_t target_length);
+
+ private:
+  I18NUtils() {}
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_I18N_UTILS_H_
diff --git a/src/extensions/experimental/i18n.js b/src/extensions/experimental/i18n.js
new file mode 100644
index 0000000..56bcf9e
--- /dev/null
+++ b/src/extensions/experimental/i18n.js
@@ -0,0 +1,380 @@
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Rename v8Locale to LocaleInfo once we have a stable API.
+/**
+ * LocaleInfo class is an aggregate class of all i18n API calls.
+ * @param {Object} settings - localeID and regionID to create LocaleInfo from.
+ *   {Array.<string>|string} settings.localeID -
+ *     Unicode identifier of the locale.
+ *     See http://unicode.org/reports/tr35/#BCP_47_Conformance
+ *   {string} settings.regionID - ISO3166 region ID, with the addition of
+ *     invalid, undefined and reserved region codes.
+ * @constructor
+ */
+v8Locale = function(settings) {
+  native function NativeJSLocale();
+
+  // Assume user wanted to do v8Locale("sr");
+  if (typeof(settings) === "string") {
+    settings = {'localeID': settings};
+  }
+
+  var properties = NativeJSLocale(
+      v8Locale.__createSettingsOrDefault(settings, {'localeID': 'root'}));
+
+  // Keep the resolved ICU locale ID around to avoid resolving localeID to
+  // ICU locale ID every time BreakIterator, Collator and so forth are called.
+  this.__icuLocaleID = properties.icuLocaleID;
+  this.options = {'localeID': properties.localeID,
+                  'regionID': properties.regionID};
+};
+
+/**
+ * Clones existing locale with possible overrides for some of the options.
+ * @param {!Object} settings - overrides for current locale settings.
+ * @returns {Object} - new LocaleInfo object.
+ */
+v8Locale.prototype.derive = function(settings) {
+  return new v8Locale(
+      v8Locale.__createSettingsOrDefault(settings, this.options));
+};
+
+/**
+ * v8BreakIterator class implements locale-aware segmentation.
+ * It is not part of the EcmaScript proposal.
+ * @param {Object} locale - locale object to pass to the break
+ *   iterator implementation.
+ * @param {string} type - type of segmentation:
+ *   - character
+ *   - word
+ *   - sentence
+ *   - line
+ * @private
+ * @constructor
+ */
+v8Locale.v8BreakIterator = function(locale, type) {
+  native function NativeJSBreakIterator();
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  // BCP47 ID would work in this case, but we use ICU locale for consistency.
+  var iterator = NativeJSBreakIterator(locale.__icuLocaleID, type);
+  iterator.type = type;
+  return iterator;
+};
+
+/**
+ * Type of the break we encountered during the previous iteration.
+ * @type{Enum}
+ */
+v8Locale.v8BreakIterator.BreakType = {
+  'unknown': -1,
+  'none': 0,
+  'number': 100,
+  'word': 200,
+  'kana': 300,
+  'ideo': 400
+};
+
+/**
+ * Creates new v8BreakIterator based on current locale.
+ * @param {string} - type of segmentation. See constructor.
+ * @returns {Object} - new v8BreakIterator object.
+ */
+v8Locale.prototype.v8CreateBreakIterator = function(type) {
+  return new v8Locale.v8BreakIterator(this, type);
+};
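+
+// Usage sketch (illustrative): segment text by words in this locale.
+//   var wordIterator = (new v8Locale('en')).v8CreateBreakIterator('word');
+//   // wordIterator.type === 'word'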
+
+// TODO(jungshik): Set |collator.options| to actually recognized / resolved
+// values.
+/**
+ * Collator class implements locale-aware sort.
+ * @param {Object} locale - locale object to pass to collator implementation.
+ * @param {Object} settings - collation flags:
+ *   - ignoreCase
+ *   - ignoreAccents
+ *   - numeric
+ * @private
+ * @constructor
+ */
+v8Locale.Collator = function(locale, settings) {
+  native function NativeJSCollator();
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  var collator = NativeJSCollator(
+      locale.__icuLocaleID, v8Locale.__createSettingsOrDefault(settings, {}));
+  return collator;
+};
+
+/**
+ * Creates new Collator based on current locale.
+ * @param {Object} - collation flags. See constructor.
+ * @returns {Object} - new Collator object.
+ */
+v8Locale.prototype.createCollator = function(settings) {
+  return new v8Locale.Collator(this, settings);
+};
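+
+// Usage sketch (illustrative): case-insensitive collator for this locale.
+//   var collator = (new v8Locale('de')).createCollator({ignoreCase: true});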
+
+/**
+ * DateTimeFormat class implements locale-aware date and time formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ *   - skeleton
+ *   - dateStyle
+ *   - timeStyle
+ * @private
+ * @constructor
+ */
+v8Locale.__DateTimeFormat = function(locale, settings) {
+  native function NativeJSDateTimeFormat();
+
+  settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+  var cleanSettings = {};
+  if (settings.hasOwnProperty('skeleton')) {
+    cleanSettings['skeleton'] = settings['skeleton'];
+  } else {
+    cleanSettings = {};
+    if (settings.hasOwnProperty('dateStyle')) {
+      var ds = settings['dateStyle'];
+      if (!/^(short|medium|long|full)$/.test(ds)) ds = 'short';
+      cleanSettings['dateStyle'] = ds;
+    } else if (settings.hasOwnProperty('dateType')) {
+      // Obsolete. New spec requires dateStyle, but we'll keep this around
+      // for current users.
+      // TODO(cira): Remove when all internal users switch to dateStyle.
+      var dt = settings['dateType'];
+      if (!/^(short|medium|long|full)$/.test(dt)) dt = 'short';
+      cleanSettings['dateStyle'] = dt;
+    }
+
+    if (settings.hasOwnProperty('timeStyle')) {
+      var ts = settings['timeStyle'];
+      if (!/^(short|medium|long|full)$/.test(ts)) ts = 'short';
+      cleanSettings['timeStyle'] = ts;
+    } else if (settings.hasOwnProperty('timeType')) {
+      // TODO(cira): Remove when all internal users switch to timeStyle.
+      var tt = settings['timeType'];
+      if (!/^(short|medium|long|full)$/.test(tt)) tt = 'short';
+      cleanSettings['timeStyle'] = tt;
+    }
+  }
+
+  // Default is to show short date and time.
+  if (!cleanSettings.hasOwnProperty('skeleton') &&
+      !cleanSettings.hasOwnProperty('dateStyle') &&
+      !cleanSettings.hasOwnProperty('timeStyle')) {
+    cleanSettings = {'dateStyle': 'short',
+                     'timeStyle': 'short'};
+  }
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  var formatter = NativeJSDateTimeFormat(locale.__icuLocaleID, cleanSettings);
+
+  // NativeJSDateTimeFormat creates formatter.options for us; we just need
+  // to append the actual settings to it.
+  for (var key in cleanSettings) {
+    formatter.options[key] = cleanSettings[key];
+  }
+
+  /**
+   * Clones existing date time format with possible overrides for some
+   * of the options.
+   * @param {!Object} overrideSettings - overrides for current format settings.
+   * @returns {Object} - new DateTimeFormat object.
+   * @public
+   */
+  formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value; we
+    // remove it from the map in that case.
+    for (var prop in overrideSettings) {
+      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+        delete settings[prop];
+      }
+    }
+    return new v8Locale.__DateTimeFormat(
+        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+  };
+
+  return formatter;
+};
+
+/**
+ * Creates new DateTimeFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new DateTimeFormat object.
+ */
+v8Locale.prototype.createDateTimeFormat = function(settings) {
+  return new v8Locale.__DateTimeFormat(this, settings);
+};
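+
+// Usage sketch (illustrative): derive() drops a setting when it is
+// overridden with undefined.
+//   var df = (new v8Locale('en')).createDateTimeFormat(
+//       {dateStyle: 'long', timeStyle: 'short'});
+//   var dateOnly = df.derive({timeStyle: undefined});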
+
+/**
+ * NumberFormat class implements locale-aware number formatting.
+ * Constructor is not part of public API.
+ * @param {Object} locale - locale object to pass to formatter.
+ * @param {Object} settings - formatting flags:
+ *   - skeleton
+ *   - pattern
+ *   - style - decimal, currency, percent or scientific
+ *   - currencyCode - ISO 4217 3-letter currency code
+ * @private
+ * @constructor
+ */
+v8Locale.__NumberFormat = function(locale, settings) {
+  native function NativeJSNumberFormat();
+
+  settings = v8Locale.__createSettingsOrDefault(settings, {});
+
+  var cleanSettings = {};
+  if (settings.hasOwnProperty('skeleton')) {
+    // Assign the skeleton to cleanSettings and fix an invalid currency
+    // pattern if present - 'ooxo' becomes 'o', where 'o' stands for the
+    // currency sign (\u00a4).
+    cleanSettings['skeleton'] =
+        settings['skeleton'].replace(/\u00a4+[^\u00a4]+\u00a4+/g, '\u00a4');
+  } else if (settings.hasOwnProperty('pattern')) {
+    cleanSettings['pattern'] = settings['pattern'];
+  } else if (settings.hasOwnProperty('style')) {
+    var style = settings['style'];
+    if (!/^(decimal|currency|percent|scientific)$/.test(style)) {
+      style = 'decimal';
+    }
+    cleanSettings['style'] = style;
+  }
+
+  // Default is to show decimal style.
+  if (!cleanSettings.hasOwnProperty('skeleton') &&
+      !cleanSettings.hasOwnProperty('pattern') &&
+      !cleanSettings.hasOwnProperty('style')) {
+    cleanSettings = {'style': 'decimal'};
+  }
+
+  // Add currency code if available and valid (3-letter ASCII code).
+  if (settings.hasOwnProperty('currencyCode') &&
+      /^[a-zA-Z]{3}$/.test(settings['currencyCode'])) {
+    cleanSettings['currencyCode'] = settings['currencyCode'].toUpperCase();
+  }
+
+  locale = v8Locale.__createLocaleOrDefault(locale);
+  // Pass in region ID for proper currency detection. Use ZZ if region is empty.
+  var region = locale.options.regionID !== '' ? locale.options.regionID : 'ZZ';
+  var formatter = NativeJSNumberFormat(
+      locale.__icuLocaleID, 'und_' + region, cleanSettings);
+
+  // ICU doesn't always uppercase the currency code.
+  if (formatter.options.hasOwnProperty('currencyCode')) {
+    formatter.options['currencyCode'] =
+        formatter.options['currencyCode'].toUpperCase();
+  }
+
+  for (var key in cleanSettings) {
+    // Don't overwrite keys that are already in.
+    if (formatter.options.hasOwnProperty(key)) continue;
+
+    formatter.options[key] = cleanSettings[key];
+  }
+
+  /**
+   * Clones existing number format with possible overrides for some
+   * of the options.
+   * @param {!Object} overrideSettings - overrides for current format settings.
+   * @returns {Object} - new or cached NumberFormat object.
+   * @public
+   */
+  formatter.derive = function(overrideSettings) {
+    // To remove a setting, the user can specify undefined as its value; we
+    // remove it from the map in that case.
+    for (var prop in overrideSettings) {
+      if (settings.hasOwnProperty(prop) && !overrideSettings[prop]) {
+        delete settings[prop];
+      }
+    }
+    return new v8Locale.__NumberFormat(
+        locale, v8Locale.__createSettingsOrDefault(overrideSettings, settings));
+  };
+
+  return formatter;
+};
+
+/**
+ * Creates new NumberFormat based on current locale.
+ * @param {Object} - formatting flags. See constructor.
+ * @returns {Object} - new or cached NumberFormat object.
+ */
+v8Locale.prototype.createNumberFormat = function(settings) {
+  return new v8Locale.__NumberFormat(this, settings);
+};
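+
+// Usage sketch (illustrative): the currency code is uppercased for ICU.
+//   var nf = (new v8Locale('en')).createNumberFormat(
+//       {style: 'currency', currencyCode: 'usd'});
+//   // nf.options.currencyCode === 'USD'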
+
+/**
+ * Merges user settings and defaults.
+ * Settings that are not of object type are rejected.
+ * Actual property values are not validated, but whitespace is trimmed if they
+ * are strings.
+ * @param {!Object} settings - user provided settings.
+ * @param {!Object} defaults - default values for this type of settings.
+ * @returns {Object} - valid settings object.
+ * @private
+ */
+v8Locale.__createSettingsOrDefault = function(settings, defaults) {
+  if (!settings || typeof(settings) !== 'object') {
+    return defaults;
+  }
+  for (var key in defaults) {
+    if (!settings.hasOwnProperty(key)) {
+      settings[key] = defaults[key];
+    }
+  }
+  // Clean up settings.
+  for (var key in settings) {
+    // Trim whitespace.
+    if (typeof(settings[key]) === "string") {
+      settings[key] = settings[key].trim();
+    }
+    // Remove all properties that are set to undefined/null. This allows
+    // derive method to remove a setting we don't need anymore.
+    if (!settings[key]) {
+      delete settings[key];
+    }
+  }
+
+  return settings;
+};
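+
+// Behavior sketch (illustrative): defaults fill gaps, strings are trimmed,
+// and falsy values are dropped.
+//   v8Locale.__createSettingsOrDefault(
+//       {localeID: ' sr ', regionID: undefined}, {localeID: 'root'})
+//   // yields {localeID: 'sr'}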
+
+/**
+ * If the locale is valid (defined and of v8Locale type) we return it. If not,
+ * we create a default locale and return it.
+ * @param {!Object} locale - user provided locale.
+ * @returns {Object} - v8Locale object.
+ * @private
+ */
+v8Locale.__createLocaleOrDefault = function(locale) {
+  if (!locale || !(locale instanceof v8Locale)) {
+    return new v8Locale();
+  } else {
+    return locale;
+  }
+};
diff --git a/src/extensions/experimental/language-matcher.cc b/src/extensions/experimental/language-matcher.cc
new file mode 100644
index 0000000..127e571
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.cc
@@ -0,0 +1,252 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO(cira): Remove LanguageMatcher from v8 when ICU implements
+// language matching API.
+
+#include "src/extensions/experimental/language-matcher.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/datefmt.h"  // For getAvailableLocales
+#include "unicode/locid.h"
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+const unsigned int LanguageMatcher::kLanguageWeight = 75;
+const unsigned int LanguageMatcher::kScriptWeight = 20;
+const unsigned int LanguageMatcher::kRegionWeight = 5;
+const unsigned int LanguageMatcher::kThreshold = 50;
+const unsigned int LanguageMatcher::kPositionBonus = 1;
+const char* const LanguageMatcher::kDefaultLocale = "root";
+
+static const char* GetLanguageException(const char*);
+static bool BCP47ToICUFormat(const char*, char*);
+static int CompareLocaleSubtags(const char*, const char*);
+static bool BuildLocaleName(const char*, const char*, LocaleIDMatch*);
+
+LocaleIDMatch::LocaleIDMatch()
+    : score(-1) {
+  I18NUtils::StrNCopy(
+      bcp47_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+
+  I18NUtils::StrNCopy(
+      icu_id, ULOC_FULLNAME_CAPACITY, LanguageMatcher::kDefaultLocale);
+}
+
+LocaleIDMatch& LocaleIDMatch::operator=(const LocaleIDMatch& rhs) {
+  I18NUtils::StrNCopy(this->bcp47_id, ULOC_FULLNAME_CAPACITY, rhs.bcp47_id);
+  I18NUtils::StrNCopy(this->icu_id, ULOC_FULLNAME_CAPACITY, rhs.icu_id);
+  this->score = rhs.score;
+
+  return *this;
+}
+
+// static
+void LanguageMatcher::GetBestMatchForPriorityList(
+    v8::Handle<v8::Array> locales, LocaleIDMatch* result) {
+  v8::HandleScope handle_scope;
+
+  unsigned int position_bonus = locales->Length() * kPositionBonus;
+
+  int max_score = 0;
+  LocaleIDMatch match;
+  for (unsigned int i = 0; i < locales->Length(); ++i) {
+    position_bonus -= kPositionBonus;
+
+    v8::TryCatch try_catch;
+    v8::Local<v8::Value> locale_id = locales->Get(v8::Integer::New(i));
+
+    // Return default if exception is raised when reading parameter.
+    if (try_catch.HasCaught()) break;
+
+    // JavaScript arrays can be heterogeneous, so check whether each item
+    // is a string.
+    if (!locale_id->IsString()) continue;
+
+    if (!CompareToSupportedLocaleIDList(locale_id->ToString(), &match)) {
+      continue;
+    }
+
+    // Skip items under threshold.
+    if (match.score < kThreshold) continue;
+
+    match.score += position_bonus;
+    if (match.score > max_score) {
+      *result = match;
+
+      max_score = match.score;
+    }
+  }
+}
+
+// static
+void LanguageMatcher::GetBestMatchForString(
+    v8::Handle<v8::String> locale, LocaleIDMatch* result) {
+  LocaleIDMatch match;
+
+  if (CompareToSupportedLocaleIDList(locale, &match) &&
+      match.score >= kThreshold) {
+    *result = match;
+  }
+}
+
+// static
+bool LanguageMatcher::CompareToSupportedLocaleIDList(
+    v8::Handle<v8::String> locale_id, LocaleIDMatch* result) {
+  static int32_t available_count = 0;
+  // Depending on how ICU data is built, locales returned by
+  // Locale::getAvailableLocales() are not guaranteed to support DateFormat,
+  // Collation and other services. We could call getAvailableLocales() on all
+  // the services we want to support and take the intersection of them all,
+  // but using DateFormat::getAvailableLocales() should suffice.
+  // TODO(cira): Maybe make this thread-safe?
+  static const icu::Locale* available_locales =
+      icu::DateFormat::getAvailableLocales(available_count);
+
+  // Skip this locale_id if it's not in ASCII.
+  static LocaleIDMatch default_match;
+  v8::String::AsciiValue ascii_value(locale_id);
+  if (*ascii_value == NULL) return false;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  if (!BCP47ToICUFormat(*ascii_value, locale)) return false;
+
+  icu::Locale input_locale(locale);
+
+  // Position of the best match locale in list of available locales.
+  int position = -1;
+  const char* language = GetLanguageException(input_locale.getLanguage());
+  const char* script = input_locale.getScript();
+  const char* region = input_locale.getCountry();
+  for (int32_t i = 0; i < available_count; ++i) {
+    int current_score = 0;
+    int sign =
+        CompareLocaleSubtags(language, available_locales[i].getLanguage());
+    current_score += sign * kLanguageWeight;
+
+    sign = CompareLocaleSubtags(script, available_locales[i].getScript());
+    current_score += sign * kScriptWeight;
+
+    sign = CompareLocaleSubtags(region, available_locales[i].getCountry());
+    current_score += sign * kRegionWeight;
+
+    if (current_score >= kThreshold && current_score > result->score) {
+      result->score = current_score;
+      position = i;
+    }
+  }
+
+  // Didn't find any good matches so use defaults.
+  if (position == -1) return false;
+
+  return BuildLocaleName(available_locales[position].getBaseName(),
+                         input_locale.getName(), result);
+}
+
+// For some unsupported language subtags it is better to fall back to a
+// related supported language than to the default.
+static const char* GetLanguageException(const char* language) {
+  // Serbo-croatian to Serbian.
+  if (!strcmp(language, "sh")) return "sr";
+
+  // Norwegian to Norwegian Bokmal.
+  if (!strcmp(language, "no")) return "nb";
+
+  // Moldavian to Romanian.
+  if (!strcmp(language, "mo")) return "ro";
+
+  // Tagalog to Filipino.
+  if (!strcmp(language, "tl")) return "fil";
+
+  return language;
+}
+
+// Converts user input from BCP47 locale id format to ICU compatible format.
+// Returns false if uloc_forLanguageTag call fails or if extension is too long.
+static bool BCP47ToICUFormat(const char* locale_id, char* result) {
+  UErrorCode status = U_ZERO_ERROR;
+  int32_t locale_size = 0;
+
+  char locale[ULOC_FULLNAME_CAPACITY];
+  I18NUtils::StrNCopy(locale, ULOC_FULLNAME_CAPACITY, locale_id);
+
+  // uloc_forLanguageTag has a bug where a long extension can crash the code.
+  // We need to check that the extension part of the language id stays within
+  // the length limit.
+  // ICU bug: http://bugs.icu-project.org/trac/ticket/8519
+  const char* extension = strstr(locale_id, "-u-");
+  if (extension != NULL &&
+      strlen(extension) > ULOC_KEYWORD_AND_VALUES_CAPACITY) {
+    // Truncate to get non-crashing string, but still preserve base language.
+    int base_length = strlen(locale_id) - strlen(extension);
+    locale[base_length] = '\0';
+  }
+
+  uloc_forLanguageTag(locale, result, ULOC_FULLNAME_CAPACITY,
+                      &locale_size, &status);
+  return !U_FAILURE(status);
+}
+
+// Compares locale id subtags.
+// Returns 1 for match or -1 for mismatch.
+static int CompareLocaleSubtags(const char* lsubtag, const char* rsubtag) {
+  return strcmp(lsubtag, rsubtag) == 0 ? 1 : -1;
+}
+
+// Builds a BCP47 compliant locale id from base name of matched locale and
+// full user specified locale.
+// Returns false if uloc_toLanguageTag failed to convert locale id.
+// Example:
+//   base_name of matched locale (ICU ID): de_DE
+//   input_locale_name (ICU ID): de_AT@collation=phonebk
+//   result (ICU ID): de_DE@collation=phonebk
+//   result (BCP47 ID): de-DE-u-co-phonebk
+static bool BuildLocaleName(const char* base_name,
+                            const char* input_locale_name,
+                            LocaleIDMatch* result) {
+  I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+
+  // Get extensions (if any) from the original locale.
+  const char* extension = strchr(input_locale_name, ULOC_KEYWORD_SEPARATOR);
+  if (extension != NULL) {
+    I18NUtils::StrNCopy(result->icu_id + strlen(base_name),
+                        ULOC_KEYWORD_AND_VALUES_CAPACITY, extension);
+  } else {
+    I18NUtils::StrNCopy(result->icu_id, ULOC_LANG_CAPACITY, base_name);
+  }
+
+  // Convert ICU locale name into BCP47 format.
+  UErrorCode status = U_ZERO_ERROR;
+  uloc_toLanguageTag(result->icu_id, result->bcp47_id,
+                     ULOC_FULLNAME_CAPACITY, false, &status);
+  return !U_FAILURE(status);
+}
+
+} }  // namespace v8::internal
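
A worked example of the scoring above: matching the request "de-AT" against an available "de_DE" scores +75 for the language, +20 for the (empty, hence equal) script subtag and -5 for the region, for a total of 90, which clears the threshold of 50. In GetBestMatchForPriorityList, earlier entries also receive a slightly larger position bonus (kPositionBonus per step), so ties resolve toward the front of the list.
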
diff --git a/src/extensions/experimental/language-matcher.h b/src/extensions/experimental/language-matcher.h
new file mode 100644
index 0000000..dd29304
--- /dev/null
+++ b/src/extensions/experimental/language-matcher.h
@@ -0,0 +1,95 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
+
+#include "include/v8.h"
+
+#include "unicode/uloc.h"
+
+namespace v8 {
+namespace internal {
+
+struct LocaleIDMatch {
+  LocaleIDMatch();
+
+  LocaleIDMatch& operator=(const LocaleIDMatch& rhs);
+
+  // BCP47 locale id - "de-Latn-DE-u-co-phonebk".
+  char bcp47_id[ULOC_FULLNAME_CAPACITY];
+
+  // ICU locale id - "de_Latn_DE@collation=phonebk".
+  char icu_id[ULOC_FULLNAME_CAPACITY];
+
+  // Score for this locale.
+  int score;
+};
+
+class LanguageMatcher {
+ public:
+  // Default locale.
+  static const char* const kDefaultLocale;
+
+  // Finds the best supported locale for a given list of locale identifiers.
+  // It preserves the extension for the locale id.
+  static void GetBestMatchForPriorityList(
+      v8::Handle<v8::Array> locale_list, LocaleIDMatch* result);
+
+  // Finds the best supported locale for a single locale identifier.
+  // It preserves the extension for the locale id.
+  static void GetBestMatchForString(
+      v8::Handle<v8::String> locale_id, LocaleIDMatch* result);
+
+ private:
+  // If language subtags match add this amount to the score.
+  static const unsigned int kLanguageWeight;
+
+  // If script subtags match add this amount to the score.
+  static const unsigned int kScriptWeight;
+
+  // If region subtags match add this amount to the score.
+  static const unsigned int kRegionWeight;
+
+  // LocaleID match score has to be over this number to accept the match.
+  static const unsigned int kThreshold;
+
+  // For breaking ties in the priority list.
+  static const unsigned int kPositionBonus;
+
+  LanguageMatcher();
+
+  // Compares locale_id to the list of supported locales and returns the
+  // best match.
+  // Returns false if it fails to convert the locale id from ICU to BCP47
+  // format.
+  static bool CompareToSupportedLocaleIDList(v8::Handle<v8::String> locale_id,
+                                             LocaleIDMatch* result);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_LANGUAGE_MATCHER_H_
diff --git a/src/extensions/experimental/number-format.cc b/src/extensions/experimental/number-format.cc
new file mode 100644
index 0000000..2932c52
--- /dev/null
+++ b/src/extensions/experimental/number-format.cc
@@ -0,0 +1,374 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/extensions/experimental/number-format.h"
+
+#include <string.h>
+
+#include "src/extensions/experimental/i18n-utils.h"
+#include "unicode/dcfmtsym.h"
+#include "unicode/decimfmt.h"
+#include "unicode/locid.h"
+#include "unicode/numfmt.h"
+#include "unicode/uchar.h"
+#include "unicode/ucurr.h"
+#include "unicode/unum.h"
+#include "unicode/uversion.h"
+
+namespace v8 {
+namespace internal {
+
+const int NumberFormat::kCurrencyCodeLength = 4;
+
+v8::Persistent<v8::FunctionTemplate> NumberFormat::number_format_template_;
+
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String>,
+                                              v8::Handle<v8::String>,
+                                              v8::Handle<v8::Object>);
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+    const icu::Locale&, const icu::UnicodeString&, UErrorCode*);
+static icu::DecimalFormatSymbols* GetFormatSymbols(const icu::Locale&);
+static bool GetCurrencyCode(const icu::Locale&,
+                            const char* const,
+                            v8::Handle<v8::Object>,
+                            UChar*);
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError();
+
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
+    v8::Handle<v8::Object> obj) {
+  if (number_format_template_->HasInstance(obj)) {
+    return static_cast<icu::DecimalFormat*>(
+        obj->GetPointerFromInternalField(0));
+  }
+
+  return NULL;
+}
+
+void NumberFormat::DeleteNumberFormat(v8::Persistent<v8::Value> object,
+                                      void* param) {
+  v8::Persistent<v8::Object> persistent_object =
+      v8::Persistent<v8::Object>::Cast(object);
+
+  // First delete the hidden C++ object.
+  // Unpacking should never return NULL here. That would only happen if
+  // this method is used as the weak callback for persistent handles not
+  // pointing to a number formatter.
+  delete UnpackNumberFormat(persistent_object);
+
+  // Then dispose of the persistent handle to JS object.
+  persistent_object.Dispose();
+}
+
+v8::Handle<v8::Value> NumberFormat::Format(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  if (args.Length() != 1 || !args[0]->IsNumber()) {
+    // Just return NaN on invalid input.
+    return v8::String::New("NaN");
+  }
+
+  icu::DecimalFormat* number_format = UnpackNumberFormat(args.Holder());
+  if (!number_format) {
+    return ThrowUnexpectedObjectError();
+  }
+
+  // ICU will handle actual NaN value properly and return NaN string.
+  icu::UnicodeString result;
+  number_format->format(args[0]->NumberValue(), result);
+
+  return v8::String::New(
+      reinterpret_cast<const uint16_t*>(result.getBuffer()), result.length());
+}
+
+v8::Handle<v8::Value> NumberFormat::JSNumberFormat(const v8::Arguments& args) {
+  v8::HandleScope handle_scope;
+
+  // Expect locale id, region id and settings.
+  if (args.Length() != 3 ||
+      !args[0]->IsString() || !args[1]->IsString() || !args[2]->IsObject()) {
+    return v8::ThrowException(v8::Exception::SyntaxError(
+        v8::String::New("Locale, region and number settings are required.")));
+  }
+
+  icu::DecimalFormat* number_format = CreateNumberFormat(
+      args[0]->ToString(), args[1]->ToString(), args[2]->ToObject());
+
+  if (number_format_template_.IsEmpty()) {
+    v8::Local<v8::FunctionTemplate> raw_template(v8::FunctionTemplate::New());
+
+    raw_template->SetClassName(v8::String::New("v8Locale.NumberFormat"));
+
+    // Define internal field count on instance template.
+    v8::Local<v8::ObjectTemplate> object_template =
+        raw_template->InstanceTemplate();
+
+    // Set aside internal field for icu number formatter.
+    object_template->SetInternalFieldCount(1);
+
+    // Define all of the prototype methods on prototype template.
+    v8::Local<v8::ObjectTemplate> proto = raw_template->PrototypeTemplate();
+    proto->Set(v8::String::New("format"),
+               v8::FunctionTemplate::New(Format));
+
+    number_format_template_ =
+        v8::Persistent<v8::FunctionTemplate>::New(raw_template);
+  }
+
+  // Create an empty object wrapper.
+  v8::Local<v8::Object> local_object =
+      number_format_template_->GetFunction()->NewInstance();
+  v8::Persistent<v8::Object> wrapper =
+      v8::Persistent<v8::Object>::New(local_object);
+
+  // Set number formatter as internal field of the resulting JS object.
+  wrapper->SetPointerInInternalField(0, number_format);
+
+  // Create options key.
+  v8::Local<v8::Object> options = v8::Object::New();
+
+  // Show what ICU decided to use for easier problem tracking.
+  // Keep it as v8 specific extension.
+  icu::UnicodeString pattern;
+  number_format->toPattern(pattern);
+  options->Set(v8::String::New("v8ResolvedPattern"),
+               v8::String::New(reinterpret_cast<const uint16_t*>(
+                   pattern.getBuffer()), pattern.length()));
+
+  // Set resolved currency code in options.currency if not empty.
+  icu::UnicodeString currency(number_format->getCurrency());
+  if (!currency.isEmpty()) {
+    options->Set(v8::String::New("currencyCode"),
+                 v8::String::New(reinterpret_cast<const uint16_t*>(
+                     currency.getBuffer()), currency.length()));
+  }
+
+  wrapper->Set(v8::String::New("options"), options);
+
+  // Make object handle weak so we can delete formatter once GC kicks in.
+  wrapper.MakeWeak(NULL, DeleteNumberFormat);
+
+  return wrapper;
+}
+
+// Returns DecimalFormat.
+static icu::DecimalFormat* CreateNumberFormat(v8::Handle<v8::String> locale,
+                                              v8::Handle<v8::String> region,
+                                              v8::Handle<v8::Object> settings) {
+  v8::HandleScope handle_scope;
+
+  v8::String::AsciiValue ascii_locale(locale);
+  icu::Locale icu_locale(*ascii_locale);
+
+  // Make formatter from skeleton.
+  icu::DecimalFormat* number_format = NULL;
+  UErrorCode status = U_ZERO_ERROR;
+  icu::UnicodeString setting;
+
+  if (I18NUtils::ExtractStringSetting(settings, "skeleton", &setting)) {
+    // TODO(cira): Use ICU skeleton once
+    // http://bugs.icu-project.org/trac/ticket/8610 is resolved.
+    number_format = CreateFormatterFromSkeleton(icu_locale, setting, &status);
+  } else if (I18NUtils::ExtractStringSetting(settings, "pattern", &setting)) {
+    number_format =
+        new icu::DecimalFormat(setting, GetFormatSymbols(icu_locale), status);
+  } else if (I18NUtils::ExtractStringSetting(settings, "style", &setting)) {
+    if (setting == UNICODE_STRING_SIMPLE("currency")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createCurrencyInstance(icu_locale, status));
+    } else if (setting == UNICODE_STRING_SIMPLE("percent")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createPercentInstance(icu_locale, status));
+    } else if (setting == UNICODE_STRING_SIMPLE("scientific")) {
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createScientificInstance(icu_locale, status));
+    } else {
+      // Make it decimal in any other case.
+      number_format = static_cast<icu::DecimalFormat*>(
+          icu::NumberFormat::createInstance(icu_locale, status));
+    }
+  }
+
+  if (U_FAILURE(status)) {
+    delete number_format;
+    status = U_ZERO_ERROR;
+    number_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, status));
+  }
+
+  // Attach appropriate currency code to the formatter.
+  // It affects currency formatters only.
+  // Region is full language identifier in form 'und_' + region id.
+  v8::String::AsciiValue ascii_region(region);
+
+  UChar currency_code[NumberFormat::kCurrencyCodeLength];
+  if (GetCurrencyCode(icu_locale, *ascii_region, settings, currency_code)) {
+    number_format->setCurrency(currency_code, status);
+  }
+
+  return number_format;
+}
+
+// Generates ICU number format pattern from given skeleton.
+// TODO(cira): Remove once ICU includes equivalent method
+// (see http://bugs.icu-project.org/trac/ticket/8610).
+static icu::DecimalFormat* CreateFormatterFromSkeleton(
+    const icu::Locale& icu_locale,
+    const icu::UnicodeString& skeleton,
+    UErrorCode* status) {
+  icu::DecimalFormat skeleton_format(
+      skeleton, GetFormatSymbols(icu_locale), *status);
+
+  // Find out if skeleton contains currency or percent symbol and create
+  // proper instance to tweak.
+  icu::DecimalFormat* base_format = NULL;
+
+  // UChar representation of U+00A4 currency symbol.
+  const UChar currency_symbol = 0xA4u;
+
+  int32_t index = skeleton.indexOf(currency_symbol);
+  if (index != -1) {
+    // Find how many U+00A4 are there. There is at least one.
+    // Case of non-consecutive U+00A4 is taken care of in i18n.js.
+    int32_t end_index = skeleton.lastIndexOf(currency_symbol, index);
+
+#if (U_ICU_VERSION_MAJOR_NUM == 4) && (U_ICU_VERSION_MINOR_NUM <= 6)
+    icu::NumberFormat::EStyles style;
+    switch (end_index - index) {
+      case 0:
+        style = icu::NumberFormat::kCurrencyStyle;
+        break;
+      case 1:
+        style = icu::NumberFormat::kIsoCurrencyStyle;
+        break;
+      default:
+        style = icu::NumberFormat::kPluralCurrencyStyle;
+    }
+#else  // ICU version is 4.8 or above (we ignore versions below 4.0).
+    UNumberFormatStyle style;
+    switch (end_index - index) {
+      case 0:
+        style = UNUM_CURRENCY;
+        break;
+      case 1:
+        style = UNUM_CURRENCY_ISO;
+        break;
+      default:
+        style = UNUM_CURRENCY_PLURAL;
+    }
+#endif
+
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, style, *status));
+  } else if (skeleton.indexOf('%') != -1) {
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createPercentInstance(icu_locale, *status));
+  } else {
+    // TODO(cira): Handle scientific skeleton.
+    base_format = static_cast<icu::DecimalFormat*>(
+        icu::NumberFormat::createInstance(icu_locale, *status));
+  }
+
+  if (U_FAILURE(*status)) {
+    delete base_format;
+    return NULL;
+  }
+
+  // Copy important information from skeleton to the new formatter.
+  // TODO(cira): copy rounding information from skeleton?
+  base_format->setGroupingUsed(skeleton_format.isGroupingUsed());
+
+  base_format->setMinimumIntegerDigits(
+      skeleton_format.getMinimumIntegerDigits());
+
+  base_format->setMinimumFractionDigits(
+      skeleton_format.getMinimumFractionDigits());
+
+  base_format->setMaximumFractionDigits(
+      skeleton_format.getMaximumFractionDigits());
+
+  return base_format;
+}
+
+// Gets decimal symbols for a locale.
+static icu::DecimalFormatSymbols* GetFormatSymbols(
+    const icu::Locale& icu_locale) {
+  UErrorCode status = U_ZERO_ERROR;
+  icu::DecimalFormatSymbols* symbols =
+      new icu::DecimalFormatSymbols(icu_locale, status);
+
+  if (U_FAILURE(status)) {
+    delete symbols;
+    // Use symbols from default locale.
+    symbols = new icu::DecimalFormatSymbols(status);
+  }
+
+  return symbols;
+}
+
+// Gets the ISO 4217 3-letter currency code.
+// Checks the currencyCode setting first, then @currency=code, and finally
+// tries to infer the currency code from the locale 'und_' + region id.
+// Returns false in case of error.
+static bool GetCurrencyCode(const icu::Locale& icu_locale,
+                            const char* const und_region_locale,
+                            v8::Handle<v8::Object> settings,
+                            UChar* code) {
+  UErrorCode status = U_ZERO_ERROR;
+
+  // If there is user specified currency code, use it.
+  icu::UnicodeString currency;
+  if (I18NUtils::ExtractStringSetting(settings, "currencyCode", &currency)) {
+    currency.extract(code, NumberFormat::kCurrencyCodeLength, status);
+    return true;
+  }
+
+  // If ICU locale has -cu- currency code use it.
+  char currency_code[NumberFormat::kCurrencyCodeLength];
+  int32_t length = icu_locale.getKeywordValue(
+      "currency", currency_code, NumberFormat::kCurrencyCodeLength, status);
+  if (length != 0) {
+    I18NUtils::AsciiToUChar(currency_code, length + 1,
+                            code, NumberFormat::kCurrencyCodeLength);
+    return true;
+  }
+
+  // Otherwise infer currency code from the region id.
+  ucurr_forLocale(
+      und_region_locale, code, NumberFormat::kCurrencyCodeLength, &status);
+
+  return !!U_SUCCESS(status);
+}
+
+// Throws a JavaScript exception.
+static v8::Handle<v8::Value> ThrowUnexpectedObjectError() {
+  // Returns undefined, and schedules an exception to be thrown.
+  return v8::ThrowException(v8::Exception::Error(
+      v8::String::New("NumberFormat method called on an object "
+                      "that is not a NumberFormat.")));
+}
+
+} }  // namespace v8::internal
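
number-format.cc ties the lifetime of the ICU formatter to its JS wrapper
using this era's standard V8 idiom: store the native pointer in an internal
field, hold the wrapper in a weak persistent handle, and free both in the
weak callback. A minimal self-contained sketch of the same pattern, with a
hypothetical Counter standing in for the ICU object:

#include <v8.h>

// Hypothetical native object whose lifetime should follow its JS wrapper.
struct Counter {
  int value;
};

// Weak callback: V8 calls this once the wrapper is otherwise unreachable.
static void DeleteCounter(v8::Persistent<v8::Value> object, void* param) {
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::Cast(object);
  // Reclaim the native object stored in internal field 0 ...
  delete static_cast<Counter*>(wrapper->GetPointerFromInternalField(0));
  // ... then release the persistent handle to the JS object itself.
  wrapper.Dispose();
}

// Wraps a new Counter; object_template must have internal field count 1.
static v8::Handle<v8::Object> WrapCounter(
    v8::Handle<v8::ObjectTemplate> object_template) {
  v8::Persistent<v8::Object> wrapper =
      v8::Persistent<v8::Object>::New(object_template->NewInstance());
  wrapper->SetPointerInInternalField(0, new Counter());
  // Weaken the handle so DeleteCounter runs when GC collects the wrapper.
  wrapper.MakeWeak(NULL, DeleteCounter);
  return wrapper;
}
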
diff --git a/src/extensions/experimental/number-format.h b/src/extensions/experimental/number-format.h
new file mode 100644
index 0000000..bcfaed6
--- /dev/null
+++ b/src/extensions/experimental/number-format.h
@@ -0,0 +1,71 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+#define V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
+
+#include "include/v8.h"
+
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class DecimalFormat;
+}
+
+namespace v8 {
+namespace internal {
+
+class NumberFormat {
+ public:
+  // 3-letter ISO 4217 currency code plus \0.
+  static const int kCurrencyCodeLength;
+
+  static v8::Handle<v8::Value> JSNumberFormat(const v8::Arguments& args);
+
+  // Helper methods for various bindings.
+
+  // Unpacks number format object from corresponding JavaScript object.
+  static icu::DecimalFormat* UnpackNumberFormat(
+      v8::Handle<v8::Object> obj);
+
+  // Release memory we allocated for the NumberFormat once the JS object that
+  // holds the pointer gets garbage collected.
+  static void DeleteNumberFormat(v8::Persistent<v8::Value> object,
+                                 void* param);
+
+  // Formats number and returns corresponding string.
+  static v8::Handle<v8::Value> Format(const v8::Arguments& args);
+
+ private:
+  NumberFormat();
+
+  static v8::Persistent<v8::FunctionTemplate> number_format_template_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_EXPERIMENTAL_NUMBER_FORMAT_H_
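
The header keeps ICU out of its transitive includes by forward-declaring
icu::DecimalFormat inside U_ICU_NAMESPACE (which unicode/uversion.h maps to
the versioned icu namespace); only the .cc needs the full class definition,
since the header stores nothing but a pointer. The same trick, sketched with
a hypothetical wrapper around icu::Calendar:

#include "unicode/uversion.h"  // defines U_ICU_NAMESPACE and the icu alias

namespace U_ICU_NAMESPACE {
class Calendar;  // forward declaration; the .cc includes unicode/calendar.h
}

// Hypothetical wrapper: a pointer member only needs the incomplete type.
class CalendarWrapper {
 public:
  explicit CalendarWrapper(icu::Calendar* calendar) : calendar_(calendar) {}
 private:
  icu::Calendar* calendar_;
};
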
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 573797e..3740c27 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,15 +40,19 @@
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
+  bool compact = false;
+  // All allocation spaces other than NEW_SPACE have the same effect.
+  if (args.Length() >= 1 && args[0]->IsBoolean()) {
+    compact = args[0]->BooleanValue();
+  }
+  HEAP->CollectAllGarbage(compact);
   return v8::Undefined();
 }
 
 
 void GCExtension::Register() {
-  static GCExtension* gc_extension = NULL;
-  if (gc_extension == NULL) gc_extension = new GCExtension();
-  static v8::DeclareExtension gc_extension_declaration(gc_extension);
+  static GCExtension gc_extension;
+  static v8::DeclareExtension gc_extension_declaration(&gc_extension);
 }
 
 } }  // namespace v8::internal
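
The rolled-back Register() relies on v8::DeclareExtension, whose constructor
registers an extension during static initialization; making the GCExtension a
function-local static avoids the heap allocation the newer code used. A
hedged sketch of a complete extension registered the same way (the
VersionExtension class and its source string are hypothetical):

#include <v8.h>

// Hypothetical extension exposing one native function, version().
class VersionExtension : public v8::Extension {
 public:
  VersionExtension()
      : v8::Extension("v8/version", "native function version();") {}

  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    // Only one native function is declared, so no name dispatch is needed.
    return v8::FunctionTemplate::New(Version);
  }

  static v8::Handle<v8::Value> Version(const v8::Arguments& args) {
    return v8::String::New(v8::V8::GetVersion());
  }

  // Register at static-initialization time, like GCExtension::Register.
  static void Register() {
    static VersionExtension extension;
    static v8::DeclareExtension declaration(&extension);
  }
};

A registered extension typically still has to be named in the
v8::ExtensionConfiguration passed to Context::New before its source is
injected into a context.
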
diff --git a/src/factory.cc b/src/factory.cc
index 143099c..971f9f9 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -59,13 +59,13 @@
 }
 
 
-Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size,
-                                                      PretenureFlag pretenure) {
+Handle<FixedArray> Factory::NewFixedDoubleArray(int size,
+                                                PretenureFlag pretenure) {
   ASSERT(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
-      FixedDoubleArray);
+      FixedArray);
 }
 
 
@@ -95,14 +95,6 @@
 }
 
 
-Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) {
-  ASSERT(0 <= at_least_space_for);
-  CALL_HEAP_FUNCTION(isolate(),
-                     ObjectHashSet::Allocate(at_least_space_for),
-                     ObjectHashSet);
-}
-
-
 Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) {
   ASSERT(0 <= at_least_space_for);
   CALL_HEAP_FUNCTION(isolate(),
@@ -141,20 +133,6 @@
 }
 
 
-Handle<AccessorPair> Factory::NewAccessorPair() {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateAccessorPair(),
-                     AccessorPair);
-}
-
-
-Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateTypeFeedbackInfo(),
-                     TypeFeedbackInfo);
-}
-
-
 // Symbols are created in the old generation (data space).
 Handle<String> Factory::LookupSymbol(Vector<const char> string) {
   CALL_HEAP_FUNCTION(isolate(),
@@ -266,7 +244,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromAscii(
-    const ExternalAsciiString::Resource* resource) {
+    ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -275,7 +253,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromTwoByte(
-    const ExternalTwoByteString::Resource* resource) {
+    ExternalTwoByteString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -327,7 +305,7 @@
 Handle<Context> Factory::NewBlockContext(
     Handle<JSFunction> function,
     Handle<Context> previous,
-    Handle<ScopeInfo> scope_info) {
+    Handle<SerializedScopeInfo> scope_info) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateBlockContext(*function,
@@ -382,8 +360,6 @@
   script->set_context_data(heap->undefined_value());
   script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
   script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST));
-  script->set_compilation_state(
-      Smi::FromInt(Script::COMPILATION_STATE_INITIAL));
   script->set_wrapper(*wrapper);
   script->set_line_ends(heap->undefined_value());
   script->set_eval_from_shared(heap->undefined_value());
@@ -438,12 +414,10 @@
 }
 
 
-Handle<Map> Factory::NewMap(InstanceType type,
-                            int instance_size,
-                            ElementsKind elements_kind) {
+Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
+      isolate()->heap()->AllocateMap(type, instance_size),
       Map);
 }
 
@@ -491,12 +465,23 @@
 }
 
 
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
+}
+
+
 Handle<Map> Factory::GetElementsTransitionMap(
-    Handle<JSObject> src,
-    ElementsKind elements_kind) {
-  Isolate* i = isolate();
-  CALL_HEAP_FUNCTION(i,
-                     src->GetElementsTransitionMap(i, elements_kind),
+    Handle<Map> src,
+    ElementsKind elements_kind,
+    bool safe_to_add_transition) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     src->GetElementsTransitionMap(elements_kind,
+                                                   safe_to_add_transition),
                      Map);
 }
 
@@ -506,12 +491,6 @@
 }
 
 
-Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
-    Handle<FixedDoubleArray> array) {
-  CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
-}
-
-
 Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
     Handle<SharedFunctionInfo> function_info,
     Handle<Map> function_map,
@@ -532,30 +511,29 @@
     PretenureFlag pretenure) {
   Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
       function_info,
-      function_info->is_classic_mode()
-          ? isolate()->function_map()
-          : isolate()->strict_mode_function_map(),
+      function_info->strict_mode()
+          ? isolate()->strict_mode_function_map()
+          : isolate()->function_map(),
       pretenure);
 
   result->set_context(*context);
-  if (!function_info->bound()) {
-    int number_of_literals = function_info->num_literals();
-    Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
-    if (number_of_literals > 0) {
-      // Store the object, regexp and array functions in the literals
-      // array prefix.  These functions will be used when creating
-      // object, regexp and array literals in this function.
-      literals->set(JSFunction::kLiteralGlobalContextIndex,
-                    context->global_context());
-    }
-    result->set_literals(*literals);
+  int number_of_literals = function_info->num_literals();
+  Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+  if (number_of_literals > 0) {
+    // Store the object, regexp and array functions in the literals
+    // array prefix.  These functions will be used when creating
+    // object, regexp and array literals in this function.
+    literals->set(JSFunction::kLiteralGlobalContextIndex,
+                  context->global_context());
   }
+  result->set_literals(*literals);
+  result->set_next_function_link(isolate()->heap()->undefined_value());
+
   if (V8::UseCrankshaft() &&
       FLAG_always_opt &&
       result->is_compiled() &&
       !function_info->is_toplevel() &&
-      function_info->allows_lazy_compilation() &&
-      !function_info->optimization_disabled()) {
+      function_info->allows_lazy_compilation()) {
     result->MarkForLazyRecompilation();
   }
   return result;
@@ -570,19 +548,17 @@
 }
 
 
-Handle<Object> Factory::NewNumberFromInt(int32_t value,
-                                         PretenureFlag pretenure) {
+Handle<Object> Factory::NewNumberFromInt(int value) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromInt32(value, pretenure), Object);
+      isolate()->heap()->NumberFromInt32(value), Object);
 }
 
 
-Handle<Object> Factory::NewNumberFromUint(uint32_t value,
-                                         PretenureFlag pretenure) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromUint32(value, pretenure), Object);
+      isolate()->heap()->NumberFromUint32(value), Object);
 }
 
 
@@ -675,16 +651,14 @@
     return undefined_value();
   Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
   Handle<Object> type_obj = LookupAsciiSymbol(type);
-  Handle<Object> argv[] = { type_obj, args };
+  Object** argv[2] = { type_obj.location(),
+                       Handle<Object>::cast(args).location() };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-                                             isolate()->js_builtins_object(),
-                                             ARRAY_SIZE(argv),
-                                             argv,
-                                             &caught_exception);
+      isolate()->js_builtins_object(), 2, argv, &caught_exception);
   return result;
 }
 
@@ -700,16 +674,13 @@
   Handle<JSFunction> fun = Handle<JSFunction>(
       JSFunction::cast(isolate()->js_builtins_object()->
                        GetPropertyNoExceptionThrown(*constr)));
-  Handle<Object> argv[] = { message };
+  Object** argv[1] = { Handle<Object>::cast(message).location() };
 
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   bool caught_exception;
   Handle<Object> result = Execution::TryCall(fun,
-                                             isolate()->js_builtins_object(),
-                                             ARRAY_SIZE(argv),
-                                             argv,
-                                             &caught_exception);
+      isolate()->js_builtins_object(), 1, argv, &caught_exception);
   return result;
 }
 
@@ -722,7 +693,7 @@
   // Allocate the function
   Handle<JSFunction> function = NewFunction(name, the_hole_value());
 
-  // Set up the code pointer in both the shared function info and in
+  // Setup the code pointer in both the shared function info and in
   // the function itself.
   function->shared()->set_code(*code);
   function->set_code(*code);
@@ -753,7 +724,7 @@
   // Allocate the function.
   Handle<JSFunction> function = NewFunction(name, prototype);
 
-  // Set up the code pointer in both the shared function info and in
+  // Setup the code pointer in both the shared function info and in
   // the function itself.
   function->shared()->set_code(*code);
   function->set_code(*code);
@@ -761,9 +732,7 @@
   if (force_initial_map ||
       type != JS_OBJECT_TYPE ||
       instance_size != JSObject::kHeaderSize) {
-    Handle<Map> initial_map = NewMap(type,
-                                     instance_size,
-                                     FAST_SMI_ONLY_ELEMENTS);
+    Handle<Map> initial_map = NewMap(type, instance_size);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -772,10 +741,7 @@
   // property that refers to the function.
   SetPrototypeProperty(function, prototype);
   // Currently safe because it is only invoked from Genesis.
-  CHECK_NOT_EMPTY_HANDLE(isolate(),
-                         JSObject::SetLocalPropertyIgnoreAttributes(
-                             prototype, constructor_symbol(),
-                             function, DONT_ENUM));
+  SetLocalPropertyNoThrow(prototype, constructor_symbol(), function, DONT_ENUM);
   return function;
 }
 
@@ -783,7 +749,7 @@
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
                                                         Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionWithoutPrototype(name,
-                                                            CLASSIC_MODE);
+                                                            kNonStrictMode);
   function->shared()->set_code(*code);
   function->set_code(*code);
   ASSERT(!function->has_initial_map());
@@ -792,11 +758,11 @@
 }
 
 
-Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
+Handle<SerializedScopeInfo> Factory::NewSerializedScopeInfo(int length) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateScopeInfo(length),
-      ScopeInfo);
+      isolate()->heap()->AllocateSerializedScopeInfo(length),
+      SerializedScopeInfo);
 }
 
 
@@ -865,13 +831,10 @@
   // Number of descriptors added to the result so far.
   int descriptor_count = 0;
 
-  // Ensure that marking will not progress and change color of objects.
-  DescriptorArray::WhitenessWitness witness(*result);
-
   // Copy the descriptors from the array.
   for (int i = 0; i < array->number_of_descriptors(); i++) {
-    if (!array->IsNullDescriptor(i)) {
-      DescriptorArray::CopyFrom(result, descriptor_count++, array, i, witness);
+    if (array->GetType(i) != NULL_DESCRIPTOR) {
+      result->CopyFrom(descriptor_count++, *array, i);
     }
   }
 
@@ -891,7 +854,7 @@
     if (result->LinearSearch(*key, descriptor_count) ==
         DescriptorArray::kNotFound) {
       CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
-      result->Set(descriptor_count, &desc, witness);
+      result->Set(descriptor_count, &desc);
       descriptor_count++;
     } else {
       duplicates++;
@@ -905,13 +868,13 @@
     Handle<DescriptorArray> new_result =
         NewDescriptorArray(number_of_descriptors);
     for (int i = 0; i < number_of_descriptors; i++) {
-      DescriptorArray::CopyFrom(new_result, i, result, i, witness);
+      new_result->CopyFrom(i, *result, i);
     }
     result = new_result;
   }
 
   // Sort the result before returning.
-  result->Sort(witness);
+  result->Sort();
   return result;
 }
 
@@ -942,62 +905,21 @@
 
 
 Handle<JSArray> Factory::NewJSArray(int capacity,
-                                    ElementsKind elements_kind,
                                     PretenureFlag pretenure) {
+  Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
   CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateJSArrayAndStorage(
-                         elements_kind,
-                         0,
-                         capacity,
-                         INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
-                         pretenure),
+                     Handle<JSArray>::cast(obj)->Initialize(capacity),
                      JSArray);
 }
 
 
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
-                                                ElementsKind elements_kind,
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
                                                 PretenureFlag pretenure) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateJSArrayWithElements(*elements,
-                                                     elements_kind,
-                                                     pretenure),
-      JSArray);
-}
-
-
-void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
-                                           int capacity,
-                                           int length) {
-  ElementsAccessor* accessor = array->GetElementsAccessor();
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      accessor->SetCapacityAndLength(*array, capacity, length));
-}
-
-
-void Factory::SetContent(Handle<JSArray> array,
-                         Handle<FixedArrayBase> elements) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      array->SetContent(*elements));
-}
-
-
-void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      array->EnsureCanContainHeapObjectElements());
-}
-
-
-void Factory::EnsureCanContainElements(Handle<JSArray> array,
-                                       Handle<FixedArrayBase> elements,
-                                       EnsureElementsMode mode) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      array->EnsureCanContainElements(*elements, mode));
+  Handle<JSArray> result =
+      Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
+                                        pretenure));
+  result->SetContent(*elements);
+  return result;
 }
 
 
@@ -1026,18 +948,11 @@
 }
 
 
-void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
-  CALL_HEAP_FUNCTION_VOID(
-      isolate(),
-      object->SetIdentityHash(hash, ALLOW_CREATION));
-}
-
-
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
     Handle<Code> code,
-    Handle<ScopeInfo> scope_info) {
+    Handle<SerializedScopeInfo> scope_info) {
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name);
   shared->set_code(*code);
   shared->set_scope_info(*scope_info);
@@ -1085,12 +1000,6 @@
 }
 
 
-Handle<String> Factory::Uint32ToString(uint32_t value) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->Uint32ToString(value), String);
-}
-
-
 Handle<SeededNumberDictionary> Factory::DictionaryAtNumberPut(
     Handle<SeededNumberDictionary> dictionary,
     uint32_t key,
@@ -1133,11 +1042,11 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototypeHelper(
     Handle<String> name,
-    LanguageMode language_mode) {
+    StrictModeFlag strict_mode) {
   Handle<SharedFunctionInfo> function_share = NewSharedFunctionInfo(name);
-  Handle<Map> map = (language_mode == CLASSIC_MODE)
-      ? isolate()->function_without_prototype_map()
-      : isolate()->strict_mode_function_without_prototype_map();
+  Handle<Map> map = strict_mode == kStrictMode
+      ? isolate()->strict_mode_function_without_prototype_map()
+      : isolate()->function_without_prototype_map();
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->AllocateFunction(
                          *map,
@@ -1149,9 +1058,8 @@
 
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(
     Handle<String> name,
-    LanguageMode language_mode) {
-  Handle<JSFunction> fun =
-      NewFunctionWithoutPrototypeHelper(name, language_mode);
+    StrictModeFlag strict_mode) {
+  Handle<JSFunction> fun = NewFunctionWithoutPrototypeHelper(name, strict_mode);
   fun->set_context(isolate()->context()->global_context());
   return fun;
 }
@@ -1411,20 +1319,4 @@
 }
 
 
-Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
-  Heap* h = isolate()->heap();
-  if (name->Equals(h->undefined_symbol())) return undefined_value();
-  if (name->Equals(h->nan_symbol())) return nan_value();
-  if (name->Equals(h->infinity_symbol())) return infinity_value();
-  return Handle<Object>::null();
-}
-
-
-Handle<Object> Factory::ToBoolean(bool value) {
-  return Handle<Object>(value
-                        ? isolate()->heap()->true_value()
-                        : isolate()->heap()->false_value());
-}
-
-
 } }  // namespace v8::internal
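
Every factory method above funnels through CALL_HEAP_FUNCTION, which wraps a
raw heap allocation returning MaybeObject*: on failure it collects garbage
and retries before giving up, and on success it boxes the raw pointer in a
Handle. A simplified sketch of that idiom (not V8's actual macro, which is
more elaborate and, for example, escalates the collection on a second
failure):

// Stripped-down illustration of the allocate / collect / retry pattern
// behind CALL_HEAP_FUNCTION; names and structure are illustrative.
template <class T, class AllocFunction>
Handle<T> AllocateWithRetry(Isolate* isolate, AllocFunction allocate) {
  for (int attempt = 0; attempt < 2; attempt++) {
    Object* result;
    if (allocate()->ToObject(&result)) {
      return Handle<T>(T::cast(result));  // success: box in a handle
    }
    // Allocation failed: free up memory and try once more.
    isolate->heap()->CollectAllGarbage(true);
  }
  return Handle<T>::null();  // persistent failure: caller reports OOM
}
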
diff --git a/src/factory.h b/src/factory.h
index 786d4a9..c9817fe 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,7 +50,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Allocate a new uninitialized fixed double array.
-  Handle<FixedDoubleArray> NewFixedDoubleArray(
+  Handle<FixedArray> NewFixedDoubleArray(
       int size,
       PretenureFlag pretenure = NOT_TENURED);
 
@@ -62,8 +62,6 @@
 
   Handle<StringDictionary> NewStringDictionary(int at_least_space_for);
 
-  Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for);
-
   Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for);
 
   Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors);
@@ -73,10 +71,6 @@
   Handle<DeoptimizationOutputData> NewDeoptimizationOutputData(
       int deopt_entry_count,
       PretenureFlag pretenure);
-  // Allocates a pre-tenured empty AccessorPair.
-  Handle<AccessorPair> NewAccessorPair();
-
-  Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
 
   Handle<String> LookupSymbol(Vector<const char> str);
   Handle<String> LookupSymbol(Handle<String> str);
@@ -155,9 +149,9 @@
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.
   Handle<String> NewExternalStringFromAscii(
-      const ExternalAsciiString::Resource* resource);
+      ExternalAsciiString::Resource* resource);
   Handle<String> NewExternalStringFromTwoByte(
-      const ExternalTwoByteString::Resource* resource);
+      ExternalTwoByteString::Resource* resource);
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
@@ -180,7 +174,7 @@
   // Create a 'block' context.
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
-                                  Handle<ScopeInfo> scope_info);
+                                  Handle<SerializedScopeInfo> scope_info);
 
   // Return the Symbol matching the passed in string.
   Handle<String> SymbolFromString(Handle<String> value);
@@ -213,9 +207,7 @@
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
 
-  Handle<Map> NewMap(InstanceType type,
-                     int instance_size,
-                     ElementsKind elements_kind = FAST_ELEMENTS);
+  Handle<Map> NewMap(InstanceType type, int instance_size);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
 
@@ -227,22 +219,22 @@
 
   Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
-  Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
-                                       ElementsKind elements_kind);
+  Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+  Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
+  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
+                                       ElementsKind elements_kind,
+                                       bool safe_to_add_transition);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
-  Handle<FixedDoubleArray> CopyFixedDoubleArray(
-      Handle<FixedDoubleArray> array);
-
-  // Numbers (e.g. literals) are pretenured by the parser.
+  // Numbers (eg, literals) are pretenured by the parser.
   Handle<Object> NewNumber(double value,
                            PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<Object> NewNumberFromInt(int32_t value,
-                                  PretenureFlag pretenure = NOT_TENURED);
-  Handle<Object> NewNumberFromUint(uint32_t value,
-                                  PretenureFlag pretenure = NOT_TENURED);
+  Handle<Object> NewNumberFromInt(int value);
+  Handle<Object> NewNumberFromUint(uint32_t value);
 
   // These objects are used by the api to create env-independent data
   // structures in the heap.
@@ -264,39 +256,24 @@
 
   // JS arrays are pretenured when allocated by the parser.
   Handle<JSArray> NewJSArray(int capacity,
-                             ElementsKind elements_kind = FAST_ELEMENTS,
                              PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArrayWithElements(
-      Handle<FixedArrayBase> elements,
-      ElementsKind elements_kind = FAST_ELEMENTS,
+      Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
-  void SetElementsCapacityAndLength(Handle<JSArray> array,
-                                    int capacity,
-                                    int length);
-
-  void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
-
-  void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
-  void EnsureCanContainElements(Handle<JSArray> array,
-                                Handle<FixedArrayBase> elements,
-                                EnsureElementsMode mode);
-
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
   void BecomeJSObject(Handle<JSReceiver> object);
   void BecomeJSFunction(Handle<JSReceiver> object);
 
-  void SetIdentityHash(Handle<JSObject> object, Object* hash);
-
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
   Handle<JSFunction> NewFunctionWithoutPrototype(
       Handle<String> name,
-      LanguageMode language_mode);
+      StrictModeFlag strict_mode);
 
   Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global);
 
@@ -310,7 +287,7 @@
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
-  Handle<ScopeInfo> NewScopeInfo(int length);
+  Handle<SerializedScopeInfo> NewSerializedScopeInfo(int length);
 
   Handle<Code> NewCode(const CodeDesc& desc,
                        Code::Flags flags,
@@ -383,7 +360,6 @@
       PropertyAttributes attributes);
 
   Handle<String> NumberToString(Handle<Object> number);
-  Handle<String> Uint32ToString(uint32_t value);
 
   enum ApiInstanceType {
     JavaScriptObject,
@@ -428,7 +404,7 @@
       Handle<String> name,
       int number_of_literals,
       Handle<Code> code,
-      Handle<ScopeInfo> scope_info);
+      Handle<SerializedScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name);
 
   Handle<JSMessageObject> NewJSMessageObject(
@@ -475,14 +451,6 @@
                              JSRegExp::Flags flags,
                              int capture_count);
 
-  // Returns the value for a known global constant (a property of the global
-  // object which is neither configurable nor writable) like 'undefined'.
-  // Returns a null handle when the given name is unknown.
-  Handle<Object> GlobalConstantFor(Handle<String> name);
-
-  // Converts the given boolean condition to JavaScript boolean value.
-  Handle<Object> ToBoolean(bool value);
-
  private:
   Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
 
@@ -491,7 +459,7 @@
 
   Handle<JSFunction> NewFunctionWithoutPrototypeHelper(
       Handle<String> name,
-      LanguageMode language_mode);
+      StrictModeFlag strict_mode);
 
   Handle<DescriptorArray> CopyAppendCallbackDescriptors(
       Handle<DescriptorArray> array,
diff --git a/src/fast-dtoa.h b/src/fast-dtoa.h
index ef28557..94c22ec 100644
--- a/src/fast-dtoa.h
+++ b/src/fast-dtoa.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,7 +43,7 @@
 
 // FastDtoa will produce at most kFastDtoaMaximalLength digits. This does not
 // include the terminating '\0' character.
-const int kFastDtoaMaximalLength = 17;
+static const int kFastDtoaMaximalLength = 17;
 
 // Provides a decimal representation of v.
 // The result should be interpreted as buffer * 10^(point - length).
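
The only change here besides the copyright year is restoring static on
kFastDtoaMaximalLength. In C++ (unlike C) a const object at namespace scope
already has internal linkage, so the keyword is redundant; either spelling
gives every translation unit that includes the header its own copy:

// These two header declarations are equivalent in C++:
const int kMaxDigits = 17;            // internal linkage by default
static const int kMaxDigitsToo = 17;  // 'static' is redundant here
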
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 75697a8..e8f6349 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,6 @@
   extern ctype FLAG_##nam;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
   static ctype const FLAG_##nam = def;
-#define DEFINE_implication(whenflag, thenflag)
 
 // We want to supply the actual storage and value for the flag variable in the
 // .cc file.  We only do this for writable flags.
@@ -49,7 +48,6 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   ctype FLAG_##nam = def;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
 
 // We need to define all of our default values so that the Flag structure can
 // access them by pointer.  These are just used internally inside of one .cc,
@@ -58,7 +56,7 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   static ctype const FLAGDEFAULT_##nam = def;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
+
 
 // We want to write entries into our meta data table, for internal parsing and
 // printing / etc in the flag parser code.  We only do this for writable flags.
@@ -66,14 +64,6 @@
 #define FLAG_FULL(ftype, ctype, nam, def, cmt) \
   { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag)
-
-// We produce the code to set flags when it is implied by another flag.
-#elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt)
-#define FLAG_READONLY(ftype, ctype, nam, def, cmt)
-#define DEFINE_implication(whenflag, thenflag) \
-  if (FLAG_##whenflag) FLAG_##thenflag = true;
 
 #else
 #error No mode supplied when including flags.defs
@@ -81,41 +71,17 @@
 
 #ifdef FLAG_MODE_DECLARE
 // Structure used to hold a collection of arguments to the JavaScript code.
-#define JSARGUMENTS_INIT {{}}
 struct JSArguments {
 public:
-  inline int argc() const {
-    return static_cast<int>(storage_[0]);
-  }
-  inline const char** argv() const {
-    return reinterpret_cast<const char**>(storage_[1]);
-  }
-  inline const char*& operator[] (int idx) const {
-    return argv()[idx];
-  }
-  inline JSArguments& operator=(JSArguments args) {
-    set_argc(args.argc());
-    set_argv(args.argv());
-    return *this;
-  }
-  static JSArguments Create(int argc, const char** argv) {
-    JSArguments args;
-    args.set_argc(argc);
-    args.set_argv(argv);
-    return args;
-  }
+  JSArguments();
+  JSArguments(int argc, const char** argv);
+  int argc() const;
+  const char** argv();
+  const char*& operator[](int idx);
+  JSArguments& operator=(JSArguments args);
 private:
-  void set_argc(int argc) {
-    storage_[0] = argc;
-  }
-  void set_argv(const char** argv) {
-    storage_[1] = reinterpret_cast<AtomicWord>(argv);
-  }
-public:
-  // Contains argc and argv. Unfortunately we have to store these two fields
-  // into a single one to avoid making the initialization macro (which would be
-  // "{ 0, NULL }") contain a coma.
-  AtomicWord storage_[2];
+  int argc_;
+  const char** argv_;
 };
 #endif
 
@@ -130,49 +96,37 @@
 //
 #define FLAG FLAG_FULL
 
-// Flags for language modes and experimental language features.
-DEFINE_bool(use_strict, false, "enforce strict mode")
-
+// Flags for experimental language features.
 DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
-DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
-DEFINE_bool(harmony_modules, false,
-            "enable harmony modules (implies block scoping)")
 DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_collections, false,
-            "enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
-DEFINE_implication(harmony, harmony_scoping)
-DEFINE_implication(harmony, harmony_modules)
-DEFINE_implication(harmony, harmony_proxies)
-DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony_modules, harmony_scoping)
+DEFINE_bool(harmony_weakmaps, false, "enable harmony weak maps")
+DEFINE_bool(harmony_block_scoping, false, "enable harmony block scoping")
 
 // Flags for experimental implementation features.
-DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(clever_optimizations,
-            true,
-            "Optimize object size, Array shift, DOM strings and string +")
-
-// Flags for data representation optimizations
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, true, "use string slices")
+DEFINE_bool(string_slices, false, "use string slices")
 
 // Flags for Crankshaft.
-DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "", "optimization filter")
+#ifdef V8_TARGET_ARCH_MIPS
+  DEFINE_bool(crankshaft, false, "use crankshaft")
+#else
+  DEFINE_bool(crankshaft, true, "use crankshaft")
+#endif
+DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
+DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
+DEFINE_bool(build_lithium, true, "use lithium chunk builder")
+DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
+DEFINE_bool(use_lithium, true, "use lithium code generator")
 DEFINE_bool(use_range, true, "use hydrogen range analysis")
 DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
+DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
-            true,
-            "crankshaft harvests type feedback from stub cache")
 DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
 DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_string(trace_phase, "Z", "trace generated IR for specified phases")
 DEFINE_bool(trace_inlining, false, "trace inlining decisions")
 DEFINE_bool(trace_alloc, false, "trace register allocator")
 DEFINE_bool(trace_all_uses, false, "trace all use positions")
@@ -192,49 +146,13 @@
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
-DEFINE_bool(inline_construct, true, "inline constructor calls")
-DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
-DEFINE_int(loop_weight, 1, "loop weight for representation inference")
-
-DEFINE_bool(optimize_for_in, true,
-            "optimize functions containing for-in loops")
-
-// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
-DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
-DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-DEFINE_bool(self_optimization, false,
-            "primitive functions trigger their own optimization")
-DEFINE_bool(direct_self_opt, false,
-            "call recompile stub directly when self-optimizing")
-DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(count_based_interrupts, false,
-            "trigger profiler ticks based on counting instead of timing")
-DEFINE_bool(interrupt_at_exit, false,
-            "insert an interrupt check at function exit")
-DEFINE_bool(weighted_back_edges, false,
-            "weight back edges by jump distance for interrupt triggering")
-DEFINE_int(interrupt_budget, 5900,
-           "execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 15,
-           "percentage of ICs that must have type info to allow optimization")
-DEFINE_int(self_opt_count, 130, "call count before self-optimization")
-
-DEFINE_implication(experimental_profiler, watch_ic_patching)
-DEFINE_implication(experimental_profiler, self_optimization)
-// Not implying direct_self_opt here because it seems to be a bad idea.
-DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, count_based_interrupts)
-DEFINE_implication(experimental_profiler, interrupt_at_exit)
-DEFINE_implication(experimental_profiler, weighted_back_edges)
-
-DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
-DEFINE_implication(trace_opt_verbose, trace_opt)
 
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
             "generate extra code (assertions) for debugging")
 DEFINE_bool(code_comments, false, "emit comments in code disassembly")
+DEFINE_bool(peephole_optimization, true,
+            "perform peephole optimizations in assembly code")
 DEFINE_bool(enable_sse2, true,
             "enable use of SSE2 instructions if available")
 DEFINE_bool(enable_sse3, true,
@@ -262,8 +180,6 @@
 DEFINE_bool(expose_externalize_string, false,
             "expose externalize string extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
-DEFINE_bool(builtins_in_stack_traces, false,
-            "show built-in functions in stack traces")
 DEFINE_bool(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
@@ -284,8 +200,10 @@
 DEFINE_bool(trace_opt, false, "trace lazy optimization")
 DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
 DEFINE_bool(opt, true, "use adaptive optimizations")
+DEFINE_bool(opt_eagerly, false, "be more eager when adaptively optimizing")
 DEFINE_bool(always_opt, false, "always try to optimize functions")
 DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
+DEFINE_bool(deopt, true, "support deoptimization")
 DEFINE_bool(trace_deopt, false, "trace deoptimization")
 
 // compiler.cc
@@ -307,13 +225,10 @@
             "automatically set the debug break flag when debugger commands are "
             "in the queue")
 DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(break_on_abort, true, "always cause a debug break before aborting")
 
 // execution.cc
-// Slightly less than 1MB on 64-bit, since Windows' default stack size for
-// the main execution thread is 1MB for both 32 and 64-bit.
-DEFINE_int(stack_size, kPointerSize * 123,
-           "default size of stack region v8 is allowed to use (in kBytes)")
+DEFINE_int(stack_size, kPointerSize * 128,
+           "default size of stack region v8 is allowed to use (in KkBytes)")
 
 // frames.cc
 DEFINE_int(max_stack_trace_source_length, 300,
@@ -338,23 +253,14 @@
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
-DEFINE_bool(trace_fragmentation, false,
-            "report fragmentation for old pointer and data pages")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_bool(trace_incremental_marking, false,
-            "trace progress of the incremental marking")
 
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
             "Use idle notification to reduce memory footprint.")
-
-DEFINE_bool(send_idle_notification, false,
-            "Send idle notifcation between stress runs.")
 // ic.cc
 DEFINE_bool(use_ic, true, "use inline caching")
 
@@ -370,12 +276,8 @@
 
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
-            "Use lazy sweeping for old pointer and data spaces")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, true,
-            "Compact code space on full non-incremental collections")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
@@ -383,16 +285,31 @@
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
 
+DEFINE_bool(canonicalize_object_literal_maps, true,
+            "Canonicalize maps for object literals.")
+
+DEFINE_bool(use_big_map_space, true,
+            "Use big map space, but don't compact if it grew too big.")
+
+DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
+           "Maximum number of pages in map space which still allows to encode "
+           "forwarding pointers.  That's actually a constant, but it's useful "
+           "to control it with a flag for better testing.")
+
+// mksnapshot.cc
+DEFINE_bool(h, false, "print this message")
+DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
+
 // objects.cc
 DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
 
 // parser.cc
 DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_bool(strict_mode, true, "allow strict mode directives")
 
 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(check_icache, false,
-            "Check icache flushes in ARM and MIPS simulator")
+DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
@@ -417,6 +334,7 @@
 
 // Regexp
 DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
 DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -438,17 +356,13 @@
 
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
 DEFINE_bool(debugger, false, "Enable JavaScript debugger")
 DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-#endif  // ENABLE_DEBUGGER_SUPPORT
-
 DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments, JSARGUMENTS_INIT,
+DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
 
 #if defined(WEBOS__)
@@ -472,15 +386,6 @@
 DEFINE_string(gdbjit_dump_filter, "",
               "dump only objects containing this substring")
 
-// mark-compact.cc
-DEFINE_bool(force_marking_deque_overflows, false,
-            "force overflows of marking deque by reducing it's size "
-            "to 64 words")
-
-DEFINE_bool(stress_compaction, false,
-            "stress the GC compactor to flush out bugs (implies "
-            "--force_marking_deque_overflows)")
-
 //
 // Debug only flags
 //
@@ -503,7 +408,11 @@
             "pretty print source code for builtins")
 DEFINE_bool(print_ast, false, "print source AST")
 DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
+DEFINE_bool(print_json_ast, false, "print source AST as JSON")
+DEFINE_bool(print_builtin_json_ast, false,
+            "print source AST for builtins as JSON")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_bool(verify_stack_height, false, "verify stack height tracing on ia32")
 
 // compiler.cc
 DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
@@ -524,11 +433,6 @@
 // ic.cc
 DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
 
-// interface.cc
-DEFINE_bool(print_interfaces, false, "print interfaces")
-DEFINE_bool(print_interface_details, false, "print interface inference details")
-DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
-
 // objects.cc
 DEFINE_bool(trace_normalization,
             false,
@@ -537,6 +441,10 @@
 // runtime.cc
 DEFINE_bool(trace_lazy, false, "trace lazy compilation")
 
+// serialize.cc
+DEFINE_bool(debug_serialization, false,
+            "write debug information into the snapshot.")
+
 // spaces.cc
 DEFINE_bool(collect_heap_spill_statistics, false,
             "report heap spill statistics along with heap_stats "
@@ -601,18 +509,8 @@
 #define FLAG FLAG_READONLY
 #endif
 
-// elements.cc
-DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
-
 // code-stubs.cc
 DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache,
-            false,
-            "test secondary stub cache by disabling the primary one")
-
-DEFINE_bool(test_primary_stub_cache,
-            false,
-            "test primary stub cache by disabling the secondary one")
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(print_code, false, "print generated code")
@@ -622,20 +520,6 @@
 DEFINE_bool(print_code_verbose, false, "print more information for code")
 DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
 
-#ifdef ENABLE_DISASSEMBLER
-DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
-DEFINE_implication(print_all_code, print_code)
-DEFINE_implication(print_all_code, print_opt_code)
-DEFINE_implication(print_all_code, print_unopt_code)
-DEFINE_implication(print_all_code, print_code_verbose)
-DEFINE_implication(print_all_code, print_builtin_code)
-DEFINE_implication(print_all_code, print_code_stubs)
-DEFINE_implication(print_all_code, code_comments)
-#ifdef DEBUG
-DEFINE_implication(print_all_code, trace_codegen)
-#endif
-#endif
-
 // Cleanup...
 #undef FLAG_FULL
 #undef FLAG_READONLY
@@ -644,10 +528,8 @@
 #undef DEFINE_bool
 #undef DEFINE_int
 #undef DEFINE_string
-#undef DEFINE_implication
 
 #undef FLAG_MODE_DECLARE
 #undef FLAG_MODE_DEFINE
 #undef FLAG_MODE_DEFINE_DEFAULTS
 #undef FLAG_MODE_META
-#undef FLAG_MODE_DEFINE_IMPLICATIONS
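
For context on the implication machinery removed above: flag-definitions.h is an X-macro header that V8 re-includes under several modes, and FLAG_MODE_DEFINE_IMPLICATIONS expands each DEFINE_implication(whenflag, thenflag) into an assignment that EnforceFlagImplications runs after flag parsing. A minimal standalone sketch of that two-pass pattern; the macro spellings and the flag list here are illustrative, not V8's exact definitions:

    #include <cstdio>

    // Plays the role of flag-definitions.h: expanded twice, with
    // different definitions of the two macros each time.
    #define FLAG_LIST(DEF_BOOL, DEF_IMPLICATION)   \
      DEF_BOOL(print_all_code, false)              \
      DEF_BOOL(print_code, false)                  \
      DEF_IMPLICATION(print_all_code, print_code)

    // Pass 1: define storage for every flag; implications expand to nothing.
    #define DEFINE_BOOL(name, def) bool FLAG_##name = def;
    #define IGNORE_IMPLICATION(whenflag, thenflag)
    FLAG_LIST(DEFINE_BOOL, IGNORE_IMPLICATION)
    #undef DEFINE_BOOL
    #undef IGNORE_IMPLICATION

    // Pass 2: implications expand to statements, run once after parsing.
    void EnforceFlagImplications() {
    #define IGNORE_BOOL(name, def)
    #define DEFINE_IMPLICATION(whenflag, thenflag) \
      if (FLAG_##whenflag) FLAG_##thenflag = true;
      FLAG_LIST(IGNORE_BOOL, DEFINE_IMPLICATION)
    #undef IGNORE_BOOL
    #undef DEFINE_IMPLICATION
    }

    int main() {
      FLAG_print_all_code = true;  // as if --print-all-code were parsed
      EnforceFlagImplications();
      std::printf("print_code=%d\n", FLAG_print_code);  // prints 1
      return 0;
    }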
diff --git a/src/flags.cc b/src/flags.cc
index 5720cbd..ab5b57c 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -411,7 +411,7 @@
           for (int k = i; k < *argc; k++) {
             js_argv[k - start_pos] = StrDup(argv[k]);
           }
-          *flag->args_variable() = JSArguments::Create(js_argc, js_argv);
+          *flag->args_variable() = JSArguments(js_argc, js_argv);
           i = *argc;  // Consume all arguments
           break;
         }
@@ -534,10 +534,18 @@
   }
 }
 
-
-void FlagList::EnforceFlagImplications() {
-#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "flag-definitions.h"
+JSArguments::JSArguments()
+    : argc_(0), argv_(NULL) {}
+JSArguments::JSArguments(int argc, const char** argv)
+    : argc_(argc), argv_(argv) {}
+int JSArguments::argc() const { return argc_; }
+const char** JSArguments::argv() { return argv_; }
+const char*& JSArguments::operator[](int idx) { return argv_[idx]; }
+JSArguments& JSArguments::operator=(JSArguments args) {
+  argc_ = args.argc_;
+  argv_ = args.argv_;
+  return *this;
 }
 
+
 } }  // namespace v8::internal
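
On the JSArguments change in this file: a plausible reading (an assumption, not stated in this patch) is that the newer Create() factory exists so JSArguments can stay a constructor-free aggregate, letting a statically initialized flag table use a compile-time default such as the JSARGUMENTS_INIT seen earlier in this patch, while the reverted 3.6 code gives the type real constructors and a member-wise assignment operator instead. A simplified sketch contrasting the two shapes (declarations abbreviated, not V8's exact code):

    // Reverted (3.6-era) shape: a value type with real constructors,
    // matching the definitions restored in flags.cc above.
    class JSArgumentsOld {
     public:
      JSArgumentsOld() : argc_(0), argv_(0) {}
      JSArgumentsOld(int argc, const char** argv) : argc_(argc), argv_(argv) {}
      int argc() const { return argc_; }
      const char** argv() { return argv_; }
     private:
      int argc_;
      const char** argv_;
    };

    // Newer shape: a constructor-free aggregate plus a factory, so a
    // flag default can be a braced compile-time initializer.
    struct JSArgumentsNew {
      int argc;
      const char** argv;
      static JSArgumentsNew Create(int argc, const char** argv) {
        JSArgumentsNew args = { argc, argv };
        return args;
      }
    };

    // Legal as a static initializer in C++98 (hypothetical example):
    static JSArgumentsNew js_args_default = { 0, 0 };

    int main() {
      JSArgumentsOld old_style(0, 0);
      return old_style.argc() + js_args_default.argc;
    }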
diff --git a/src/flags.h b/src/flags.h
index f0b239b..f9cbde0 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -72,9 +72,6 @@
 
   // Print help to stdout with flags, types, and default values.
   static void PrintHelp();
-
-  // Set flags as consequence of being implied by another flag.
-  static void EnforceFlagImplications();
 };
 
 } }  // namespace v8::internal
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 27a526c..7ba79bf 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -68,7 +68,7 @@
 
 inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
   v->VisitPointer(context_address());
-  v->VisitPointer(code_address());
+  StackFrame::IteratePc(v, pc_address(), holder);
 }
 
 
@@ -77,24 +77,9 @@
 }
 
 
-inline bool StackHandler::is_js_entry() const {
-  return kind() == JS_ENTRY;
-}
-
-
-inline bool StackHandler::is_catch() const {
-  return kind() == CATCH;
-}
-
-
-inline bool StackHandler::is_finally() const {
-  return kind() == FINALLY;
-}
-
-
-inline StackHandler::Kind StackHandler::kind() const {
+inline StackHandler::State StackHandler::state() const {
   const int offset = StackHandlerConstants::kStateOffset;
-  return KindField::decode(Memory::unsigned_at(address() + offset));
+  return static_cast<State>(Memory::int_at(address() + offset));
 }
 
 
@@ -104,9 +89,9 @@
 }
 
 
-inline Object** StackHandler::code_address() const {
-  const int offset = StackHandlerConstants::kCodeOffset;
-  return reinterpret_cast<Object**>(address() + offset);
+inline Address* StackHandler::pc_address() const {
+  const int offset = StackHandlerConstants::kPCOffset;
+  return reinterpret_cast<Address*>(address() + offset);
 }
 
 
@@ -120,33 +105,8 @@
 }
 
 
-inline Code* StackFrame::LookupCode() const {
-  return GetContainingCode(isolate(), pc());
-}
-
-
 inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
-  return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-}
-
-
-inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
-}
-
-
-inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
-    : EntryFrame(iterator) {
-}
-
-
-inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
-}
-
-
-inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
-    : StackFrame(iterator) {
+  return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
 }
 
 
@@ -191,12 +151,7 @@
 inline bool StandardFrame::IsConstructFrame(Address fp) {
   Object* marker =
       Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
-  return marker == Smi::FromInt(StackFrame::CONSTRUCT);
-}
-
-
-inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
-    : StandardFrame(iterator) {
+  return marker == Smi::FromInt(CONSTRUCT);
 }
 
 
@@ -235,26 +190,6 @@
 }
 
 
-inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
-    : JavaScriptFrame(iterator) {
-}
-
-
-inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
-    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
-}
-
-
-inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
-    : StandardFrame(iterator) {
-}
-
-
-inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
-    : InternalFrame(iterator) {
-}
-
-
 template<typename Iterator>
 inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
     Isolate* isolate)
@@ -262,15 +197,6 @@
   if (!done()) Advance();
 }
 
-
-template<typename Iterator>
-inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
-    Isolate* isolate, ThreadLocalTop* top)
-    : iterator_(isolate, top) {
-  if (!done()) Advance();
-}
-
-
 template<typename Iterator>
 inline JavaScriptFrame* JavaScriptFrameIteratorTemp<Iterator>::frame() const {
   // TODO(1233797): The frame hierarchy needs to change. It's
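
On the StackHandler::Iterate change at the top of this file: the newer code visits the return pc via StackFrame::IteratePc because the pc is a derived pointer into a code object that a moving GC can relocate, so it must be rewritten as offset-from-instruction-start around the visit. A self-contained sketch of that re-derivation pattern; the object model below is invented for illustration:

    #include <cassert>
    #include <cstdint>

    struct CodeObject {
      uint8_t* instruction_start;  // changes when the GC moves the object
    };

    // Visit a pc pointing *into* a code object: remember its offset,
    // let the visitor move/rewrite the holder, then rebuild the pc.
    template <class Visitor>
    void IteratePc(Visitor* v, uint8_t** pc_address, CodeObject* holder) {
      uintptr_t pc_offset =
          static_cast<uintptr_t>(*pc_address - holder->instruction_start);
      v->VisitCode(&holder);  // may redirect 'holder' to a new location
      *pc_address = holder->instruction_start + pc_offset;
    }

    struct MovingVisitor {  // stands in for a moving-GC object visitor
      CodeObject* new_location;
      void VisitCode(CodeObject** slot) { *slot = new_location; }
    };

    int main() {
      uint8_t old_buf[16], new_buf[16];
      CodeObject old_code = { old_buf };
      CodeObject new_code = { new_buf };
      uint8_t* pc = old_buf + 5;      // a return address inside old_code
      MovingVisitor v = { &new_code };
      IteratePc(&v, &pc, &old_code);  // the "GC" moves the code object
      assert(pc == new_buf + 5);      // pc re-derived at the same offset
      return 0;
    }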
diff --git a/src/frames.cc b/src/frames.cc
index 0571a81..60b1aad 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 #include "deoptimizer.h"
 #include "frames-inl.h"
 #include "full-codegen.h"
-#include "lazy-instance.h"
 #include "mark-compact.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
@@ -42,22 +41,6 @@
 namespace v8 {
 namespace internal {
 
-
-static ReturnAddressLocationResolver return_address_location_resolver = NULL;
-
-
-// Resolves pc_address through the resolution address function if one is set.
-static inline Address* ResolveReturnAddressLocation(Address* pc_address) {
-  if (return_address_location_resolver == NULL) {
-    return pc_address;
-  } else {
-    return reinterpret_cast<Address*>(
-        return_address_location_resolver(
-            reinterpret_cast<uintptr_t>(pc_address)));
-  }
-}
-
-
 // Iterator that supports traversing the stack handlers of a
 // particular frame. Needs to know the top of the handler chain.
 class StackHandlerIterator BASE_EMBEDDED {
@@ -172,8 +155,8 @@
     ASSERT(fp_ != NULL);
     state.fp = fp_;
     state.sp = sp_;
-    state.pc_address = ResolveReturnAddressLocation(
-        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_)));
+    state.pc_address =
+        reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp_));
     type = StackFrame::ComputeType(isolate(), &state);
   }
   if (SingletonFor(type) == NULL) return;
@@ -383,17 +366,16 @@
 
 
 Code* StackFrame::GetSafepointData(Isolate* isolate,
-                                   Address inner_pointer,
+                                   Address pc,
                                    SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
-  InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
-      isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
+  PcToCodeCache::PcToCodeCacheEntry* entry =
+      isolate->pc_to_code_cache()->GetCacheEntry(pc);
   if (!entry->safepoint_entry.is_valid()) {
-    entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
+    entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
     ASSERT(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(entry->safepoint_entry.Equals(
-        entry->code->GetSafepointEntry(inner_pointer)));
+    ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
   }
 
   // Fill in the results and return the code.
@@ -410,16 +392,11 @@
 }
 
 
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* object, Address addr);
-#endif
-
-
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
   Address pc = *pc_address;
-  ASSERT(GcSafeCodeContains(holder, pc));
+  ASSERT(holder->contains(pc));
   unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
   Object* code = holder;
   v->VisitPointer(&code);
@@ -431,13 +408,6 @@
 }
 
 
-void StackFrame::SetReturnAddressLocationResolver(
-    ReturnAddressLocationResolver resolver) {
-  ASSERT(return_address_location_resolver == NULL);
-  return_address_location_resolver = resolver;
-}
-
-
 StackFrame::Type StackFrame::ComputeType(Isolate* isolate, State* state) {
   ASSERT(state->fp != NULL);
   if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
@@ -509,11 +479,11 @@
 
 
 void ExitFrame::ComputeCallerState(State* state) const {
-  // Set up the caller state.
+  // Setup the caller state.
   state->sp = caller_sp();
   state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
-  state->pc_address = ResolveReturnAddressLocation(
-      reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+  state->pc_address =
+      reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
 }
 
 
@@ -547,8 +517,7 @@
 void ExitFrame::FillState(Address fp, Address sp, State* state) {
   state->sp = sp;
   state->fp = fp;
-  state->pc_address = ResolveReturnAddressLocation(
-      reinterpret_cast<Address*>(sp - 1 * kPointerSize));
+  state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
 }
 
 
@@ -583,8 +552,7 @@
 void StandardFrame::ComputeCallerState(State* state) const {
   state->sp = caller_sp();
   state->fp = caller_fp();
-  state->pc_address = ResolveReturnAddressLocation(
-      reinterpret_cast<Address*>(ComputePCAddress(fp())));
+  state->pc_address = reinterpret_cast<Address*>(ComputePCAddress(fp()));
 }
 
 
@@ -737,74 +705,6 @@
 }
 
 
-void JavaScriptFrame::PrintTop(FILE* file,
-                               bool print_args,
-                               bool print_line_number) {
-  // constructor calls
-  HandleScope scope;
-  AssertNoAllocation no_allocation;
-  JavaScriptFrameIterator it;
-  while (!it.done()) {
-    if (it.frame()->is_java_script()) {
-      JavaScriptFrame* frame = it.frame();
-      if (frame->IsConstructor()) PrintF(file, "new ");
-      // function name
-      Object* maybe_fun = frame->function();
-      if (maybe_fun->IsJSFunction()) {
-        JSFunction* fun = JSFunction::cast(maybe_fun);
-        fun->PrintName();
-        Code* js_code = frame->unchecked_code();
-        Address pc = frame->pc();
-        int code_offset =
-            static_cast<int>(pc - js_code->instruction_start());
-        PrintF("+%d", code_offset);
-        SharedFunctionInfo* shared = fun->shared();
-        if (print_line_number) {
-          Code* code = Code::cast(
-              v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
-          int source_pos = code->SourcePosition(pc);
-          Object* maybe_script = shared->script();
-          if (maybe_script->IsScript()) {
-            Handle<Script> script(Script::cast(maybe_script));
-            int line = GetScriptLineNumberSafe(script, source_pos) + 1;
-            Object* script_name_raw = script->name();
-            if (script_name_raw->IsString()) {
-              String* script_name = String::cast(script->name());
-              SmartArrayPointer<char> c_script_name =
-                  script_name->ToCString(DISALLOW_NULLS,
-                                         ROBUST_STRING_TRAVERSAL);
-              PrintF(file, " at %s:%d", *c_script_name, line);
-            } else {
-              PrintF(file, "at <unknown>:%d", line);
-            }
-          } else {
-            PrintF(file, " at <unknown>:<unknown>");
-          }
-        }
-      } else {
-        PrintF("<unknown>");
-      }
-
-      if (print_args) {
-        // function arguments
-        // (we are intentionally only printing the actually
-        // supplied parameters, not all parameters required)
-        PrintF(file, "(this=");
-        frame->receiver()->ShortPrint(file);
-        const int length = frame->ComputeParametersCount();
-        for (int i = 0; i < length; i++) {
-          PrintF(file, ", ");
-          frame->GetParameter(i)->ShortPrint(file);
-        }
-        PrintF(file, ")");
-      }
-      break;
-    }
-    it.Advance();
-  }
-}
-
-
 void FrameSummary::Print() {
   PrintF("receiver: ");
   receiver_->ShortPrint();
@@ -839,16 +739,18 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  it.Next();  // Drop frame count.
-  int jsframe_count = it.Next();
+  int frame_count = it.Next();
 
   // We create the summary in reverse order because the frames
   // in the deoptimization translation are ordered bottom-to-top.
-  bool is_constructor = IsConstructor();
-  int i = jsframe_count;
+  int i = frame_count;
   while (i > 0) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    if (opcode == Translation::JS_FRAME) {
+    if (opcode == Translation::FRAME) {
+      // We don't inline constructor calls, so only the first, outermost
+      // frame can be a constructor frame in case of inlining.
+      bool is_constructor = (i == frame_count) && IsConstructor();
+
       i--;
       int ast_id = it.Next();
       int function_id = it.Next();
@@ -898,18 +800,11 @@
 
       FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
       frames->Add(summary);
-      is_constructor = false;
-    } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
-      // The next encountered JS_FRAME will be marked as a constructor call.
-      it.Skip(Translation::NumberOfOperandsFor(opcode));
-      ASSERT(!is_constructor);
-      is_constructor = true;
     } else {
       // Skip over operands to advance to the next opcode.
       it.Skip(Translation::NumberOfOperandsFor(opcode));
     }
   }
-  ASSERT(!is_constructor);
 }
 
 
@@ -924,8 +819,7 @@
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc());
+    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -949,9 +843,8 @@
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
   USE(opcode);
-  it.Next();  // Drop frame count.
-  int jsframe_count = it.Next();
-  return jsframe_count;
+  int frame_count = it.Next();
+  return frame_count;
 }
 
 
@@ -966,15 +859,14 @@
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
   ASSERT(opcode == Translation::BEGIN);
-  it.Next();  // Drop frame count.
-  int jsframe_count = it.Next();
+  int frame_count = it.Next();
 
   // We insert the frames in reverse order because the frames
   // in the deoptimization translation are ordered bottom-to-top.
-  while (jsframe_count > 0) {
+  while (frame_count > 0) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    if (opcode == Translation::JS_FRAME) {
-      jsframe_count--;
+    if (opcode == Translation::FRAME) {
+      frame_count--;
       it.Next();  // Skip ast id.
       int function_id = it.Next();
       it.Next();  // Skip height.
@@ -989,11 +881,6 @@
 }
 
 
-int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
-  return Smi::cast(GetExpression(0))->value();
-}
-
-
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
@@ -1040,15 +927,11 @@
   if (IsConstructor()) accumulator->Add("new ");
   accumulator->PrintFunction(function, receiver, &code);
 
-  // Get scope information for nicer output, if possible. If code is NULL, or
-  // doesn't contain scope info, scope_info will return 0 for the number of
-  // parameters, stack local variables, context local variables, stack slots,
-  // or context slots.
-  Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+  Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
 
   if (function->IsJSFunction()) {
     Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
-    scope_info = Handle<ScopeInfo>(shared->scope_info());
+    scope_info = Handle<SerializedScopeInfo>(shared->scope_info());
     Object* script_obj = shared->script();
     if (script_obj->IsScript()) {
       Handle<Script> script(Script::cast(script_obj));
@@ -1073,6 +956,11 @@
 
   accumulator->Add("(this=%o", receiver);
 
+  // Get scope information for nicer output, if possible. If code is
+  // NULL, or doesn't contain scope info, info will return 0 for the
+  // number of parameters, stack slots, or context slots.
+  ScopeInfo<PreallocatedStorage> info(*scope_info);
+
   // Print the parameters.
   int parameters_count = ComputeParametersCount();
   for (int i = 0; i < parameters_count; i++) {
@@ -1080,8 +968,8 @@
     // If we have a name for the parameter we print it. Nameless
     // parameters are either because we have more actual parameters
     // than formal parameters or because we have no scope information.
-    if (i < scope_info->ParameterCount()) {
-      accumulator->PrintName(scope_info->ParameterName(i));
+    if (i < info.number_of_parameters()) {
+      accumulator->PrintName(*info.parameter_name(i));
       accumulator->Add("=");
     }
     accumulator->Add("%o", GetParameter(i));
@@ -1099,8 +987,8 @@
   accumulator->Add(" {\n");
 
   // Compute the number of locals and expression stack elements.
-  int stack_locals_count = scope_info->StackLocalCount();
-  int heap_locals_count = scope_info->ContextLocalCount();
+  int stack_locals_count = info.number_of_stack_slots();
+  int heap_locals_count = info.number_of_context_slots();
   int expressions_count = ComputeExpressionsCount();
 
   // Print stack-allocated local variables.
@@ -1109,7 +997,7 @@
   }
   for (int i = 0; i < stack_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(scope_info->StackLocalName(i));
+    accumulator->PrintName(*info.stack_slot_name(i));
     accumulator->Add(" = ");
     if (i < expressions_count) {
       accumulator->Add("%o", GetExpression(i));
@@ -1126,16 +1014,16 @@
   }
 
   // Print heap-allocated local variables.
-  if (heap_locals_count > 0) {
+  if (heap_locals_count > Context::MIN_CONTEXT_SLOTS) {
     accumulator->Add("  // heap-allocated locals\n");
   }
-  for (int i = 0; i < heap_locals_count; i++) {
+  for (int i = Context::MIN_CONTEXT_SLOTS; i < heap_locals_count; i++) {
     accumulator->Add("  var ");
-    accumulator->PrintName(scope_info->ContextLocalName(i));
+    accumulator->PrintName(*info.context_slot_name(i));
     accumulator->Add(" = ");
     if (context != NULL) {
       if (i < context->length()) {
-        accumulator->Add("%o", context->get(Context::MIN_CONTEXT_SLOTS + i));
+        accumulator->Add("%o", context->get(i));
       } else {
         accumulator->Add(
             "// warning: missing context slot - inconsistent frame?");
@@ -1204,7 +1092,7 @@
   StackHandlerIterator it(this, top_handler());
   ASSERT(!it.done());
   StackHandler* handler = it.handler();
-  ASSERT(handler->is_js_entry());
+  ASSERT(handler->is_entry());
   handler->Iterate(v, LookupCode());
 #ifdef DEBUG
   // Make sure that the entry frame does not contain more than one
@@ -1267,90 +1155,53 @@
 // -------------------------------------------------------------------------
 
 
-static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
-  MapWord map_word = object->map_word();
-  return map_word.IsForwardingAddress() ?
-      map_word.ToForwardingAddress()->map() : map_word.ToMap();
-}
-
-
-static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
-  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
-}
-
-
-#ifdef DEBUG
-static bool GcSafeCodeContains(HeapObject* code, Address addr) {
-  Map* map = GcSafeMapOfCodeSpaceObject(code);
-  ASSERT(map == code->GetHeap()->code_map());
-  Address start = code->address();
-  Address end = code->address() + code->SizeFromMap(map);
-  return start <= addr && addr < end;
-}
-#endif
-
-
-Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
-                                                Address inner_pointer) {
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
+  ASSERT(code != NULL && code->contains(pc));
   return code;
 }
 
 
-Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
-    Address inner_pointer) {
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
   Heap* heap = isolate_->heap();
-  // Check if the inner pointer points into a large object chunk.
-  LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
-  if (large_page != NULL) {
-    return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
-  }
+  // Check if the pc points into a large object chunk.
+  LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
+  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
 
-  // Iterate through the page until we reach the end or find an object starting
-  // after the inner pointer.
-  Page* page = Page::FromAddress(inner_pointer);
-
-  Address addr = page->skip_list()->StartFor(inner_pointer);
-
-  Address top = heap->code_space()->top();
-  Address limit = heap->code_space()->limit();
-
+  // Iterate through the 8K page until we reach the end or find an
+  // object starting after the pc.
+  Page* page = Page::FromAddress(pc);
+  HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
+  HeapObject* previous = NULL;
   while (true) {
-    if (addr == top && addr != limit) {
-      addr = limit;
-      continue;
+    HeapObject* next = iterator.next();
+    if (next == NULL || next->address() >= pc) {
+      return GcSafeCastToCode(previous, pc);
     }
-
-    HeapObject* obj = HeapObject::FromAddress(addr);
-    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
-    Address next_addr = addr + obj_size;
-    if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
-    addr = next_addr;
+    previous = next;
   }
 }
 
 
-InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
-    InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
+  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)),
       v8::internal::kZeroHashSeed);
-  uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
-  InnerPointerToCodeCacheEntry* entry = cache(index);
-  if (entry->inner_pointer == inner_pointer) {
+  uint32_t index = hash & (kPcToCodeCacheSize - 1);
+  PcToCodeCacheEntry* entry = cache(index);
+  if (entry->pc == pc) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
+    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
   } else {
     // Because this code may be interrupted by a profiling signal that
-    // also queries the cache, we cannot update inner_pointer before the code
-    // has been set. Otherwise, we risk trying to use a cache entry before
+    // also queries the cache, we cannot update pc before the code has
+    // been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
-    entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
+    entry->code = GcSafeFindCodeForPc(pc);
     entry->safepoint_entry.Reset();
-    entry->inner_pointer = inner_pointer;
+    entry->pc = pc;
   }
   return entry;
 }
@@ -1381,12 +1232,12 @@
 };
 
 
-static LazyInstance<JSCallerSavedCodeData>::type caller_saved_code_data =
-    LAZY_INSTANCE_INITIALIZER;
+static const JSCallerSavedCodeData kCallerSavedCodeData;
+
 
 int JSCallerSavedCode(int n) {
   ASSERT(0 <= n && n < kNumJSCallerSaved);
-  return caller_saved_code_data.Get().reg_code[n];
+  return kCallerSavedCodeData.reg_code[n];
 }
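
On GetCacheEntry above: both the old and new versions are a direct-mapped, power-of-two-sized cache indexed by a hash of the pc, and on a miss they deliberately write the code field before the pc key so that a profiling signal interrupting the update never pairs the new key with stale data. A reduced sketch of that lookup discipline, with the hash and entry types simplified from the V8 code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct Entry {
      uintptr_t pc;      // key: an address inside some code object
      const void* code;  // value: the code object containing pc
    };

    class PcCache {
     public:
      PcCache() { std::memset(cache_, 0, sizeof(cache_)); }

      const void* Lookup(uintptr_t pc) {
        // A power-of-two size lets the hash be reduced with a mask.
        assert((kSize & (kSize - 1)) == 0);
        uint32_t hash = static_cast<uint32_t>(pc) * 2654435761u;  // Knuth hash
        Entry* entry = &cache_[hash & (kSize - 1)];
        if (entry->pc != pc) {
          // Miss: fill the slot. Write 'code' before 'pc' so a reader
          // that interrupts us (e.g. a profiling signal) never matches
          // the new key against the old value.
          entry->code = SlowFindCode(pc);
          entry->pc = pc;
        }
        return entry->code;
      }

     private:
      // Stand-in for the slow heap walk in GcSafeFindCodeForPc above.
      const void* SlowFindCode(uintptr_t pc) {
        return reinterpret_cast<const void*>(pc & ~static_cast<uintptr_t>(0xFFF));
      }
      static const int kSize = 1024;
      Entry cache_[kSize];
    };

    int main() {
      PcCache cache;
      const void* first = cache.Lookup(0x40123456);  // miss, slow path
      const void* again = cache.Lookup(0x40123456);  // hit, cached entry
      assert(first == again);
      return 0;
    }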
 
 
diff --git a/src/frames.h b/src/frames.h
index 9071555..fed11c4 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,54 +49,47 @@
 class ThreadLocalTop;
 class Isolate;
 
-class InnerPointerToCodeCache {
+class PcToCodeCache {
  public:
-  struct InnerPointerToCodeCacheEntry {
-    Address inner_pointer;
+  struct PcToCodeCacheEntry {
+    Address pc;
     Code* code;
     SafepointEntry safepoint_entry;
   };
 
-  explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
+  explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
     Flush();
   }
 
-  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
-  Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
+  Code* GcSafeFindCodeForPc(Address pc);
+  Code* GcSafeCastToCode(HeapObject* object, Address pc);
 
   void Flush() {
     memset(&cache_[0], 0, sizeof(cache_));
   }
 
-  InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
+  PcToCodeCacheEntry* GetCacheEntry(Address pc);
 
  private:
-  InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+  PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
 
   Isolate* isolate_;
 
-  static const int kInnerPointerToCodeCacheSize = 1024;
-  InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
+  static const int kPcToCodeCacheSize = 1024;
+  PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
 
-  DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
 };
 
 
 class StackHandler BASE_EMBEDDED {
  public:
-  enum Kind {
-    JS_ENTRY,
-    CATCH,
-    FINALLY,
-    LAST_KIND = FINALLY
+  enum State {
+    ENTRY,
+    TRY_CATCH,
+    TRY_FINALLY
   };
 
-  static const int kKindWidth = 2;
-  STATIC_ASSERT(LAST_KIND < (1 << kKindWidth));
-  static const int kIndexWidth = 32 - kKindWidth;
-  class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
-  class IndexField: public BitField<unsigned, kKindWidth, kIndexWidth> {};
-
   // Get the address of this stack handler.
   inline Address address() const;
 
@@ -113,16 +106,16 @@
   static inline StackHandler* FromAddress(Address address);
 
   // Testers
-  inline bool is_js_entry() const;
-  inline bool is_catch() const;
-  inline bool is_finally() const;
+  bool is_entry() { return state() == ENTRY; }
+  bool is_try_catch() { return state() == TRY_CATCH; }
+  bool is_try_finally() { return state() == TRY_FINALLY; }
 
  private:
   // Accessors.
-  inline Kind kind() const;
+  inline State state() const;
 
   inline Object** context_address() const;
-  inline Object** code_address() const;
+  inline Address* pc_address() const;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
 };
@@ -146,10 +139,7 @@
   enum Type {
     NONE = 0,
     STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
-    NUMBER_OF_TYPES,
-    // Used by FrameScope to indicate that the stack frame is constructed
-    // manually and the FrameScope does not need to emit code.
-    MANUAL
+    NUMBER_OF_TYPES
   };
 #undef DECLARE_TYPE
 
@@ -225,7 +215,9 @@
   virtual Code* unchecked_code() const = 0;
 
   // Get the code associated with this frame.
-  inline Code* LookupCode() const;
+  Code* LookupCode() const {
+    return GetContainingCode(isolate(), pc());
+  }
 
   // Get the code object that contains the given pc.
   static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -241,11 +233,6 @@
   virtual void Iterate(ObjectVisitor* v) const = 0;
   static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
 
-  // Sets a callback function for return-address rewriting profilers
-  // to resolve the location of a return address to the location of the
-  // profiler's stashed return address.
-  static void SetReturnAddressLocationResolver(
-      ReturnAddressLocationResolver resolver);
 
   // Printing support.
   enum PrintMode { OVERVIEW, DETAILS };
@@ -312,7 +299,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
  protected:
-  inline explicit EntryFrame(StackFrameIterator* iterator);
+  explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
 
   // The caller stack pointer for entry frames is always zero. The
   // real information about the caller frame is available through the
@@ -339,7 +326,8 @@
   }
 
  protected:
-  inline explicit EntryConstructFrame(StackFrameIterator* iterator);
+  explicit EntryConstructFrame(StackFrameIterator* iterator)
+      : EntryFrame(iterator) { }
 
  private:
   friend class StackFrameIterator;
@@ -373,7 +361,7 @@
   static void FillState(Address fp, Address sp, State* state);
 
  protected:
-  inline explicit ExitFrame(StackFrameIterator* iterator);
+  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
 
   virtual Address GetCallerStackPointer() const;
 
@@ -406,7 +394,8 @@
   }
 
  protected:
-  inline explicit StandardFrame(StackFrameIterator* iterator);
+  explicit StandardFrame(StackFrameIterator* iterator)
+      : StackFrame(iterator) { }
 
   virtual void ComputeCallerState(State* state) const;
 
@@ -524,10 +513,9 @@
     return static_cast<JavaScriptFrame*>(frame);
   }
 
-  static void PrintTop(FILE* file, bool print_args, bool print_line_number);
-
  protected:
-  inline explicit JavaScriptFrame(StackFrameIterator* iterator);
+  explicit JavaScriptFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator) { }
 
   virtual Address GetCallerStackPointer() const;
 
@@ -564,7 +552,8 @@
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
 
  protected:
-  inline explicit OptimizedFrame(StackFrameIterator* iterator);
+  explicit OptimizedFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
 
  private:
   friend class StackFrameIterator;
@@ -592,9 +581,12 @@
                      int index) const;
 
  protected:
-  inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
+  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
+      : JavaScriptFrame(iterator) { }
 
-  virtual int GetNumberOfIncomingArguments() const;
+  virtual int GetNumberOfIncomingArguments() const {
+    return Smi::cast(GetExpression(0))->value();
+  }
 
   virtual Address GetCallerStackPointer() const;
 
@@ -619,7 +611,8 @@
   }
 
  protected:
-  inline explicit InternalFrame(StackFrameIterator* iterator);
+  explicit InternalFrame(StackFrameIterator* iterator)
+      : StandardFrame(iterator) { }
 
   virtual Address GetCallerStackPointer() const;
 
@@ -640,7 +633,8 @@
   }
 
  protected:
-  inline explicit ConstructFrame(StackFrameIterator* iterator);
+  explicit ConstructFrame(StackFrameIterator* iterator)
+      : InternalFrame(iterator) { }
 
  private:
   friend class StackFrameIterator;
@@ -716,26 +710,20 @@
 
   inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
 
-  inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
-
   // Skip frames until the frame with the given id is reached.
   explicit JavaScriptFrameIteratorTemp(StackFrame::Id id) { AdvanceToId(id); }
 
   inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
 
-  JavaScriptFrameIteratorTemp(Address fp,
-                              Address sp,
-                              Address low_bound,
-                              Address high_bound) :
+  JavaScriptFrameIteratorTemp(Address fp, Address sp,
+                              Address low_bound, Address high_bound) :
       iterator_(fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
 
   JavaScriptFrameIteratorTemp(Isolate* isolate,
-                              Address fp,
-                              Address sp,
-                              Address low_bound,
-                              Address high_bound) :
+                              Address fp, Address sp,
+                              Address low_bound, Address high_bound) :
       iterator_(isolate, fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
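
On the StackHandler changes above: the scheme being reverted packs a 2-bit Kind and a 30-bit handler-table index into a single 32-bit state word via BitField, which is what KindField::decode in frames-inl.h reads back. A compact sketch of that encode/decode pattern, with BitField reduced to the two operations the diff uses:

    #include <cassert>
    #include <cstdint>

    // Minimal BitField in the spirit of the one in V8's utils.h.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    enum Kind { JS_ENTRY, CATCH, FINALLY, LAST_KIND = FINALLY };

    static const int kKindWidth = 2;  // LAST_KIND fits in two bits
    static const int kIndexWidth = 32 - kKindWidth;
    typedef BitField<Kind, 0, kKindWidth> KindField;
    typedef BitField<uint32_t, kKindWidth, kIndexWidth> IndexField;

    int main() {
      // Pack a handler's kind and its handler-table index into one word,
      // the layout that StackHandlerConstants::kStateOffset points at.
      uint32_t state = KindField::encode(CATCH) | IndexField::encode(7);
      assert(KindField::decode(state) == CATCH);
      assert(IndexField::decode(state) == 7u);
      return 0;
    }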
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index d963979..8073874 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -51,37 +51,7 @@
 }
 
 
-void BreakableStatementChecker::VisitVariableDeclaration(
-    VariableDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitFunctionDeclaration(
-    FunctionDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitModuleDeclaration(
-    ModuleDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitImportDeclaration(
-    ImportDeclaration* decl) {
-}
-
-void BreakableStatementChecker::VisitExportDeclaration(
-    ExportDeclaration* decl) {
-}
-
-
-void BreakableStatementChecker::VisitModuleLiteral(ModuleLiteral* module) {
-}
-
-void BreakableStatementChecker::VisitModuleVariable(ModuleVariable* module) {
-}
-
-void BreakableStatementChecker::VisitModulePath(ModulePath* module) {
-}
-
-void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
+void BreakableStatementChecker::VisitDeclaration(Declaration* decl) {
 }
 
 
@@ -274,6 +244,11 @@
 }
 
 
+void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
+  Visit(expr->expression());
+}
+
+
 void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
   Visit(expr->left());
   Visit(expr->right());
@@ -303,8 +278,8 @@
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
 
-  FullCodeGenerator cgen(&masm, info);
-  cgen.Generate();
+  FullCodeGenerator cgen(&masm);
+  cgen.Generate(info);
   if (cgen.HasStackOverflow()) {
     ASSERT(!isolate->has_pending_exception());
     return false;
@@ -313,26 +288,15 @@
 
   Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
-  code->set_optimizable(info->IsOptimizable() &&
-                        !info->function()->flags()->Contains(kDontOptimize));
-  code->set_self_optimization_header(cgen.has_self_optimization_header_);
+  code->set_optimizable(info->IsOptimizable());
   cgen.PopulateDeoptimizationData(code);
-  cgen.PopulateTypeFeedbackInfo(code);
-  cgen.PopulateTypeFeedbackCells(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
-  code->set_handler_table(*cgen.handler_table());
-#ifdef ENABLE_DEBUGGER_SUPPORT
   code->set_has_debug_break_slots(
       info->isolate()->debugger()->IsDebuggerActive());
-  code->set_compiled_optimizable(info->IsOptimizable());
-#endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
-  info->SetCode(code);  // May be an empty handle.
-  if (!code.is_null()) {
-    isolate->runtime_profiler()->NotifyCodeGenerated(code->instruction_size());
-  }
+  info->SetCode(code);  // may be an empty handle.
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit && !code.is_null()) {
     GDBJITLineInfo* lineinfo =
@@ -366,7 +330,8 @@
   ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
   if (!info_->HasDeoptimizationSupport()) return;
   int length = bailout_entries_.length();
-  Handle<DeoptimizationOutputData> data = isolate()->factory()->
+  Handle<DeoptimizationOutputData> data =
+      isolate()->factory()->
       NewDeoptimizationOutputData(length, TENURED);
   for (int i = 0; i < length; i++) {
     data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
@@ -376,30 +341,6 @@
 }
 
 
-void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
-  Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
-  info->set_ic_total_count(ic_total_count_);
-  ASSERT(!isolate()->heap()->InNewSpace(*info));
-  code->set_type_feedback_info(*info);
-}
-
-
-void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
-  if (type_feedback_cells_.is_empty()) return;
-  int length = type_feedback_cells_.length();
-  int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
-  Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
-      isolate()->factory()->NewFixedArray(array_size, TENURED));
-  for (int i = 0; i < length; i++) {
-    cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
-    cache->SetCell(i, *type_feedback_cells_[i].cell);
-  }
-  TypeFeedbackInfo::cast(code->type_feedback_info())->set_type_feedback_cells(
-      *cache);
-}
-
-
-
 void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
   PrepareForBailoutForId(node->id(), state);
 }
@@ -422,23 +363,20 @@
 }
 
 
-void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
+void FullCodeGenerator::PrepareForBailoutForId(int id, State state) {
   // There's no need to prepare this code for bailouts from already optimized
   // code or code that can't be optimized.
-  if (!info_->HasDeoptimizationSupport()) return;
+  if (!FLAG_deopt || !info_->HasDeoptimizationSupport()) return;
   unsigned pc_and_state =
       StateField::encode(state) | PcField::encode(masm_->pc_offset());
-  ASSERT(Smi::IsValid(pc_and_state));
   BailoutEntry entry = { id, pc_and_state };
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    // Assert that we don't have multiple bailout entries for the same node.
-    for (int i = 0; i < bailout_entries_.length(); i++) {
-      if (bailout_entries_.at(i).id == entry.id) {
-        AstPrinter printer;
-        PrintF("%s", printer.PrintProgram(info_->function()));
-        UNREACHABLE();
-      }
+  // Assert that we don't have multiple bailout entries for the same node.
+  for (int i = 0; i < bailout_entries_.length(); i++) {
+    if (bailout_entries_.at(i).id == entry.id) {
+      AstPrinter printer;
+      PrintF("%s", printer.PrintProgram(info_->function()));
+      UNREACHABLE();
     }
   }
 #endif  // DEBUG
@@ -446,18 +384,10 @@
 }
 
 
-void FullCodeGenerator::RecordTypeFeedbackCell(
-    unsigned id, Handle<JSGlobalPropertyCell> cell) {
-  TypeFeedbackCellEntry entry = { id, cell };
-  type_feedback_cells_.Add(entry);
-}
-
-
-void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
+void FullCodeGenerator::RecordStackCheck(int ast_id) {
   // The pc offset does not need to be encoded and packed together with a
   // state.
-  ASSERT(masm_->pc_offset() > 0);
-  BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
+  BailoutEntry entry = { ast_id, masm_->pc_offset() };
   stack_checks_.Add(entry);
 }
 
@@ -482,24 +412,27 @@
 
 void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
   __ push(reg);
+  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Register reg) const {
   // For simplicity we always test the accumulator register.
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
 
 void FullCodeGenerator::EffectContext::PlugTOS() const {
   __ Drop(1);
+  codegen()->decrement_stack_height();
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
   __ pop(result_register());
+  codegen()->decrement_stack_height();
 }
 
 
@@ -510,7 +443,8 @@
 void FullCodeGenerator::TestContext::PlugTOS() const {
   // For simplicity we always test the accumulator register.
   __ pop(result_register());
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->decrement_stack_height();
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -571,33 +505,33 @@
 
 void FullCodeGenerator::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  int save_global_count = global_count_;
-  global_count_ = 0;
-
-  AstVisitor::VisitDeclarations(declarations);
+  int length = declarations->length();
+  int global_count = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* decl = declarations->at(i);
+    EmitDeclaration(decl->proxy(), decl->mode(), decl->fun(), &global_count);
+  }
 
   // Batch declare global functions and variables.
-  if (global_count_ > 0) {
+  if (global_count > 0) {
     Handle<FixedArray> array =
-       isolate()->factory()->NewFixedArray(2 * global_count_, TENURED);
-    int length = declarations->length();
+        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
     for (int j = 0, i = 0; i < length; i++) {
       Declaration* decl = declarations->at(i);
       Variable* var = decl->proxy()->var();
 
       if (var->IsUnallocated()) {
         array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
+        if (decl->fun() == NULL) {
+          if (var->mode() == Variable::CONST) {
+            // In case this is a const property, use the hole.
             array->set_the_hole(j++);
           } else {
             array->set_undefined(j++);
           }
         } else {
           Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), script());
+              Compiler::BuildFunctionInfo(decl->fun(), script());
           // Check for stack-overflow exception.
           if (function.is_null()) {
             SetStackOverflow();
@@ -611,61 +545,15 @@
     // declaration the global functions and variables.
     DeclareGlobals(array);
   }
-
-  global_count_ = save_global_count;
-}
-
-
-void FullCodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), decl->fun());
-}
-
-
-void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
-  EmitDeclaration(decl->proxy(), decl->mode(), NULL);
-}
-
-
-void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
-  // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
-}
-
-
-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
-  // TODO(rossberg)
 }
 
 
 int FullCodeGenerator::DeclareGlobalsFlags() {
-  ASSERT(DeclareGlobalsLanguageMode::is_valid(language_mode()));
-  return DeclareGlobalsEvalFlag::encode(is_eval()) |
-      DeclareGlobalsNativeFlag::encode(is_native()) |
-      DeclareGlobalsLanguageMode::encode(language_mode());
+  int flags = 0;
+  if (is_eval()) flags |= kDeclareGlobalsEvalFlag;
+  if (is_strict_mode()) flags |= kDeclareGlobalsStrictModeFlag;
+  if (is_native()) flags |= kDeclareGlobalsNativeFlag;
+  return flags;
 }
 
 
@@ -771,13 +659,14 @@
 }
 
 
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
-  const Runtime::Function* function = expr->function();
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
+  ZoneList<Expression*>* args = node->arguments();
+  const Runtime::Function* function = node->function();
   ASSERT(function != NULL);
   ASSERT(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
-  ((*this).*(generator))(expr);
+  ((*this).*(generator))(args);
 }
 
 
@@ -794,25 +683,11 @@
 }
 
 
-void FullCodeGenerator::VisitInDuplicateContext(Expression* expr) {
-  if (context()->IsEffect()) {
-    VisitForEffect(expr);
-  } else if (context()->IsAccumulatorValue()) {
-    VisitForAccumulatorValue(expr);
-  } else if (context()->IsStackValue()) {
-    VisitForStackValue(expr);
-  } else if (context()->IsTest()) {
-    const TestContext* test = TestContext::cast(context());
-    VisitForControl(expr, test->true_label(), test->false_label(),
-                    test->fall_through());
-  }
-}
-
-
 void FullCodeGenerator::VisitComma(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ Comma");
   VisitForEffect(expr->left());
-  VisitInDuplicateContext(expr->right());
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
+  VisitInCurrentContext(expr->right());
 }
 
 
@@ -834,6 +709,7 @@
     }
     PrepareForBailoutForId(right_id, NO_REGISTERS);
     __ bind(&eval_right);
+    ForwardBailoutToChild(expr);
 
   } else if (context()->IsAccumulatorValue()) {
     VisitForAccumulatorValue(left);
@@ -841,6 +717,7 @@
     // case we need it.
     __ push(result_register());
     Label discard, restore;
+    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &restore, &restore);
     } else {
@@ -859,6 +736,7 @@
     // case we need it.
     __ push(result_register());
     Label discard;
+    PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
     if (is_logical_and) {
       DoTest(left, &discard, &done, &discard);
     } else {
@@ -880,7 +758,7 @@
     __ bind(&eval_right);
   }
 
-  VisitInDuplicateContext(right);
+  VisitInCurrentContext(right);
   __ bind(&done);
 }
 
@@ -907,6 +785,34 @@
 }
 
 
+void FullCodeGenerator::ForwardBailoutToChild(Expression* expr) {
+  if (!info_->HasDeoptimizationSupport()) return;
+  ASSERT(context()->IsTest());
+  ASSERT(expr == forward_bailout_stack_->expr());
+  forward_bailout_pending_ = forward_bailout_stack_;
+}
+
+
+void FullCodeGenerator::VisitInCurrentContext(Expression* expr) {
+  if (context()->IsTest()) {
+    ForwardBailoutStack stack(expr, forward_bailout_pending_);
+    ForwardBailoutStack* saved = forward_bailout_stack_;
+    forward_bailout_pending_ = NULL;
+    forward_bailout_stack_ = &stack;
+    Visit(expr);
+    forward_bailout_stack_ = saved;
+  } else {
+    ASSERT(forward_bailout_pending_ == NULL);
+    Visit(expr);
+    State state = context()->IsAccumulatorValue() ? TOS_REG : NO_REGISTERS;
+    PrepareForBailout(expr, state);
+    // Forwarding bailouts to children is a one shot operation. It should have
+    // been processed at this point.
+    ASSERT(forward_bailout_pending_ == NULL);
+  }
+}
+
+
 void FullCodeGenerator::VisitBlock(Block* stmt) {
   Comment cmnt(masm_, "[ Block");
   NestedBlock nested_block(this, stmt);
@@ -917,18 +823,9 @@
   if (stmt->block_scope() != NULL) {
     { Comment cmnt(masm_, "[ Extend block context");
       scope_ = stmt->block_scope();
-      Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
-      int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
-      __ Push(scope_info);
+      __ Push(scope_->GetSerializedScopeInfo());
       PushFunctionArgumentForContextAllocation();
-      if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
-        FastNewBlockContextStub stub(heap_slots);
-        __ CallStub(&stub);
-      } else {
-        __ CallRuntime(Runtime::kPushBlockContext, 2);
-      }
-
-      // Replace the context stored in the frame.
+      __ CallRuntime(Runtime::kPushBlockContext, 2);
       StoreToFrameField(StandardFrameConstants::kContextOffset,
                         context_register());
     }
@@ -1075,6 +972,7 @@
   VisitForStackValue(stmt->expression());
   PushFunctionArgumentForContextAllocation();
   __ CallRuntime(Runtime::kPushWithContext, 2);
+  decrement_stack_height();
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   { WithOrCatch body(this);
@@ -1112,7 +1010,7 @@
   // Check stack before looping.
   PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
   __ bind(&stack_check);
-  EmitStackCheck(stmt, &body);
+  EmitStackCheck(stmt);
   __ jmp(&body);
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1141,7 +1039,7 @@
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  EmitStackCheck(stmt, &body);
+  EmitStackCheck(stmt);
 
   __ bind(&test);
   VisitForControl(stmt->cond(),
@@ -1160,10 +1058,6 @@
   Label test, body;
 
   Iteration loop_statement(this, stmt);
-
-  // Set statement position for a break slot before entering the for-body.
-  SetStatementPosition(stmt);
-
   if (stmt->init() != NULL) {
     Visit(stmt->init());
   }
@@ -1178,6 +1072,7 @@
 
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   __ bind(loop_statement.continue_label());
+  SetStatementPosition(stmt);
   if (stmt->next() != NULL) {
     Visit(stmt->next());
   }
@@ -1187,7 +1082,7 @@
   SetStatementPosition(stmt);
 
   // Check stack before looping.
-  EmitStackCheck(stmt, &body);
+  EmitStackCheck(stmt);
 
   __ bind(&test);
   if (stmt->cond() != NULL) {
@@ -1208,17 +1103,20 @@
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
   SetStatementPosition(stmt);
-  // The try block adds a handler to the exception handler chain before
-  // entering, and removes it again when exiting normally.  If an exception
-  // is thrown during execution of the try block, the handler is consumed
-  // and control is passed to the catch block with the exception in the
-  // result register.
+  // The try block adds a handler to the exception handler chain
+  // before entering, and removes it again when exiting normally.
+  // If an exception is thrown during execution of the try block,
+  // control is passed to the handler, which consumes the handler entry.
+  // At this point the exception is in a register; we store it in
+  // the temporary local variable (prints as ".catch-var") before
+  // executing the catch block. The catch block has been rewritten
+  // to introduce a new scope to bind the catch variable and to remove
+  // that scope again afterwards.
 
-  Label try_entry, handler_entry, exit;
-  __ jmp(&try_entry);
-  __ bind(&handler_entry);
-  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
-  // Exception handler code, the exception is in the result register.
+  Label try_handler_setup, done;
+  __ Call(&try_handler_setup);
+  // Try handler code; the exception is in the result register.
+
   // Extend the context before executing the catch block.
   { Comment cmnt(masm_, "[ Extend catch context");
     __ Push(stmt->variable()->name());
@@ -1232,23 +1130,27 @@
   Scope* saved_scope = scope();
   scope_ = stmt->scope();
   ASSERT(scope_->declarations()->is_empty());
-  { WithOrCatch catch_body(this);
+  { WithOrCatch body(this);
     Visit(stmt->catch_block());
   }
   // Restore the context.
   LoadContextField(context_register(), Context::PREVIOUS_INDEX);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
   scope_ = saved_scope;
-  __ jmp(&exit);
+  __ jmp(&done);
 
   // Try block code. Sets up the exception handler chain.
-  __ bind(&try_entry);
-  __ PushTryHandler(StackHandler::CATCH, stmt->index());
-  { TryCatch try_body(this);
+  __ bind(&try_handler_setup);
+  {
+    const int delta = StackHandlerConstants::kSize / kPointerSize;
+    TryCatch try_block(this);
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+    increment_stack_height(delta);
     Visit(stmt->try_block());
+    __ PopTryHandler();
+    decrement_stack_height(delta);
   }
-  __ PopTryHandler();
-  __ bind(&exit);
+  __ bind(&done);
 }
 
 
@@ -1260,12 +1162,12 @@
   //
   // The try-finally construct can enter the finally block in three ways:
   // 1. By exiting the try-block normally. This removes the try-handler and
-  //    calls the finally block code before continuing.
+  //    calls the finally block code before continuing.
   // 2. By exiting the try-block with a function-local control flow transfer
   //    (break/continue/return). The site of the, e.g., break removes the
   //    try handler and calls the finally block code before continuing
   //    its outward control transfer.
-  // 3. By exiting the try-block with a thrown exception.
+  // 3. By exiting the try-block with a thrown exception.
   //    This can happen in nested function calls. It traverses the try-handler
   //    chain and consumes the try-handler entry before jumping to the
   //    handler code. The handler code then calls the finally-block before
@@ -1276,39 +1178,49 @@
   // exception) in the result register (rax/eax/r0), both of which must
   // be preserved. The return address isn't GC-safe, so it should be
   // cooked before GC.
-  Label try_entry, handler_entry, finally_entry;
+  Label finally_entry;
+  Label try_handler_setup;
+  const int original_stack_height = stack_height();
 
-  // Jump to try-handler setup and try-block code.
-  __ jmp(&try_entry);
-  __ bind(&handler_entry);
-  handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
-  // Exception handler code.  This code is only executed when an exception
-  // is thrown.  The exception is in the result register, and must be
-  // preserved by the finally block.  Call the finally block and then
-  // rethrow the exception if it returns.
-  __ Call(&finally_entry);
-  __ push(result_register());
-  __ CallRuntime(Runtime::kReThrow, 1);
+  // Set up the try-handler chain and jump to the try-block code. Use a
+  // call so that the try-handler address is pushed on the stack.
+  __ Call(&try_handler_setup);
+  // Try handler code. Return address of call is pushed on handler stack.
+  {
+    // This code is only executed during stack-handler traversal when an
+    // exception is thrown. The exception is in the result register, which
+    // is retained by the finally block.
+    // Call the finally block and then rethrow the exception if it returns.
+    __ Call(&finally_entry);
+    __ push(result_register());
+    __ CallRuntime(Runtime::kReThrow, 1);
+  }
 
-  // Finally block implementation.
   __ bind(&finally_entry);
-  EnterFinallyBlock();
-  { Finally finally_body(this);
+  {
+    // Finally block implementation.
+    Finally finally_block(this);
+    EnterFinallyBlock();
+    set_stack_height(original_stack_height + Finally::kElementCount);
     Visit(stmt->finally_block());
+    ExitFinallyBlock();  // Return to the calling code.
   }
-  ExitFinallyBlock();  // Return to the calling code.
 
-  // Set up try handler.
-  __ bind(&try_entry);
-  __ PushTryHandler(StackHandler::FINALLY, stmt->index());
-  { TryFinally try_body(this, &finally_entry);
+  __ bind(&try_handler_setup);
+  {
+    // Set up the try handler and adjust the tracked stack height.
+    const int delta = StackHandlerConstants::kSize / kPointerSize;
+    TryFinally try_block(this, &finally_entry);
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+    set_stack_height(original_stack_height + delta);
     Visit(stmt->try_block());
+    __ PopTryHandler();
+    set_stack_height(original_stack_height);
   }
-  __ PopTryHandler();
   // Execute the finally block on the way out.  Clobber the unpredictable
-  // value in the result register with one that's safe for GC because the
-  // finally block will unconditionally preserve the result register on the
-  // stack.
+  // value in the accumulator with one that's safe for GC.  The finally
+  // block will unconditionally preserve the accumulator on the stack.
   ClearAccumulator();
   __ Call(&finally_entry);
 }
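
The comment above enumerates three ways control can reach a finally block. As a conceptual analogue only (C++ destructors, not V8's generated code, which instead *calls* the finally block like a subroutine and rethrows afterwards), a small RAII sketch shows the same "finally runs on every exit path" contract:

```cpp
#include <cstdio>
#include <stdexcept>

// RAII guard: runs the "finally" action on every exit path, mirroring the
// three cases in the comment above (normal exit, local control transfer,
// thrown exception).
template <typename F>
struct FinallyGuard {
  F action;
  ~FinallyGuard() { action(); }
};

int Work(bool do_throw, bool do_return) {
  FinallyGuard<void (*)()> guard{[] { std::puts("finally block"); }};
  if (do_return) return 1;                      // Case 2: early return.
  if (do_throw) throw std::runtime_error("x");  // Case 3: exception.
  return 0;                                     // Case 1: normal exit.
}

int main() {
  Work(false, false);
  Work(false, true);
  try { Work(true, false); } catch (const std::runtime_error&) {}
  return 0;  // "finally block" was printed three times.
}
```
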
@@ -1334,6 +1246,7 @@
   __ bind(&true_case);
   SetExpressionPosition(expr->then_expression(),
                         expr->then_expression_position());
+  int start_stack_height = stack_height();
   if (context()->IsTest()) {
     const TestContext* for_test = TestContext::cast(context());
     VisitForControl(expr->then_expression(),
@@ -1341,15 +1254,17 @@
                     for_test->false_label(),
                     NULL);
   } else {
-    VisitInDuplicateContext(expr->then_expression());
+    VisitInCurrentContext(expr->then_expression());
     __ jmp(&done);
   }
 
   PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
   __ bind(&false_case);
+  set_stack_height(start_stack_height);
+  if (context()->IsTest()) ForwardBailoutToChild(expr);
   SetExpressionPosition(expr->else_expression(),
                         expr->else_expression_position());
-  VisitInDuplicateContext(expr->else_expression());
+  VisitInCurrentContext(expr->else_expression());
   // If control flow falls through Visit, merge it with true case here.
   if (!context()->IsTest()) {
     __ bind(&done);
@@ -1386,8 +1301,11 @@
 
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
+  // Throw has no effect on the stack height or the current expression context.
+  // Usually the expression context is null, because throw is a statement.
   VisitForStackValue(expr->exception());
   __ CallRuntime(Runtime::kThrow, 1);
+  decrement_stack_height();
   // Never returns here.
 }
 
@@ -1403,21 +1321,19 @@
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
-  Expression* sub_expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
+                                          Label* if_true,
+                                          Label* if_false,
+                                          Label* fall_through) {
+  Expression* expr;
   Handle<String> check;
-  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    EmitLiteralCompareTypeof(expr, sub_expr, check);
+  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
+    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
     return true;
   }
 
-  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
-    EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
-    return true;
-  }
-
-  if (expr->IsLiteralCompareNull(&sub_expr)) {
-    EmitLiteralCompareNil(expr, sub_expr, kNullValue);
+  if (compare->IsLiteralCompareUndefined(&expr)) {
+    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
     return true;
   }
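
TryLiteralCompare's job in both versions is pattern matching: recognize comparisons such as `typeof x == "string"` (or, post-revert, `x === undefined`) and emit a specialized code path instead of a generic compare. Below is a hedged sketch of just the matching step over a hypothetical mini-AST; every type in it is invented for illustration and is much simpler than V8's AST.

```cpp
#include <string>

// Hypothetical mini-AST, only for illustrating the matching step.
struct Expression {
  enum Kind { TYPEOF, STRING_LITERAL, UNDEFINED_LITERAL, OTHER } kind;
  Expression* operand;  // For TYPEOF.
  std::string value;    // For STRING_LITERAL.
};

struct CompareOperation {
  Expression* left;
  Expression* right;
};

// Returns true if the comparison is `typeof <expr> == "<check>"` (in either
// operand order), filling in the sub-expression and the checked string --
// the same shape of question IsLiteralCompareTypeof answers.
bool IsLiteralCompareTypeof(const CompareOperation* op,
                            Expression** sub_expr,
                            std::string* check) {
  const Expression* l = op->left;
  const Expression* r = op->right;
  if (l->kind == Expression::TYPEOF && r->kind == Expression::STRING_LITERAL) {
    *sub_expr = l->operand;
    *check = r->value;
    return true;
  }
  if (r->kind == Expression::TYPEOF && l->kind == Expression::STRING_LITERAL) {
    *sub_expr = r->operand;
    *check = l->value;
    return true;
  }
  return false;
}

int main() {
  Expression sub = {Expression::OTHER, nullptr, ""};
  Expression lhs = {Expression::TYPEOF, &sub, ""};
  Expression rhs = {Expression::STRING_LITERAL, nullptr, "string"};
  CompareOperation cmp = {&lhs, &rhs};
  Expression* sub_expr = nullptr;
  std::string check;
  bool matched = IsLiteralCompareTypeof(&cmp, &sub_expr, &check);
  return (matched && sub_expr == &sub && check == "string") ? 0 : 1;
}
```
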
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 58d5986..803c618 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -77,32 +77,27 @@
     TOS_REG
   };
 
-  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
+  explicit FullCodeGenerator(MacroAssembler* masm)
       : masm_(masm),
-        info_(info),
-        scope_(info->scope()),
+        info_(NULL),
+        scope_(NULL),
         nesting_stack_(NULL),
         loop_depth_(0),
-        global_count_(0),
+        stack_height_(0),
         context_(NULL),
-        bailout_entries_(info->HasDeoptimizationSupport()
-                         ? info->function()->ast_node_count() : 0),
+        bailout_entries_(0),
         stack_checks_(2),  // There's always at least one.
-        type_feedback_cells_(info->HasDeoptimizationSupport()
-                             ? info->function()->ast_node_count() : 0),
-        ic_total_count_(0),
-        has_self_optimization_header_(false) { }
+        forward_bailout_stack_(NULL),
+        forward_bailout_pending_(NULL) {
+  }
 
   static bool MakeCode(CompilationInfo* info);
 
-  // Returns the platform-specific size in bytes of the self-optimization
-  // header.
-  static int self_optimization_header_size();
+  void Generate(CompilationInfo* info);
+  void PopulateDeoptimizationData(Handle<Code> code);
 
-  // Encode state and pc-offset as a BitField<type, start, size>.
-  // Only use 30 bits because we encode the result as a smi.
-  class StateField : public BitField<State, 0, 1> { };
-  class PcField    : public BitField<unsigned, 1, 30-1> { };
+  class StateField : public BitField<State, 0, 8> { };
+  class PcField    : public BitField<unsigned, 8, 32-8> { };
 
   static const char* State2String(State state) {
     switch (state) {
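
StateField and PcField above pack a bailout State and a pc offset into a single word using V8's BitField template. A minimal reimplementation of that idiom, assuming the usual encode/decode shape (simplified from V8's utils.h), with the post-revert 8/24-bit split:

```cpp
#include <cassert>
#include <cstdint>

// Minimal version of the BitField idiom: pack a typed value into a given
// bit range of a 32-bit word.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
};

enum State { NO_REGISTERS, TOS_REG };

// 8 bits of state, 24 bits of pc offset -- the post-revert layout above.
typedef BitField<State, 0, 8> StateField;
typedef BitField<unsigned, 8, 32 - 8> PcField;

int main() {
  uint32_t word = StateField::encode(TOS_REG) | PcField::encode(0x1234);
  assert(StateField::decode(word) == TOS_REG);
  assert(PcField::decode(word) == 0x1234u);
  return 0;
}
```
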
@@ -148,13 +143,11 @@
       return previous_;
     }
 
-   protected:
+   protected:
     MacroAssembler* masm() { return codegen_->masm(); }
 
     FullCodeGenerator* codegen_;
     NestedStatement* previous_;
-
-   private:
     DISALLOW_COPY_AND_ASSIGN(NestedStatement);
   };
 
@@ -283,8 +276,27 @@
     }
   };
 
+  // The forward bailout stack keeps track of the expressions that can
+  // bail out to just before the control flow is split in a child node.
+  // The stack elements are linked together through the parent link when
+  // visiting expressions in test contexts, after bailout has been
+  // forwarded to a child expression.
+  class ForwardBailoutStack BASE_EMBEDDED {
+   public:
+    ForwardBailoutStack(Expression* expr, ForwardBailoutStack* parent)
+        : expr_(expr), parent_(parent) { }
+
+    Expression* expr() const { return expr_; }
+    ForwardBailoutStack* parent() const { return parent_; }
+
+   private:
+    Expression* const expr_;
+    ForwardBailoutStack* const parent_;
+  };
+
   // Type of a member function that generates inline code for a native function.
-  typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
+  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
+      (ZoneList<Expression*>*);
 
   static const InlineFunctionGenerator kInlineFunctionGenerators[];
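
ForwardBailoutStack is BASE_EMBEDDED: its nodes are allocated on the C++ call stack during AST traversal and chained through parent pointers, so the "stack" unwinds automatically with the recursion. A self-contained sketch of that idiom (all names hypothetical):

```cpp
#include <cstdio>

struct Expr;  // Opaque AST node for the purpose of this sketch.

// Parent-linked stack node, allocated on the C++ call stack during a
// recursive traversal -- the same idiom as ForwardBailoutStack above.
struct TraversalStack {
  const Expr* expr;
  const TraversalStack* parent;
};

void PrintChain(const TraversalStack* top) {
  int depth = 0;
  for (const TraversalStack* s = top; s != nullptr; s = s->parent) depth++;
  std::printf("stack depth: %d\n", depth);
}

void Visit(const Expr* expr, const TraversalStack* enclosing, int remaining) {
  TraversalStack node = {expr, enclosing};  // Lives only for this frame.
  if (remaining == 0) {
    PrintChain(&node);
    return;
  }
  Visit(expr, &node, remaining - 1);  // Deeper recursion links via parent.
}

int main() {
  Visit(nullptr, nullptr, 3);  // Prints "stack depth: 4".
  return 0;
}
```
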
 
@@ -345,22 +357,23 @@
   // need the write barrier if location is CONTEXT.
   MemOperand VarOperand(Variable* var, Register scratch);
 
+  // Forward the bailout responsibility for the given expression to
+  // the next child visited (which must be in a test context).
+  void ForwardBailoutToChild(Expression* expr);
+
   void VisitForEffect(Expression* expr) {
     EffectContext context(this);
-    Visit(expr);
-    PrepareForBailout(expr, NO_REGISTERS);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForAccumulatorValue(Expression* expr) {
     AccumulatorValueContext context(this);
-    Visit(expr);
-    PrepareForBailout(expr, TOS_REG);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForStackValue(Expression* expr) {
     StackValueContext context(this);
-    Visit(expr);
-    PrepareForBailout(expr, NO_REGISTERS);
+    VisitInCurrentContext(expr);
   }
 
   void VisitForControl(Expression* expr,
@@ -368,14 +381,9 @@
                        Label* if_false,
                        Label* fall_through) {
     TestContext context(this, expr, if_true, if_false, fall_through);
-    Visit(expr);
-    // For test contexts, we prepare for bailout before branching, not at
-    // the end of the entire expression.  This happens as part of visiting
-    // the expression.
+    VisitInCurrentContext(expr);
   }
 
-  void VisitInDuplicateContext(Expression* expr);
-
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
   int DeclareGlobalsFlags();
@@ -383,26 +391,29 @@
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it.  Returns true if the compare operations
   // has been matched and all code generated; false otherwise.
-  bool TryLiteralCompare(CompareOperation* compare);
+  bool TryLiteralCompare(CompareOperation* compare,
+                         Label* if_true,
+                         Label* if_false,
+                         Label* fall_through);
 
   // Platform-specific code for comparing the type of a value with
   // a given literal string.
   void EmitLiteralCompareTypeof(Expression* expr,
-                                Expression* sub_expr,
-                                Handle<String> check);
+                                Handle<String> check,
+                                Label* if_true,
+                                Label* if_false,
+                                Label* fall_through);
 
-  // Platform-specific code for equality comparison with a nil-like value.
-  void EmitLiteralCompareNil(CompareOperation* expr,
-                             Expression* sub_expr,
-                             NilValue nil);
+  // Platform-specific code for strict equality comparison with
+  // the undefined value.
+  void EmitLiteralCompareUndefined(Expression* expr,
+                                   Label* if_true,
+                                   Label* if_false,
+                                   Label* fall_through);
 
   // Bailout support.
   void PrepareForBailout(Expression* node, State state);
-  void PrepareForBailoutForId(unsigned id, State state);
-
-  // Cache cell support.  This associates AST ids with global property cells
-  // that will be cleared during GC and collected by the type-feedback oracle.
-  void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
+  void PrepareForBailoutForId(int id, State state);
 
   // Record a call's return site offset, used to rebuild the frame if the
   // called function was inlined at the site.
@@ -413,33 +424,27 @@
   // canonical JS true value so we will insert a (dead) test against true at
   // the actual bailout target from the optimized code. If not
   // should_normalize, the true and false labels are ignored.
-  void PrepareForBailoutBeforeSplit(Expression* expr,
+  void PrepareForBailoutBeforeSplit(State state,
                                     bool should_normalize,
                                     Label* if_true,
                                     Label* if_false);
 
   // Platform-specific code for a variable, constant, or function
   // declaration.  Functions have an initial value.
-  // Increments global_count_ for unallocated variables.
   void EmitDeclaration(VariableProxy* proxy,
-                       VariableMode mode,
-                       FunctionLiteral* function);
+                       Variable::Mode mode,
+                       FunctionLiteral* function,
+                       int* global_count);
 
   // Platform-specific code for checking the stack limit at the back edge of
   // a loop.
-  // This is meant to be called at loop back edges, |back_edge_target| is
-  // the jump target of the back edge and is used to approximate the amount
-  // of code inside the loop.
-  void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
+  void EmitStackCheck(IterationStatement* stmt);
   // Record the OSR AST id corresponding to a stack check in the code.
-  void RecordStackCheck(unsigned osr_ast_id);
+  void RecordStackCheck(int osr_ast_id);
   // Emit a table of stack check ids and pcs into the code stream.  Return
   // the offset of the start of the table.
   unsigned EmitStackCheckTable();
 
-  void EmitProfilingCounterDecrement(int delta);
-  void EmitProfilingCounterReset();
-
   // Platform-specific return sequence
   void EmitReturnSequence();
 
@@ -454,7 +459,7 @@
   void EmitInlineRuntimeCall(CallRuntime* expr);
 
 #define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
-  void Emit##name(CallRuntime* expr);
+  void Emit##name(ZoneList<Expression*>* arguments);
   INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
   INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
 #undef EMIT_INLINE_RUNTIME_CALL
@@ -470,10 +475,13 @@
                                  Label* done);
   void EmitVariableLoad(VariableProxy* proxy);
 
-  void EmitAccessor(Expression* expression);
+  enum ResolveEvalFlag {
+    SKIP_CONTEXT_LOOKUP,
+    PERFORM_CONTEXT_LOOKUP
+  };
 
   // Expects the arguments and the function already pushed.
-  void EmitResolvePossiblyDirectEval(int arg_count);
+  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
 
   // Platform-specific support for allocating a new closure based on
   // the given function info.
@@ -505,7 +513,7 @@
 
   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator.
-  void EmitAssignment(Expression* expr);
+  void EmitAssignment(Expression* expr, int bailout_ast_id);
 
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
@@ -521,10 +529,6 @@
   // accumulator.
   void EmitKeyedPropertyAssignment(Assignment* expr);
 
-  void CallIC(Handle<Code> code,
-              RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-              unsigned ast_id = kNoASTId);
-
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
@@ -544,6 +548,35 @@
     loop_depth_--;
   }
 
+#if defined(V8_TARGET_ARCH_IA32)
+  int stack_height() { return stack_height_; }
+  void set_stack_height(int depth) { stack_height_ = depth; }
+  void increment_stack_height() { stack_height_++; }
+  void increment_stack_height(int delta) { stack_height_ += delta; }
+  void decrement_stack_height() {
+    if (FLAG_verify_stack_height) {
+      ASSERT(stack_height_ > 0);
+    }
+    stack_height_--;
+  }
+  void decrement_stack_height(int delta) {
+    stack_height_ -= delta;
+    if (FLAG_verify_stack_height) {
+      ASSERT(stack_height_ >= 0);
+    }
+  }
+  // Call this function only if FLAG_verify_stack_height is true.
+  void verify_stack_height();  // Generates a runtime check of esp - ebp.
+#else
+  int stack_height() { return 0; }
+  void set_stack_height(int depth) {}
+  void increment_stack_height() {}
+  void increment_stack_height(int delta) {}
+  void decrement_stack_height() {}
+  void decrement_stack_height(int delta) {}
+  void verify_stack_height() {}
+#endif  // V8_TARGET_ARCH_IA32
+
   MacroAssembler* masm() { return masm_; }
 
   class ExpressionContext;
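
The block above is the classic debug-only-bookkeeping pattern: real counters and assertions on ia32 under FLAG_verify_stack_height, zero-cost no-ops on every other architecture. A compilable sketch of the same shape, with a plain macro standing in for the architecture/flag combination:

```cpp
#include <cassert>

// VERIFY_STACK_HEIGHT stands in for the V8_TARGET_ARCH_IA32 /
// FLAG_verify_stack_height combination used above.
#define VERIFY_STACK_HEIGHT 1

class StackHeightTracker {
 public:
#if VERIFY_STACK_HEIGHT
  int stack_height() const { return stack_height_; }
  void increment_stack_height(int delta = 1) { stack_height_ += delta; }
  void decrement_stack_height(int delta = 1) {
    stack_height_ -= delta;
    assert(stack_height_ >= 0);  // Underflow means a codegen bug.
  }

 private:
  int stack_height_ = 0;
#else
  // No-op fallback: all bookkeeping compiles away.
  int stack_height() const { return 0; }
  void increment_stack_height(int = 1) {}
  void decrement_stack_height(int = 1) {}
#endif
};

int main() {
  StackHeightTracker t;
  t.increment_stack_height();   // push one slot
  t.increment_stack_height(2);  // push two more slots
  t.decrement_stack_height(3);  // pop them all
  assert(t.stack_height() == 0);
  return 0;
}
```
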
@@ -553,11 +586,9 @@
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
   bool is_native() { return info_->is_native(); }
-  bool is_classic_mode() {
-    return language_mode() == CLASSIC_MODE;
-  }
-  LanguageMode language_mode() {
-    return function()->language_mode();
+  bool is_strict_mode() { return function()->strict_mode(); }
+  StrictModeFlag strict_mode_flag() {
+    return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return scope_; }
@@ -587,26 +618,15 @@
   void VisitComma(BinaryOperation* expr);
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
+  void VisitInCurrentContext(Expression* expr);
 
   void VisitForTypeofValue(Expression* expr);
 
-  void Generate();
-  void PopulateDeoptimizationData(Handle<Code> code);
-  void PopulateTypeFeedbackInfo(Handle<Code> code);
-  void PopulateTypeFeedbackCells(Handle<Code> code);
-
-  Handle<FixedArray> handler_table() { return handler_table_; }
-
   struct BailoutEntry {
     unsigned id;
     unsigned pc_and_state;
   };
 
-  struct TypeFeedbackCellEntry {
-    unsigned ast_id;
-    Handle<JSGlobalPropertyCell> cell;
-  };
-
 
   class ExpressionContext BASE_EMBEDDED {
    public:
@@ -617,6 +637,10 @@
 
     virtual ~ExpressionContext() {
       codegen_->set_new_context(old_);
+      if (FLAG_verify_stack_height) {
+        ASSERT_EQ(expected_stack_height_, codegen()->stack_height());
+        codegen()->verify_stack_height();
+      }
     }
 
     Isolate* isolate() const { return codegen_->isolate(); }
@@ -654,8 +678,8 @@
                              Label** if_false,
                              Label** fall_through) const = 0;
 
-    // Returns true if we are evaluating only for side effects (i.e. if the
-    // result will be discarded).
+    // Returns true if we are evaluating only for side effects (i.e. if the
+    // result will be discarded).
     virtual bool IsEffect() const { return false; }
 
     // Returns true if we are evaluating for the value (in accu/on stack).
@@ -670,6 +694,7 @@
     FullCodeGenerator* codegen() const { return codegen_; }
     MacroAssembler* masm() const { return masm_; }
     MacroAssembler* masm_;
+    int expected_stack_height_;  // The expected stack height (esp - ebp) on exit.
 
    private:
     const ExpressionContext* old_;
@@ -679,7 +704,9 @@
   class AccumulatorValueContext : public ExpressionContext {
    public:
     explicit AccumulatorValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height();
+    }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -700,7 +727,9 @@
   class StackValueContext : public ExpressionContext {
    public:
     explicit StackValueContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height() + 1;
+    }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -729,7 +758,9 @@
           condition_(condition),
           true_label_(true_label),
           false_label_(false_label),
-          fall_through_(fall_through) { }
+          fall_through_(fall_through) {
+      expected_stack_height_ = codegen->stack_height();
+    }
 
     static const TestContext* cast(const ExpressionContext* context) {
       ASSERT(context->IsTest());
@@ -766,7 +797,10 @@
   class EffectContext : public ExpressionContext {
    public:
     explicit EffectContext(FullCodeGenerator* codegen)
-        : ExpressionContext(codegen) { }
+        : ExpressionContext(codegen) {
+      expected_stack_height_ = codegen->stack_height();
+    }
 
     virtual void Plug(bool flag) const;
     virtual void Plug(Register reg) const;
@@ -790,15 +824,12 @@
   Label return_label_;
   NestedStatement* nesting_stack_;
   int loop_depth_;
-  int global_count_;
+  int stack_height_;
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BailoutEntry> stack_checks_;
-  ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
-  int ic_total_count_;
-  bool has_self_optimization_header_;
-  Handle<FixedArray> handler_table_;
-  Handle<JSGlobalPropertyCell> profiling_counter_;
+  ForwardBailoutStack* forward_bailout_stack_;
+  ForwardBailoutStack* forward_bailout_pending_;
 
   friend class NestedStatement;
 
@@ -806,28 +837,6 @@
 };
 
 
-// A map from property names to getter/setter pairs allocated in the zone.
-class AccessorTable: public TemplateHashMap<Literal,
-                                            ObjectLiteral::Accessors,
-                                            ZoneListAllocationPolicy> {
- public:
-  explicit AccessorTable(Zone* zone) :
-      TemplateHashMap<Literal,
-                      ObjectLiteral::Accessors,
-                      ZoneListAllocationPolicy>(Literal::Match),
-      zone_(zone) { }
-
-  Iterator lookup(Literal* literal) {
-    Iterator it = find(literal, true);
-    if (it->second == NULL) it->second = new(zone_) ObjectLiteral::Accessors();
-    return it;
-  }
-
- private:
-  Zone* zone_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_FULL_CODEGEN_H_
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index d3cd447..68cb053 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -33,7 +33,6 @@
 #include "compiler.h"
 #include "global-handles.h"
 #include "messages.h"
-#include "platform.h"
 #include "natives.h"
 #include "scopeinfo.h"
 
@@ -1116,13 +1115,13 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.LocalCount();
+      int locals = scope_info.NumberOfLocals();
       int current_abbreviation = 4;
 
       for (int param = 0; param < params; ++param) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *scope_info.ParameterName(param)->ToCString(DISALLOW_NULLS));
+            *scope_info.parameter_name(param)->ToCString(DISALLOW_NULLS));
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1313,7 +1312,7 @@
       int context_slots = scope_info.number_of_context_slots();
       // The real slot ID is internal_slots + context_slot_id.
       int internal_slots = Context::MIN_CONTEXT_SLOTS;
-      int locals = scope_info.LocalCount();
+      int locals = scope_info.NumberOfLocals();
       int total_children =
           params + slots + context_slots + internal_slots + locals + 2;
 
@@ -1557,23 +1556,23 @@
 
 class UnwindInfoSection : public DebugSection {
  public:
-  explicit UnwindInfoSection(CodeDescription* desc);
-  virtual bool WriteBody(Writer* w);
+  explicit UnwindInfoSection(CodeDescription *desc);
+  virtual bool WriteBody(Writer *w);
 
-  int WriteCIE(Writer* w);
-  void WriteFDE(Writer* w, int);
+  int WriteCIE(Writer *w);
+  void WriteFDE(Writer *w, int);
 
-  void WriteFDEStateOnEntry(Writer* w);
-  void WriteFDEStateAfterRBPPush(Writer* w);
-  void WriteFDEStateAfterRBPSet(Writer* w);
-  void WriteFDEStateAfterRBPPop(Writer* w);
+  void WriteFDEStateOnEntry(Writer *w);
+  void WriteFDEStateAfterRBPPush(Writer *w);
+  void WriteFDEStateAfterRBPSet(Writer *w);
+  void WriteFDEStateAfterRBPPop(Writer *w);
 
-  void WriteLength(Writer* w,
+  void WriteLength(Writer *w,
                    Writer::Slot<uint32_t>* length_slot,
                    int initial_position);
 
  private:
-  CodeDescription* desc_;
+  CodeDescription *desc_;
 
   // DWARF3 Specification, Table 7.23
   enum CFIInstructions {
@@ -1624,7 +1623,7 @@
 };
 
 
-void UnwindInfoSection::WriteLength(Writer* w,
+void UnwindInfoSection::WriteLength(Writer *w,
                                     Writer::Slot<uint32_t>* length_slot,
                                     int initial_position) {
   uint32_t align = (w->position() - initial_position) % kPointerSize;
@@ -1640,7 +1639,7 @@
 }
 
 
-UnwindInfoSection::UnwindInfoSection(CodeDescription* desc)
+UnwindInfoSection::UnwindInfoSection(CodeDescription *desc)
 #ifdef __ELF
     : ELFSection(".eh_frame", TYPE_X86_64_UNWIND, 1),
 #else
@@ -1649,7 +1648,7 @@
 #endif
       desc_(desc) { }
 
-int UnwindInfoSection::WriteCIE(Writer* w) {
+int UnwindInfoSection::WriteCIE(Writer *w) {
   Writer::Slot<uint32_t> cie_length_slot = w->CreateSlotHere<uint32_t>();
   uint32_t cie_position = w->position();
 
@@ -1669,7 +1668,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDE(Writer* w, int cie_position) {
+void UnwindInfoSection::WriteFDE(Writer *w, int cie_position) {
   // The only FDE for this function. The CFA is the current RBP.
   Writer::Slot<uint32_t> fde_length_slot = w->CreateSlotHere<uint32_t>();
   int fde_position = w->position();
@@ -1687,7 +1686,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateOnEntry(Writer* w) {
+void UnwindInfoSection::WriteFDEStateOnEntry(Writer *w) {
   // The first state, just after control has been transferred to the
   // function.
 
@@ -1714,7 +1713,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer* w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPush(Writer *w) {
   // The second state, just after RBP has been pushed.
 
   // RBP / CFA for this function is now the current RSP, so just set the
@@ -1735,7 +1734,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer* w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPSet(Writer *w) {
   // The third state, after the RBP has been set.
 
   // The CFA can now directly be set to RBP.
@@ -1750,7 +1749,7 @@
 }
 
 
-void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer* w) {
+void UnwindInfoSection::WriteFDEStateAfterRBPPop(Writer *w) {
   // The fourth (final) state. The RBP has been popped (just before issuing a
   // return).
 
@@ -1770,7 +1769,7 @@
 }
 
 
-bool UnwindInfoSection::WriteBody(Writer* w) {
+bool UnwindInfoSection::WriteBody(Writer *w) {
   uint32_t cie_position = WriteCIE(w);
   WriteFDE(w, cie_position);
   return true;
@@ -1811,8 +1810,8 @@
   struct JITDescriptor {
     uint32_t version_;
     uint32_t action_flag_;
-    JITCodeEntry* relevant_entry_;
-    JITCodeEntry* first_entry_;
+    JITCodeEntry *relevant_entry_;
+    JITCodeEntry *first_entry_;
   };
 
   // GDB will place breakpoint into this function.
@@ -1999,7 +1998,7 @@
   }
 }
 
-static void AddUnwindInfo(CodeDescription* desc) {
+static void AddUnwindInfo(CodeDescription *desc) {
 #ifdef V8_TARGET_ARCH_X64
   if (desc->tag() == GDBJITInterface::FUNCTION) {
     // To avoid propagating unwinding information through
@@ -2036,7 +2035,7 @@
 }
 
 
-static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
+Mutex* GDBJITInterface::mutex_ = OS::CreateMutex();
 
 
 void GDBJITInterface::AddCode(const char* name,
@@ -2046,7 +2045,7 @@
                               CompilationInfo* info) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex.Pointer());
+  ScopedLock lock(mutex_);
   AssertNoAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2127,7 +2126,7 @@
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
-  ScopedLock lock(mutex.Pointer());
+  ScopedLock lock(mutex_);
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -2147,7 +2146,7 @@
 
 void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
                                                GDBJITLineInfo* line_info) {
-  ScopedLock lock(mutex.Pointer());
+  ScopedLock lock(mutex_);
   ASSERT(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   ASSERT(e->value == NULL);
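
Post-revert, GDBJITInterface guards its code-entry hash map with one eagerly created static mutex, and every entry point (AddCode, RemoveCode, RegisterDetailedLineInfo) takes a ScopedLock on it. A sketch of that locking discipline, using the standard library in place of OS::CreateMutex()/ScopedLock:

```cpp
#include <cstdint>
#include <map>
#include <mutex>

// std::mutex / std::lock_guard stand in for OS::CreateMutex()/ScopedLock:
// every entry point takes the same mutex before touching the registry.
namespace {
std::mutex registry_mutex;
std::map<std::uintptr_t, const char*> code_registry;  // code addr -> name
}

void AddCode(std::uintptr_t code, const char* name) {
  std::lock_guard<std::mutex> lock(registry_mutex);
  code_registry[code] = name;
}

void RemoveCode(std::uintptr_t code) {
  std::lock_guard<std::mutex> lock(registry_mutex);
  code_registry.erase(code);
}

int main() {
  AddCode(0x1000, "foo");
  RemoveCode(0x1000);
  return 0;
}
```
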
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index 0eca938..2cf15bc 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -132,6 +132,9 @@
   static void RemoveCode(Code* code);
 
   static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+
+ private:
+  static Mutex* mutex_;
 };
 
 #define GDBJIT(action) GDBJITInterface::action
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 9c0ad45..87066fa 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -232,7 +232,7 @@
       VMState state(isolate, EXTERNAL);
       func(object, par);
     }
-    // Absence of explicit cleanup or revival of weak handle
+    // Absence of explicit cleanup or revival of weak handle
     // in most of the cases would lead to memory leak.
     ASSERT(state_ != NEAR_DEATH);
     return true;
@@ -384,7 +384,6 @@
     : isolate_(isolate),
       number_of_weak_handles_(0),
       number_of_global_object_weak_handles_(0),
-      number_of_global_handles_(0),
       first_block_(NULL),
       first_used_block_(NULL),
       first_free_(NULL),
@@ -404,7 +403,6 @@
 
 Handle<Object> GlobalHandles::Create(Object* value) {
   isolate_->counters()->global_handles()->Increment();
-  number_of_global_handles_++;
   if (first_free_ == NULL) {
     first_block_ = new NodeBlock(first_block_);
     first_block_->PutNodesOnFreeList(&first_free_);
@@ -425,7 +423,6 @@
 
 void GlobalHandles::Destroy(Object** location) {
   isolate_->counters()->global_handles()->Decrement();
-  number_of_global_handles_--;
   if (location == NULL) return;
   Node::FromLocation(location)->Release(this);
 }
diff --git a/src/global-handles.h b/src/global-handles.h
index ddf5fe2..153d4da 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -143,11 +143,6 @@
     return number_of_global_object_weak_handles_;
   }
 
-  // Returns the current number of handles to global objects.
-  int NumberOfGlobalHandles() {
-    return number_of_global_handles_;
-  }
-
   // Clear the weakness of a global handle.
   void ClearWeakness(Object** location);
 
@@ -253,9 +248,6 @@
   // number_of_weak_handles_.
   int number_of_global_object_weak_handles_;
 
-  // Field always containing the number of handles to global objects.
-  int number_of_global_handles_;
-
   // List of all allocated node blocks.
   NodeBlock* first_block_;
 
diff --git a/src/globals.h b/src/globals.h
index 25d4ffe..6c6966a 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -175,22 +175,17 @@
 // than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
 // works on compilers that don't have it (like MSVC).
 #if V8_HOST_ARCH_64_BIT
-#if defined(_MSC_VER)
+#ifdef _MSC_VER
 #define V8_UINT64_C(x)  (x ## UI64)
 #define V8_INT64_C(x)   (x ## I64)
 #define V8_INTPTR_C(x)  (x ## I64)
 #define V8_PTR_PREFIX "ll"
-#elif defined(__MINGW64__)
-#define V8_UINT64_C(x)  (x ## ULL)
-#define V8_INT64_C(x)   (x ## LL)
-#define V8_INTPTR_C(x)  (x ## LL)
-#define V8_PTR_PREFIX "I64"
-#else
+#else  // _MSC_VER
 #define V8_UINT64_C(x)  (x ## UL)
 #define V8_INT64_C(x)   (x ## L)
 #define V8_INTPTR_C(x)  (x ## L)
 #define V8_PTR_PREFIX "l"
-#endif
+#endif  // _MSC_VER
 #else  // V8_HOST_ARCH_64_BIT
 #define V8_INTPTR_C(x)  (x)
 #define V8_PTR_PREFIX ""
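
The V8_PTR_PREFIX macro exists so callers can splice the right printf length modifier into a format string by string-literal concatenation. A small sketch of that consumption pattern, with the prefix hard-wired for one host (a 64-bit non-MSVC build); the real macro is selected by the #if ladder above:

```cpp
#include <cstdint>
#include <cstdio>

// Hard-wired stand-in for V8_PTR_PREFIX on a 64-bit non-MSVC host.
#define DEMO_PTR_PREFIX "l"

int main() {
  std::intptr_t diff = 42;
  // "%" "l" "d" concatenates to "%ld", matching the cast below.
  std::printf("pointer-sized value: %" DEMO_PTR_PREFIX "d\n",
              static_cast<long>(diff));
  return 0;
}
```
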
@@ -235,9 +230,6 @@
 
 const int kDoubleSizeLog2 = 3;
 
-// Size of the state of a the random number generator.
-const int kRandomStateSize = 2 * kIntSize;
-
 #if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
 const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
@@ -263,13 +255,8 @@
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-
-// ASCII/UTF-16 constants
+// ASCII/UC16 constants
 // Code-point values in Unicode 4.0 are 21 bits wide.
-// Code units in UTF-16 are 16 bits wide.
 typedef uint16_t uc16;
 typedef int32_t uc32;
 const int kASCIISize    = kCharSize;
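
The deleted kQuietNaNMask covered bits 51..62 of a double: the eleven exponent bits plus the quiet bit, with the sign and payload bits left out. A sketch that checks a platform quiet NaN against that mask, assuming an IEEE-754 double representation:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// The deleted constant: bits 51..62 set (twelve bits in total).
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;

int main() {
  double qnan = std::nan("");  // A quiet NaN.
  uint64_t bits;
  std::memcpy(&bits, &qnan, sizeof bits);  // Inspect the IEEE-754 bits.
  assert((bits & kQuietNaNMask) == kQuietNaNMask);
  return 0;
}
```
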
@@ -300,7 +287,7 @@
 // The USE(x) template is used to silence C++ compiler warnings
 // issued for (yet) unused variables (typically parameters).
 template <typename T>
-inline void USE(T) { }
+static inline void USE(T) { }
 
 
 // FUNCTION_ADDR(f) gets the address of a C function f.
@@ -364,39 +351,6 @@
 class FreeStoreAllocationPolicy;
 template <typename T, class P = FreeStoreAllocationPolicy> class List;
 
-// -----------------------------------------------------------------------------
-// Declarations for use in both the preparser and the rest of V8.
-
-// The different language modes that V8 implements. ES5 defines two language
-// modes: an unrestricted mode respectively a strict mode which are indicated by
-// CLASSIC_MODE respectively STRICT_MODE in the enum. The harmony spec drafts
-// for the next ES standard specify a new third mode which is called 'extended
-// mode'. The extended mode is only available if the harmony flag is set. It is
-// based on the 'strict mode' and adds new functionality to it. This means that
-// most of the semantics of these two modes coincide.
-//
-// In the current draft the term 'base code' is used to refer to code that is
-// neither in strict nor extended mode. However, the more distinguishing term
-// 'classic mode' is used in V8 instead to avoid mix-ups.
-
-enum LanguageMode {
-  CLASSIC_MODE,
-  STRICT_MODE,
-  EXTENDED_MODE
-};
-
-
-// The Strict Mode (ECMA-262 5th edition, 4.2.2).
-//
-// This flag is used in the backend to represent the language mode. So far
-// there is no semantic difference between the strict and the extended mode in
-// the backend, so both modes are represented by the kStrictMode value.
-enum StrictModeFlag {
-  kNonStrictMode,
-  kStrictMode
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_GLOBALS_H_
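
The deleted comment block states that strict and extended mode share backend semantics, so both map to kStrictMode while classic mode maps to kNonStrictMode. That mapping is small enough to state as code (enums restated from the deleted block, purely to pin down the rule):

```cpp
// The mapping described in the deleted comment: the backend only
// distinguishes strict-like modes from classic mode.
enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };
enum StrictModeFlag { kNonStrictMode, kStrictMode };

StrictModeFlag ToStrictModeFlag(LanguageMode mode) {
  return mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode;
}

int main() {
  return ToStrictModeFlag(EXTENDED_MODE) == kStrictMode ? 0 : 1;
}
```
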
diff --git a/src/handles.cc b/src/handles.cc
index 416ecbd..c482fa6 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -190,11 +190,7 @@
 
   // Inobject slack tracking will reclaim redundant inobject space later,
   // so we can afford to adjust the estimate generously.
-  if (FLAG_clever_optimizations) {
-    return estimate + 8;
-  } else {
-    return estimate + 3;
-  }
+  return estimate + 8;
 }
 
 
@@ -208,6 +204,42 @@
 }
 
 
+void NormalizeProperties(Handle<JSObject> object,
+                         PropertyNormalizationMode mode,
+                         int expected_additional_properties) {
+  CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
+                          object->NormalizeProperties(
+                              mode,
+                              expected_additional_properties));
+}
+
+
+Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->NormalizeElements(),
+                     SeededNumberDictionary);
+}
+
+
+void TransformToFastProperties(Handle<JSObject> object,
+                               int unused_property_fields) {
+  CALL_HEAP_FUNCTION_VOID(
+      object->GetIsolate(),
+      object->TransformToFastProperties(unused_property_fields));
+}
+
+
+Handle<SeededNumberDictionary> SeededNumberDictionarySet(
+    Handle<SeededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value,
+    PropertyDetails details) {
+  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
+                     dictionary->Set(index, *value, details),
+                     SeededNumberDictionary);
+}
+
+
 void FlattenString(Handle<String> string) {
   CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten());
 }
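
All of the handle wrappers added below share one contract: forward to a raw allocating call, and if it signals a retry-after-GC failure, collect garbage and try again before rewrapping the result. A deliberately simplified sketch of that retry shape; every type here is a stand-in, and the real CALL_HEAP_FUNCTION additionally re-wraps handles and escalates to a hard OOM after repeated failures:

```cpp
#include <cstdio>

// Stand-in for the raw-call contract: a call either succeeds with a value
// or fails with a retry-after-GC request.
struct MaybeResult {
  bool success;
  int value;  // Stands in for a raw Object*.
};

// One simulated failure, then success -- as if the first attempt needed GC.
static int attempts = 0;
MaybeResult AllocatingOperation() {
  if (attempts++ == 0) return {false, 0};  // "Retry after GC".
  return {true, 42};
}

void CollectGarbage() { std::puts("gc"); }

// Simplified shape of CALL_HEAP_FUNCTION: try, GC on failure, retry once.
template <typename F>
int CallHeapFunction(F raw_call) {
  MaybeResult r = raw_call();
  if (!r.success) {
    CollectGarbage();
    r = raw_call();
  }
  return r.value;
}

int main() {
  int v = CallHeapFunction(AllocatingOperation);
  return v == 42 ? 0 : 1;
}
```
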
@@ -229,6 +261,17 @@
 }
 
 
+Handle<Object> SetProperty(Handle<JSReceiver> object,
+                           Handle<String> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict_mode) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetProperty(*key, *value, attributes, strict_mode),
+                     Object);
+}
+
+
 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
@@ -256,6 +299,16 @@
 }
 
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetNormalizedProperty(*key, *value, details),
+                     Object);
+}
+
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key) {
   Isolate* isolate = object->GetIsolate();
@@ -265,6 +318,30 @@
 }
 
 
+Handle<Object> SetLocalPropertyIgnoreAttributes(
+    Handle<JSObject> object,
+    Handle<String> key,
+    Handle<Object> value,
+    PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(
+    object->GetIsolate(),
+    object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
+    Object);
+}
+
+
+void SetLocalPropertyNoThrow(Handle<JSObject> object,
+                             Handle<String> key,
+                             Handle<Object> value,
+                             PropertyAttributes attributes) {
+  Isolate* isolate = object->GetIsolate();
+  ASSERT(!isolate->has_pending_exception());
+  CHECK(!SetLocalPropertyIgnoreAttributes(
+        object, key, value, attributes).is_null());
+  CHECK(!isolate->has_pending_exception());
+}
+
+
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
@@ -295,6 +372,24 @@
 }
 
 
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
+                           Handle<String> name,
+                           LookupResult* result) {
+  PropertyAttributes attributes;
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate,
+                     obj->GetProperty(*obj, result, *name, &attributes),
+                     Object);
+}
+
+
+Handle<Object> GetElement(Handle<Object> obj,
+                          uint32_t index) {
+  Isolate* isolate = Isolate::Current();
+  CALL_HEAP_FUNCTION(isolate, Runtime::GetElement(obj, index), Object);
+}
+
+
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
@@ -308,6 +403,12 @@
 }
 
 
+Handle<Object> GetPrototype(Handle<Object> obj) {
+  Handle<Object> result(obj->GetPrototype());
+  return result;
+}
+
+
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
   const bool skip_hidden_prototypes = false;
   CALL_HEAP_FUNCTION(obj->GetIsolate(),
@@ -315,6 +416,43 @@
 }
 
 
+Handle<Object> PreventExtensions(Handle<JSObject> object) {
+  CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
+}
+
+
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+                                   JSObject::HiddenPropertiesFlag flag) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->GetHiddenProperties(flag),
+                     Object);
+}
+
+
+int GetIdentityHash(Handle<JSObject> obj) {
+  CALL_AND_RETRY(obj->GetIsolate(),
+                 obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+                 return Smi::cast(__object__)->value(),
+                 return 0);
+}
+
+
+Handle<Object> DeleteElement(Handle<JSObject> obj,
+                             uint32_t index) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->DeleteElement(index, JSObject::NORMAL_DELETION),
+                     Object);
+}
+
+
+Handle<Object> DeleteProperty(Handle<JSObject> obj,
+                              Handle<String> prop) {
+  CALL_HEAP_FUNCTION(obj->GetIsolate(),
+                     obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
+                     Object);
+}
+
+
 Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
   Isolate* isolate = Isolate::Current();
   CALL_HEAP_FUNCTION(
@@ -332,6 +470,35 @@
 }
 
 
+Handle<Object> SetElement(Handle<JSObject> object,
+                          uint32_t index,
+                          Handle<Object> value,
+                          StrictModeFlag strict_mode) {
+  if (object->HasExternalArrayElements()) {
+    if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
+      bool has_exception;
+      Handle<Object> number = Execution::ToNumber(value, &has_exception);
+      if (has_exception) return Handle<Object>();
+      value = number;
+    }
+  }
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetElement(index, *value, strict_mode, true),
+                     Object);
+}
+
+
+Handle<Object> SetOwnElement(Handle<JSObject> object,
+                             uint32_t index,
+                             Handle<Object> value,
+                             StrictModeFlag strict_mode) {
+  ASSERT(!object->HasExternalArrayElements());
+  CALL_HEAP_FUNCTION(object->GetIsolate(),
+                     object->SetElement(index, *value, strict_mode, false),
+                     Object);
+}
+
+
 Handle<JSObject> Copy(Handle<JSObject> obj) {
   Isolate* isolate = obj->GetIsolate();
   CALL_HEAP_FUNCTION(isolate,
@@ -354,9 +521,8 @@
   Handle<Object> cache = Utils::OpenHandle(*handle);
   JSValue* wrapper = JSValue::cast(*cache);
   Foreign* foreign = Script::cast(wrapper->value())->wrapper();
-  ASSERT(foreign->foreign_address() ==
-         reinterpret_cast<Address>(cache.location()));
-  foreign->set_foreign_address(0);
+  ASSERT(foreign->address() == reinterpret_cast<Address>(cache.location()));
+  foreign->set_address(0);
   Isolate* isolate = Isolate::Current();
   isolate->global_handles()->Destroy(cache.location());
   isolate->counters()->script_wrappers()->Decrement();
@@ -364,10 +530,10 @@
 
 
 Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
-  if (script->wrapper()->foreign_address() != NULL) {
+  if (script->wrapper()->address() != NULL) {
     // Return the script wrapper directly from the cache.
     return Handle<JSValue>(
-        reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+        reinterpret_cast<JSValue**>(script->wrapper()->address()));
   }
   Isolate* isolate = Isolate::Current();
   // Construct a new script wrapper.
@@ -383,8 +549,7 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   isolate->global_handles()->MakeWeak(handle.location(), NULL,
                                       &ClearWrapperCache);
-  script->wrapper()->set_foreign_address(
-      reinterpret_cast<Address>(handle.location()));
+  script->wrapper()->set_address(reinterpret_cast<Address>(handle.location()));
   return result;
 }
 
@@ -500,19 +665,6 @@
   return right + script->line_offset()->value();
 }
 
-// Convert code position into column number.
-int GetScriptColumnNumber(Handle<Script> script, int code_pos) {
-  int line_number = GetScriptLineNumber(script, code_pos);
-  if (line_number == -1) return -1;
-
-  AssertNoAllocation no_allocation;
-  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
-  line_number = line_number - script->line_offset()->value();
-  if (line_number == 0) return code_pos + script->column_offset()->value();
-  int prev_line_end_pos =
-      Smi::cast(line_ends_array->get(line_number - 1))->value();
-  return code_pos - (prev_line_end_pos + 1);
-}
 
 int GetScriptLineNumberSafe(Handle<Script> script, int code_pos) {
   AssertNoAllocation no_allocation;
@@ -544,7 +696,7 @@
 
 
 // Compute the property keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
                                                  Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
@@ -566,7 +718,7 @@
 
 
 // Compute the element keys from the interceptor.
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
                                                    Handle<JSObject> object) {
   Isolate* isolate = receiver->GetIsolate();
   Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
@@ -597,9 +749,8 @@
 }
 
 
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
-                                          KeyCollectionType type,
-                                          bool* threw) {
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+                                          KeyCollectionType type) {
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
@@ -614,16 +765,6 @@
   for (Handle<Object> p = object;
        *p != isolate->heap()->null_value();
        p = Handle<Object>(p->GetPrototype(), isolate)) {
-    if (p->IsJSProxy()) {
-      Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
-      Handle<Object> args[] = { proxy };
-      Handle<Object> names = Execution::Call(
-          isolate->proxy_enumerate(), object, ARRAY_SIZE(args), args, threw);
-      if (*threw) return content;
-      content = AddKeysFromJSArray(content, Handle<JSArray>::cast(names));
-      break;
-    }
-
     Handle<JSObject> current(JSObject::cast(*p), isolate);
 
     // Check access rights if required.
@@ -690,11 +831,11 @@
 }
 
 
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw) {
+Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
   Isolate* isolate = object->GetIsolate();
   isolate->counters()->for_in()->Increment();
-  Handle<FixedArray> elements =
-      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, threw);
+  Handle<FixedArray> elements = GetKeysInFixedArrayFor(object,
+                                                       INCLUDE_PROTOS);
   return isolate->factory()->NewJSArrayWithElements(elements);
 }
 
@@ -711,62 +852,31 @@
                                 isolate);
     }
     isolate->counters()->enum_cache_misses()->Increment();
-    Handle<Map> map(object->map());
-    int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
-
+    int num_enum = object->NumberOfEnumProperties();
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
-
-    Handle<FixedArray> indices;
-    Handle<FixedArray> sort_array2;
-
-    if (cache_result) {
-      indices = isolate->factory()->NewFixedArray(num_enum);
-      sort_array2 = isolate->factory()->NewFixedArray(num_enum);
-    }
-
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(object->map()->instance_descriptors(), isolate);
-
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
       if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
-        storage->set(index, descs->GetKey(i));
+        (*storage)->set(index, descs->GetKey(i));
         PropertyDetails details(descs->GetDetails(i));
-        sort_array->set(index, Smi::FromInt(details.index()));
-        if (!indices.is_null()) {
-          if (details.type() != FIELD) {
-            indices = Handle<FixedArray>();
-            sort_array2 = Handle<FixedArray>();
-          } else {
-            int field_index = Descriptor::IndexFromValue(descs->GetValue(i));
-            if (field_index >= map->inobject_properties()) {
-              field_index = -(field_index - map->inobject_properties() + 1);
-            }
-            indices->set(index, Smi::FromInt(field_index));
-            sort_array2->set(index, Smi::FromInt(details.index()));
-          }
-        }
+        (*sort_array)->set(index, Smi::FromInt(details.index()));
         index++;
       }
     }
-    storage->SortPairs(*sort_array, sort_array->length());
-    if (!indices.is_null()) {
-      indices->SortPairs(*sort_array2, sort_array2->length());
-    }
+    (*storage)->SortPairs(*sort_array, sort_array->length());
     if (cache_result) {
       Handle<FixedArray> bridge_storage =
           isolate->factory()->NewFixedArray(
               DescriptorArray::kEnumCacheBridgeLength);
       DescriptorArray* desc = object->map()->instance_descriptors();
-      desc->SetEnumCache(*bridge_storage,
-                         *storage,
-                         indices.is_null() ? Object::cast(Smi::FromInt(0))
-                                           : Object::cast(*indices));
+      desc->SetEnumCache(*bridge_storage, *storage);
     }
     ASSERT(storage->length() == index);
     return storage;
   } else {
-    int num_enum = object->NumberOfLocalProperties(DONT_ENUM);
+    int num_enum = object->NumberOfEnumProperties();
     Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = isolate->factory()->NewFixedArray(num_enum);
     object->property_dictionary()->CopyEnumKeysTo(*storage, *sort_array);
@@ -775,24 +885,8 @@
 }
 
 
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
-                                       Handle<Object> key) {
-  CALL_HEAP_FUNCTION(table->GetIsolate(),
-                     table->Add(*key),
-                     ObjectHashSet);
-}
-
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
-                                          Handle<Object> key) {
-  CALL_HEAP_FUNCTION(table->GetIsolate(),
-                     table->Remove(*key),
-                     ObjectHashSet);
-}
-
-
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<Object> key,
+                                               Handle<JSObject> key,
                                                Handle<Object> value) {
   CALL_HEAP_FUNCTION(table->GetIsolate(),
                      table->Put(*key, *value),
@@ -800,162 +894,53 @@
 }
 
 
-// This method determines the type of string involved and then gets the UTF8
-// length of the string.  It doesn't flatten the string and has log(n) recursion
-// for a string of length n.  If the failure flag gets set, then we have to
-// flatten the string and retry.  Failures are caused by surrogate pairs in deep
-// cons strings.
-
-// Single surrogate characters that are encountered in the UTF-16 character
-// sequence of the input string get counted as 3 UTF-8 bytes, because that
-// is the way that WriteUtf8 will encode them.  Surrogate pairs are counted and
-// encoded as one 4-byte UTF-8 sequence.
-
-// This function conceptually uses recursion on the two halves of cons strings.
-// However, in order to avoid the recursion going too deep it recurses on the
-// second string of the cons, but iterates on the first substring (by manually
-// eliminating it as a tail recursion).  This means it counts the UTF-8 length
-// from the end to the start, which makes no difference to the total.
-
-// Surrogate pairs are recognized even if they are split across two sides of a
-// cons, which complicates the implementation somewhat.  Therefore, too deep
-// recursion cannot always be avoided.  This case is detected, and the failure
-// flag is set, a signal to the caller that the string should be flattened and
-// the operation retried.
-int Utf8LengthHelper(String* input,
-                     int from,
-                     int to,
-                     bool followed_by_surrogate,
-                     int max_recursion,
-                     bool* failure,
-                     bool* starts_with_surrogate) {
-  if (from == to) return 0;
-  int total = 0;
-  bool dummy;
-  while (true) {
-    if (input->IsAsciiRepresentation()) {
-      *starts_with_surrogate = false;
-      return total + to - from;
-    }
-    switch (StringShape(input).representation_tag()) {
-      case kConsStringTag: {
-        ConsString* str = ConsString::cast(input);
-        String* first = str->first();
-        String* second = str->second();
-        int first_length = first->length();
-        if (first_length - from > to - first_length) {
-          if (first_length < to) {
-            // Right hand side is shorter.  No need to check the recursion depth
-            // since this can only happen log(n) times.
-            bool right_starts_with_surrogate = false;
-            total += Utf8LengthHelper(second,
-                                      0,
-                                      to - first_length,
-                                      followed_by_surrogate,
-                                      max_recursion - 1,
-                                      failure,
-                                      &right_starts_with_surrogate);
-            if (*failure) return 0;
-            followed_by_surrogate = right_starts_with_surrogate;
-            input = first;
-            to = first_length;
-          } else {
-            // We only need the left hand side.
-            input = first;
-          }
-        } else {
-          if (first_length > from) {
-            // Left hand side is shorter.
-            if (first->IsAsciiRepresentation()) {
-              total += first_length - from;
-              *starts_with_surrogate = false;
-              starts_with_surrogate = &dummy;
-              input = second;
-              from = 0;
-              to -= first_length;
-            } else if (second->IsAsciiRepresentation()) {
-              followed_by_surrogate = false;
-              total += to - first_length;
-              input = first;
-              to = first_length;
-            } else if (max_recursion > 0) {
-              bool right_starts_with_surrogate = false;
-              // Recursing on the long one.  This may fail.
-              total += Utf8LengthHelper(second,
-                                        0,
-                                        to - first_length,
-                                        followed_by_surrogate,
-                                        max_recursion - 1,
-                                        failure,
-                                        &right_starts_with_surrogate);
-              if (*failure) return 0;
-              input = first;
-              to = first_length;
-              followed_by_surrogate = right_starts_with_surrogate;
-            } else {
-              *failure = true;
-              return 0;
-            }
-          } else {
-            // We only need the right hand side.
-            input = second;
-            from = 0;
-            to -= first_length;
-          }
-        }
-        continue;
-      }
-      case kExternalStringTag:
-      case kSeqStringTag: {
-        Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
-        const uc16* p = vector.start();
-        int previous = unibrow::Utf16::kNoPreviousCharacter;
-        for (int i = from; i < to; i++) {
-          uc16 c = p[i];
-          total += unibrow::Utf8::Length(c, previous);
-          previous = c;
-        }
-        if (to - from > 0) {
-          if (unibrow::Utf16::IsLeadSurrogate(previous) &&
-              followed_by_surrogate) {
-            total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
-          }
-          if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
-            *starts_with_surrogate = true;
-          }
-        }
-        return total;
-      }
-      case kSlicedStringTag: {
-        SlicedString* str = SlicedString::cast(input);
-        int offset = str->offset();
-        input = str->parent();
-        from += offset;
-        to += offset;
-        continue;
-      }
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return 0;
-  }
-  return 0;
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                    ClearExceptionFlag flag) {
+  return shared->is_compiled() || CompileLazyShared(shared, flag);
 }
 
 
-int Utf8Length(Handle<String> str) {
-  bool dummy;
-  bool failure;
-  int len;
-  const int kRecursionBudget = 100;
-  do {
-    failure = false;
-    len = Utf8LengthHelper(
-        *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
-    if (failure) FlattenString(str);
-  } while (failure);
-  return len;
+static bool CompileLazyHelper(CompilationInfo* info,
+                              ClearExceptionFlag flag) {
+  // Compile the source information to a code object.
+  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
+  ASSERT(!info->isolate()->has_pending_exception());
+  bool result = Compiler::CompileLazy(info);
+  ASSERT(result != Isolate::Current()->has_pending_exception());
+  if (!result && flag == CLEAR_EXCEPTION) {
+    info->isolate()->clear_pending_exception();
+  }
+  return result;
+}
+
+
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+                       ClearExceptionFlag flag) {
+  CompilationInfo info(shared);
+  return CompileLazyHelper(&info, flag);
+}
+
+
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
+  bool result = true;
+  if (function->shared()->is_compiled()) {
+    function->ReplaceCode(function->shared()->code());
+    function->shared()->set_code_age(0);
+  } else {
+    CompilationInfo info(function);
+    result = CompileLazyHelper(&info, flag);
+    ASSERT(!result || function->is_compiled());
+  }
+  return result;
+}
+
+
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag) {
+  CompilationInfo info(function);
+  info.SetOptimizing(osr_ast_id);
+  return CompileLazyHelper(&info, flag);
 }
 
 } }  // namespace v8::internal
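For orientation, not part of the patch itself: the hunk above drops the 3.7 UTF-8 length machinery and restores the pre-3.7 lazy-compilation helpers. A minimal sketch of the calling convention those helpers imply; the wrapper function below is hypothetical, everything it calls is declared in this change.

    static bool EnsureFunctionIsCompiled(Handle<JSFunction> function) {
      // Fast path: shared code already exists, so CompileLazy would just
      // install it on the function.
      if (function->shared()->is_compiled()) return true;
      // KEEP_EXCEPTION leaves a pending exception for the caller to handle;
      // CLEAR_EXCEPTION, the other ClearExceptionFlag value, would clear it.
      if (!CompileLazy(function, KEEP_EXCEPTION)) return false;
      ASSERT(function->is_compiled());
      return true;
    }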
diff --git a/src/handles.h b/src/handles.h
index 960696b..5674120 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -167,6 +167,18 @@
 // an object of expected type, or the handle is an error if running out
 // of space or encountering an internal error.
 
+void NormalizeProperties(Handle<JSObject> object,
+                         PropertyNormalizationMode mode,
+                         int expected_additional_properties);
+Handle<SeededNumberDictionary> NormalizeElements(Handle<JSObject> object);
+void TransformToFastProperties(Handle<JSObject> object,
+                               int unused_property_fields);
+MUST_USE_RESULT Handle<SeededNumberDictionary> SeededNumberDictionarySet(
+    Handle<SeededNumberDictionary> dictionary,
+    uint32_t index,
+    Handle<Object> value,
+    PropertyDetails details);
+
 // Flattens a string.
 void FlattenString(Handle<String> str);
 
@@ -174,7 +186,11 @@
 // string.
 Handle<String> FlattenGetString(Handle<String> str);
 
-int Utf8Length(Handle<String> str);
+Handle<Object> SetProperty(Handle<JSReceiver> object,
+                           Handle<String> key,
+                           Handle<Object> value,
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict_mode);
 
 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
@@ -187,22 +203,78 @@
                                 Handle<Object> value,
                                 PropertyAttributes attributes);
 
+Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
+                                     Handle<String> key,
+                                     Handle<Object> value,
+                                     PropertyDetails details);
+
 Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
                                    Handle<Object> key);
 
+Handle<Object> SetLocalPropertyIgnoreAttributes(
+    Handle<JSObject> object,
+    Handle<String> key,
+    Handle<Object> value,
+    PropertyAttributes attributes);
+
+// Used to set local properties on an object we fully control
+// and which therefore has no accessors or the like.
+void SetLocalPropertyNoThrow(Handle<JSObject> object,
+                             Handle<String> key,
+                             Handle<Object> value,
+                             PropertyAttributes attributes = NONE);
+
+Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
+                                          Handle<String> key,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict_mode);
+
+MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
+                                          uint32_t index,
+                                          Handle<Object> value,
+                                          StrictModeFlag strict_mode);
+
+Handle<Object> SetOwnElement(Handle<JSObject> object,
+                             uint32_t index,
+                             Handle<Object> value,
+                             StrictModeFlag strict_mode);
+
 Handle<Object> GetProperty(Handle<JSReceiver> obj,
                            const char* name);
 
 Handle<Object> GetProperty(Handle<Object> obj,
                            Handle<Object> key);
 
+Handle<Object> GetProperty(Handle<JSReceiver> obj,
+                           Handle<String> name,
+                           LookupResult* result);
+
+
+Handle<Object> GetElement(Handle<Object> obj,
+                          uint32_t index);
+
 Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
                                           Handle<JSObject> holder,
                                           Handle<String> name,
                                           PropertyAttributes* attributes);
 
+Handle<Object> GetPrototype(Handle<Object> obj);
+
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
 
+// Return the object's hidden properties object. If the object has no hidden
+// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
+// hidden property object will be allocated. Otherwise Heap::undefined_value
+// is returned.
+Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
+                                   JSObject::HiddenPropertiesFlag flag);
+
+int GetIdentityHash(Handle<JSObject> obj);
+
+Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
+Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
+
 Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
 
 Handle<JSObject> Copy(Handle<JSObject> obj);
@@ -226,23 +298,21 @@
 int GetScriptLineNumber(Handle<Script> script, int code_position);
 // The safe version does not make heap allocations but may work much slower.
 int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
-int GetScriptColumnNumber(Handle<Script> script, int code_position);
 
 // Computes the enumerable keys from interceptors. Used for debug mirrors and
 // by GetKeysInFixedArrayFor below.
-v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver,
+v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSObject> receiver,
                                                  Handle<JSObject> object);
-v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
+v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSObject> receiver,
                                                    Handle<JSObject> object);
 
 enum KeyCollectionType { LOCAL_ONLY, INCLUDE_PROTOS };
 
 // Computes the enumerable keys for a JSObject. Used for implementing
 // "for (n in object) { }".
-Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
-                                          KeyCollectionType type,
-                                          bool* threw);
-Handle<JSArray> GetKeysFor(Handle<JSReceiver> object, bool* threw);
+Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object,
+                                          KeyCollectionType type);
+Handle<JSArray> GetKeysFor(Handle<JSObject> object);
 Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
                                        bool cache_result);
 
@@ -256,6 +326,7 @@
                          int end,
                          PretenureFlag pretenure = NOT_TENURED);
 
+
 // Sets the expected number of properties for the function's instances.
 void SetExpectedNofProperties(Handle<JSFunction> func, int nof);
 
@@ -274,16 +345,28 @@
 Handle<Object> SetPrototype(Handle<JSFunction> function,
                             Handle<Object> prototype);
 
-Handle<ObjectHashSet> ObjectHashSetAdd(Handle<ObjectHashSet> table,
-                                       Handle<Object> key);
-
-Handle<ObjectHashSet> ObjectHashSetRemove(Handle<ObjectHashSet> table,
-                                          Handle<Object> key);
+Handle<Object> PreventExtensions(Handle<JSObject> object);
 
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<Object> key,
+                                               Handle<JSObject> key,
                                                Handle<Object> value);
 
+// Does lazy compilation of the given function. Returns true on success and
+// false if the compilation resulted in a stack overflow.
+enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
+
+bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
+                    ClearExceptionFlag flag);
+
+bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
+                       ClearExceptionFlag flag);
+
+bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
+
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag);
+
 class NoHandleAllocation BASE_EMBEDDED {
  public:
 #ifndef DEBUG
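Side note on the declarations restored above, with a hedged usage sketch. The helper below is hypothetical, and kNonStrictMode is assumed to be this era's StrictModeFlag spelling; everything else is declared in this header.

    static Handle<Object> StoreAndReload(Handle<JSReceiver> object,
                                         Handle<String> key,
                                         Handle<Object> value) {
      Handle<Object> result =
          SetProperty(object, key, value, NONE, kNonStrictMode);
      // An empty handle signals a pending exception.
      if (result.is_null()) return result;
      // Resolves to the two-argument GetProperty(Handle<Object>, ...) above.
      return GetProperty(object, key);
    }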
diff --git a/src/hashmap.cc b/src/hashmap.cc
new file mode 100644
index 0000000..1422afd
--- /dev/null
+++ b/src/hashmap.cc
@@ -0,0 +1,230 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "../include/v8stdint.h"
+#include "globals.h"
+#include "checks.h"
+#include "utils.h"
+#include "allocation.h"
+
+#include "hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+Allocator HashMap::DefaultAllocator;
+
+
+HashMap::HashMap() {
+  allocator_ = NULL;
+  match_ = NULL;
+}
+
+
+HashMap::HashMap(MatchFun match,
+                 Allocator* allocator,
+                 uint32_t initial_capacity) {
+  allocator_ = allocator;
+  match_ = match;
+  Initialize(initial_capacity);
+}
+
+
+HashMap::~HashMap() {
+  if (allocator_) {
+    allocator_->Delete(map_);
+  }
+}
+
+
+HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
+  // Find a matching entry.
+  Entry* p = Probe(key, hash);
+  if (p->key != NULL) {
+    return p;
+  }
+
+  // No entry found; insert one if necessary.
+  if (insert) {
+    p->key = key;
+    p->value = NULL;
+    p->hash = hash;
+    occupancy_++;
+
+    // Grow the map if we reached >= 80% occupancy.
+    if (occupancy_ + occupancy_/4 >= capacity_) {
+      Resize();
+      p = Probe(key, hash);
+    }
+
+    return p;
+  }
+
+  // No entry found and none inserted.
+  return NULL;
+}
+
+
+void HashMap::Remove(void* key, uint32_t hash) {
+  // Lookup the entry for the key to remove.
+  Entry* p = Probe(key, hash);
+  if (p->key == NULL) {
+    // Key not found; nothing to remove.
+    return;
+  }
+
+  // To remove an entry we need to ensure that it does not create an empty
+  // entry that will cause the search for another entry to stop too soon. If all
+  // the entries between the entry to remove and the next empty slot have their
+  // initial position inside this interval, clearing the entry to remove will
+  // not break the search. If, while searching for the next empty entry, an
+  // entry is encountered which does not have its initial position between the
+  // entry to remove and the position looked at, then this entry can be moved to
+  // the place of the entry to remove without breaking the search for it. The
+  // entry made vacant by this move is now the entry to remove and the process
+  // starts over.
+  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
+
+  // This guarantees loop termination as there is at least one empty entry so
+  // eventually the removed entry will have an empty entry after it.
+  ASSERT(occupancy_ < capacity_);
+
+  // p is the candidate entry to clear. q is used to scan forwards.
+  Entry* q = p;  // Start at the entry to remove.
+  while (true) {
+    // Move q to the next entry.
+    q = q + 1;
+    if (q == map_end()) {
+      q = map_;
+    }
+
+    // All entries between p and q have their initial position between p and q
+    // and the entry p can be cleared without breaking the search for these
+    // entries.
+    if (q->key == NULL) {
+      break;
+    }
+
+    // Find the initial position for the entry at position q.
+    Entry* r = map_ + (q->hash & (capacity_ - 1));
+
+    // If the entry at position q has its initial position outside the range
+    // between p and q it can be moved forward to position p and will still be
+    // found. There is now a new candidate entry for clearing.
+    if ((q > p && (r <= p || r > q)) ||
+        (q < p && (r <= p && r > q))) {
+      *p = *q;
+      p = q;
+    }
+  }
+
+  // Clear the entry which is allowed to be emptied.
+  p->key = NULL;
+  occupancy_--;
+}
+
+
+void HashMap::Clear() {
+  // Mark all entries as empty.
+  const Entry* end = map_end();
+  for (Entry* p = map_; p < end; p++) {
+    p->key = NULL;
+  }
+  occupancy_ = 0;
+}
+
+
+HashMap::Entry* HashMap::Start() const {
+  return Next(map_ - 1);
+}
+
+
+HashMap::Entry* HashMap::Next(Entry* p) const {
+  const Entry* end = map_end();
+  ASSERT(map_ - 1 <= p && p < end);
+  for (p++; p < end; p++) {
+    if (p->key != NULL) {
+      return p;
+    }
+  }
+  return NULL;
+}
+
+
+HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
+  ASSERT(key != NULL);
+
+  ASSERT(IsPowerOf2(capacity_));
+  Entry* p = map_ + (hash & (capacity_ - 1));
+  const Entry* end = map_end();
+  ASSERT(map_ <= p && p < end);
+
+  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
+  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
+    p++;
+    if (p >= end) {
+      p = map_;
+    }
+  }
+
+  return p;
+}
+
+
+void HashMap::Initialize(uint32_t capacity) {
+  ASSERT(IsPowerOf2(capacity));
+  map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
+  if (map_ == NULL) {
+    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
+    return;
+  }
+  capacity_ = capacity;
+  Clear();
+}
+
+
+void HashMap::Resize() {
+  Entry* map = map_;
+  uint32_t n = occupancy_;
+
+  // Allocate larger map.
+  Initialize(capacity_ * 2);
+
+  // Rehash all current entries.
+  for (Entry* p = map; n > 0; p++) {
+    if (p->key != NULL) {
+      Lookup(p->key, p->hash, true)->value = p->value;
+      n--;
+    }
+  }
+
+  // Delete old map.
+  allocator_->Delete(map);
+}
+
+
+} }  // namespace v8::internal
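A hedged usage sketch for the reinstated HashMap, not part of the patch: keys are opaque void* values, and the caller supplies both the equality predicate and the hash.

    #include <string.h>  // strcmp, for the sample match function

    static bool MatchCString(void* key1, void* key2) {
      return strcmp(reinterpret_cast<char*>(key1),
                    reinterpret_cast<char*>(key2)) == 0;
    }

    static void HashMapDemo() {
      HashMap map(MatchCString);  // DefaultAllocator, initial capacity 8
      static char key[] = "answer";
      const uint32_t hash = 0x2a;  // any per-key stable hash will do
      // insert == true adds the entry if missing; value starts out NULL.
      HashMap::Entry* entry = map.Lookup(key, hash, true);
      entry->value = reinterpret_cast<void*>(42);
      // Iteration visits every occupied entry in unspecified order.
      for (HashMap::Entry* p = map.Start(); p != NULL; p = map.Next(p)) {
        // p->key, p->value and p->hash are live here.
      }
      map.Remove(key, hash);
    }

Remove implements the open-addressing deletion described in the comment above: displaced entries are shifted back rather than replaced by tombstones, so probe chains never accumulate dead slots.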
diff --git a/src/hashmap.h b/src/hashmap.h
index 5aeb895..5c13212 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,22 +29,39 @@
 #define V8_HASHMAP_H_
 
 #include "allocation.h"
-#include "checks.h"
-#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-template<class AllocationPolicy>
-class TemplateHashMapImpl {
+
+// Allocator defines the memory allocator interface
+// used by HashMap and implements a default allocator.
+class Allocator BASE_EMBEDDED {
  public:
+  virtual ~Allocator()  {}
+  virtual void* New(size_t size)  { return Malloced::New(size); }
+  virtual void Delete(void* p)  { Malloced::Delete(p); }
+};
+
+
+class HashMap {
+ public:
+  static Allocator DefaultAllocator;
+
   typedef bool (*MatchFun) (void* key1, void* key2);
 
+  // Dummy constructor.  This constructor doesn't set up the hash
+  // map properly so don't use it unless you have good reason (e.g.,
+  // you know that the HashMap will never be used).
+  HashMap();
+
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
-  TemplateHashMapImpl(MatchFun match, uint32_t initial_capacity = 8);
+  explicit HashMap(MatchFun match,
+                   Allocator* allocator = &DefaultAllocator,
+                   uint32_t initial_capacity = 8);
 
-  ~TemplateHashMapImpl();
+  ~HashMap();
 
   // HashMap entries are (key, value, hash) triplets.
   // Some clients may not need to use the value slot
@@ -88,6 +105,7 @@
   Entry* Next(Entry* p) const;
 
  private:
+  Allocator* allocator_;
   MatchFun match_;
   Entry* map_;
   uint32_t capacity_;
@@ -99,241 +117,6 @@
   void Resize();
 };
 
-typedef TemplateHashMapImpl<FreeStoreAllocationPolicy> HashMap;
-
-template<class P>
-TemplateHashMapImpl<P>::TemplateHashMapImpl(MatchFun match,
-                    uint32_t initial_capacity) {
-  match_ = match;
-  Initialize(initial_capacity);
-}
-
-
-template<class P>
-TemplateHashMapImpl<P>::~TemplateHashMapImpl() {
-  P::Delete(map_);
-}
-
-
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Lookup(
-    void* key, uint32_t hash, bool insert) {
-  // Find a matching entry.
-  Entry* p = Probe(key, hash);
-  if (p->key != NULL) {
-    return p;
-  }
-
-  // No entry found; insert one if necessary.
-  if (insert) {
-    p->key = key;
-    p->value = NULL;
-    p->hash = hash;
-    occupancy_++;
-
-    // Grow the map if we reached >= 80% occupancy.
-    if (occupancy_ + occupancy_/4 >= capacity_) {
-      Resize();
-      p = Probe(key, hash);
-    }
-
-    return p;
-  }
-
-  // No entry found and none inserted.
-  return NULL;
-}
-
-
-template<class P>
-void TemplateHashMapImpl<P>::Remove(void* key, uint32_t hash) {
-  // Lookup the entry for the key to remove.
-  Entry* p = Probe(key, hash);
-  if (p->key == NULL) {
-    // Key not found nothing to remove.
-    return;
-  }
-
-  // To remove an entry we need to ensure that it does not create an empty
-  // entry that will cause the search for another entry to stop too soon. If all
-  // the entries between the entry to remove and the next empty slot have their
-  // initial position inside this interval, clearing the entry to remove will
-  // not break the search. If, while searching for the next empty entry, an
-  // entry is encountered which does not have its initial position between the
-  // entry to remove and the position looked at, then this entry can be moved to
-  // the place of the entry to remove without breaking the search for it. The
-  // entry made vacant by this move is now the entry to remove and the process
-  // starts over.
-  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
-
-  // This guarantees loop termination as there is at least one empty entry so
-  // eventually the removed entry will have an empty entry after it.
-  ASSERT(occupancy_ < capacity_);
-
-  // p is the candidate entry to clear. q is used to scan forwards.
-  Entry* q = p;  // Start at the entry to remove.
-  while (true) {
-    // Move q to the next entry.
-    q = q + 1;
-    if (q == map_end()) {
-      q = map_;
-    }
-
-    // All entries between p and q have their initial position between p and q
-    // and the entry p can be cleared without breaking the search for these
-    // entries.
-    if (q->key == NULL) {
-      break;
-    }
-
-    // Find the initial position for the entry at position q.
-    Entry* r = map_ + (q->hash & (capacity_ - 1));
-
-    // If the entry at position q has its initial position outside the range
-    // between p and q it can be moved forward to position p and will still be
-    // found. There is now a new candidate entry for clearing.
-    if ((q > p && (r <= p || r > q)) ||
-        (q < p && (r <= p && r > q))) {
-      *p = *q;
-      p = q;
-    }
-  }
-
-  // Clear the entry which is allowed to en emptied.
-  p->key = NULL;
-  occupancy_--;
-}
-
-
-template<class P>
-void TemplateHashMapImpl<P>::Clear() {
-  // Mark all entries as empty.
-  const Entry* end = map_end();
-  for (Entry* p = map_; p < end; p++) {
-    p->key = NULL;
-  }
-  occupancy_ = 0;
-}
-
-
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Start() const {
-  return Next(map_ - 1);
-}
-
-
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Next(Entry* p)
-    const {
-  const Entry* end = map_end();
-  ASSERT(map_ - 1 <= p && p < end);
-  for (p++; p < end; p++) {
-    if (p->key != NULL) {
-      return p;
-    }
-  }
-  return NULL;
-}
-
-
-template<class P>
-typename TemplateHashMapImpl<P>::Entry* TemplateHashMapImpl<P>::Probe(void* key,
-                                                            uint32_t hash) {
-  ASSERT(key != NULL);
-
-  ASSERT(IsPowerOf2(capacity_));
-  Entry* p = map_ + (hash & (capacity_ - 1));
-  const Entry* end = map_end();
-  ASSERT(map_ <= p && p < end);
-
-  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
-  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
-    p++;
-    if (p >= end) {
-      p = map_;
-    }
-  }
-
-  return p;
-}
-
-
-template<class P>
-void TemplateHashMapImpl<P>::Initialize(uint32_t capacity) {
-  ASSERT(IsPowerOf2(capacity));
-  map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
-  if (map_ == NULL) {
-    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
-    return;
-  }
-  capacity_ = capacity;
-  Clear();
-}
-
-
-template<class P>
-void TemplateHashMapImpl<P>::Resize() {
-  Entry* map = map_;
-  uint32_t n = occupancy_;
-
-  // Allocate larger map.
-  Initialize(capacity_ * 2);
-
-  // Rehash all current entries.
-  for (Entry* p = map; n > 0; p++) {
-    if (p->key != NULL) {
-      Lookup(p->key, p->hash, true)->value = p->value;
-      n--;
-    }
-  }
-
-  // Delete old map.
-  P::Delete(map);
-}
-
-
-// A hash map for pointer keys and values with an STL-like interface.
-template<class Key, class Value, class AllocationPolicy>
-class TemplateHashMap: private TemplateHashMapImpl<AllocationPolicy> {
- public:
-  STATIC_ASSERT(sizeof(Key*) == sizeof(void*));  // NOLINT
-  STATIC_ASSERT(sizeof(Value*) == sizeof(void*));  // NOLINT
-  struct value_type {
-    Key* first;
-    Value* second;
-  };
-
-  class Iterator {
-   public:
-    Iterator& operator++() {
-      entry_ = map_->Next(entry_);
-      return *this;
-    }
-
-    value_type* operator->() { return reinterpret_cast<value_type*>(entry_); }
-    bool operator!=(const Iterator& other) { return  entry_ != other.entry_; }
-
-   private:
-    Iterator(const TemplateHashMapImpl<AllocationPolicy>* map,
-             typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry) :
-        map_(map), entry_(entry) { }
-
-    const TemplateHashMapImpl<AllocationPolicy>* map_;
-    typename TemplateHashMapImpl<AllocationPolicy>::Entry* entry_;
-
-    friend class TemplateHashMap;
-  };
-
-  TemplateHashMap(
-      typename TemplateHashMapImpl<AllocationPolicy>::MatchFun match)
-    : TemplateHashMapImpl<AllocationPolicy>(match) { }
-
-  Iterator begin() const { return Iterator(this, this->Start()); }
-  Iterator end() const { return Iterator(this, NULL); }
-  Iterator find(Key* key, bool insert = false) {
-    return Iterator(this, this->Lookup(key, key->Hash(), insert));
-  }
-};
 
 } }  // namespace v8::internal
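A hedged sketch of the Allocator seam reinstated above; the counting allocator is hypothetical and exists only to show the two virtuals a replacement must supply.

    class CountingAllocator : public Allocator {
     public:
      CountingAllocator() : bytes_(0) {}
      virtual void* New(size_t size) {
        bytes_ += size;
        return Malloced::New(size);
      }
      virtual void Delete(void* p) { Malloced::Delete(p); }
      size_t bytes() const { return bytes_; }
     private:
      size_t bytes_;
    };

    // Plugged in via the second constructor argument:
    //   HashMap map(SomeMatchFun, &counting_allocator);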
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 706d288..7b666af 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,53 +32,21 @@
 #include "isolate.h"
 #include "list-inl.h"
 #include "objects.h"
-#include "platform.h"
 #include "v8-counters.h"
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
 
 namespace v8 {
 namespace internal {
 
 void PromotionQueue::insert(HeapObject* target, int size) {
-  if (emergency_stack_ != NULL) {
-    emergency_stack_->Add(Entry(target, size));
-    return;
-  }
-
-  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
-    NewSpacePage* rear_page =
-        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
-    ASSERT(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-    ActivateGuardIfOnTheSamePage();
-  }
-
-  if (guard_) {
-    ASSERT(GetHeadPage() ==
-           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
-    if ((rear_ - 2) < limit_) {
-      RelocateQueueHead();
-      emergency_stack_->Add(Entry(target, size));
-      return;
-    }
-  }
-
   *(--rear_) = reinterpret_cast<intptr_t>(target);
   *(--rear_) = size;
   // Assert no overflow into live objects.
-#ifdef DEBUG
-  SemiSpace::AssertValidRange(HEAP->new_space()->top(),
-                              reinterpret_cast<Address>(rear_));
-#endif
+  ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
 }
 
 
-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
-  guard_ = guard_ ||
-      heap_->new_space()->active_space()->current_page()->address() ==
-      GetHeadPage()->address();
+int Heap::MaxObjectSizeInPagedSpace() {
+  return Page::kMaxHeapObjectSize;
 }
 
 
@@ -115,14 +83,13 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+                   ? lo_space_->AllocateRaw(size)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  // String maps are all immortal immovable objects.
-  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
+  reinterpret_cast<HeapObject*>(result)->set_map(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(str.length());
@@ -149,8 +116,8 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+                   ? lo_space_->AllocateRaw(size)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -214,7 +181,7 @@
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+    result = lo_space_->AllocateRaw(size_in_bytes);
   } else if (CELL_SPACE == space) {
     result = cell_space_->AllocateRaw(size_in_bytes);
   } else {
@@ -226,21 +193,19 @@
 }
 
 
-MaybeObject* Heap::NumberFromInt32(
-    int32_t value, PretenureFlag pretenure) {
+MaybeObject* Heap::NumberFromInt32(int32_t value) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastI2D(value), pretenure);
+  return AllocateHeapNumber(FastI2D(value));
 }
 
 
-MaybeObject* Heap::NumberFromUint32(
-    uint32_t value, PretenureFlag pretenure) {
+MaybeObject* Heap::NumberFromUint32(uint32_t value) {
   if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
     return Smi::FromInt((int32_t)value);
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastUI2D(value), pretenure);
+  return AllocateHeapNumber(FastUI2D(value));
 }
 
 
@@ -255,8 +220,10 @@
   // Dispose of the C++ object if it has not already been disposed.
   if (*resource_addr != NULL) {
     (*resource_addr)->Dispose();
-    *resource_addr = NULL;
   }
+
+  // Clear the resource pointer in the string.
+  *resource_addr = NULL;
 }
 
 
@@ -298,11 +265,6 @@
 }
 
 
-bool Heap::InNewSpace(Address addr) {
-  return new_space_.Contains(addr);
-}
-
-
 bool Heap::InFromSpace(Object* object) {
   return new_space_.FromSpaceContains(object);
 }
@@ -313,36 +275,29 @@
 }
 
 
-bool Heap::OldGenerationAllocationLimitReached() {
-  if (!incremental_marking()->IsStopped()) return false;
-  return OldGenerationSpaceAvailable() < 0;
-}
-
-
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   // An object should be promoted if:
   // - the object has survived a scavenge operation or
   // - to space is already 25% full.
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
-  Address age_mark = new_space_.age_mark();
-  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-      (!page->ContainsLimit(age_mark) || old_address < age_mark);
-  return below_mark || (new_space_.Size() + object_size) >=
-                        (new_space_.EffectiveCapacity() >> 2);
+  return old_address < new_space_.age_mark()
+      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
 }
 
 
 void Heap::RecordWrite(Address address, int offset) {
-  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
+  if (new_space_.Contains(address)) return;
+  ASSERT(!new_space_.FromSpaceContains(address));
+  SLOW_ASSERT(Contains(address + offset));
+  Page::FromAddress(address)->MarkRegionDirty(address + offset);
 }
 
 
 void Heap::RecordWrites(Address address, int start, int len) {
-  if (!InNewSpace(address)) {
-    for (int i = 0; i < len; i++) {
-      store_buffer_.Mark(address + start + i * kPointerSize);
-    }
-  }
+  if (new_space_.Contains(address)) return;
+  ASSERT(!new_space_.FromSpaceContains(address));
+  Page* page = Page::FromAddress(address);
+  page->SetRegionMarks(page->GetRegionMarks() |
+      page->GetRegionMaskForSpan(address + start, len * kPointerSize));
 }
 
 
@@ -381,12 +336,38 @@
 
 
 void Heap::CopyBlock(Address dst, Address src, int byte_size) {
+  ASSERT(IsAligned(byte_size, kPointerSize));
   CopyWords(reinterpret_cast<Object**>(dst),
             reinterpret_cast<Object**>(src),
             byte_size / kPointerSize);
 }
 
 
+void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+                                                   Address src,
+                                                   int byte_size) {
+  ASSERT(IsAligned(byte_size, kPointerSize));
+
+  Page* page = Page::FromAddress(dst);
+  uint32_t marks = page->GetRegionMarks();
+
+  for (int remaining = byte_size / kPointerSize;
+       remaining > 0;
+       remaining--) {
+    Memory::Object_at(dst) = Memory::Object_at(src);
+
+    if (InNewSpace(Memory::Object_at(dst))) {
+      marks |= page->GetRegionMaskForAddress(dst);
+    }
+
+    dst += kPointerSize;
+    src += kPointerSize;
+  }
+
+  page->SetRegionMarks(marks);
+}
+
+
 void Heap::MoveBlock(Address dst, Address src, int byte_size) {
   ASSERT(IsAligned(byte_size, kPointerSize));
 
@@ -406,6 +387,16 @@
 }
 
 
+void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+                                                   Address src,
+                                                   int byte_size) {
+  ASSERT(IsAligned(byte_size, kPointerSize));
+  ASSERT((dst < src) || (dst >= (src + byte_size)));
+
+  CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
+}
+
+
 void Heap::ScavengePointer(HeapObject** p) {
   ScavengeObject(p, *p);
 }
@@ -423,9 +414,7 @@
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
-    HeapObject* dest = first_word.ToForwardingAddress();
-    ASSERT(HEAP->InFromSpace(*p));
-    *p = dest;
+    *p = first_word.ToForwardingAddress();
     return;
   }
 
@@ -434,10 +423,8 @@
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
-  const char* collector_reason = NULL;
-  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-  return CollectGarbage(space, collector, gc_reason, collector_reason);
+bool Heap::CollectGarbage(AllocationSpace space) {
+  return CollectGarbage(space, SelectGarbageCollector(space));
 }
 
 
@@ -461,7 +448,7 @@
 
 
 int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
-  ASSERT(HasBeenSetUp());
+  ASSERT(HasBeenSetup());
   int amount = amount_of_external_allocated_memory_ + change_in_bytes;
   if (change_in_bytes >= 0) {
     // Avoid overflow.
@@ -472,7 +459,7 @@
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
+      CollectAllGarbage(false);
     }
   } else {
     // Avoid underflow.
@@ -489,7 +476,6 @@
   roots_[kLastScriptIdRootIndex] = last_script_id;
 }
 
-
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -503,6 +489,7 @@
 #define GC_GREEDY_CHECK() { }
 #endif
 
+
 // Calls the FUNCTION_CALL function and retries it up to three times
 // to guarantee that any allocations performed during the call will
 // succeed if there's enough memory.
@@ -521,8 +508,7 @@
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)->     \
-                                    allocation_space(),                   \
-                                    "allocation failure");                \
+                                    allocation_space());                  \
     __maybe_object__ = FUNCTION_CALL;                                     \
     if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
     if (__maybe_object__->IsOutOfMemory()) {                              \
@@ -530,7 +516,7 @@
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->counters()->gc_last_resort_from_handles()->Increment();      \
-    ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc");        \
+    ISOLATE->heap()->CollectAllAvailableGarbage();                        \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __maybe_object__ = FUNCTION_CALL;                                   \
@@ -595,11 +581,11 @@
 #ifdef DEBUG
   for (int i = 0; i < new_space_strings_.length(); ++i) {
     ASSERT(heap_->InNewSpace(new_space_strings_[i]));
-    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+    ASSERT(new_space_strings_[i] != HEAP->raw_unchecked_null_value());
   }
   for (int i = 0; i < old_space_strings_.length(); ++i) {
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_the_hole_value());
+    ASSERT(old_space_strings_[i] != HEAP->raw_unchecked_null_value());
   }
 #endif
 }
@@ -614,9 +600,7 @@
 
 void ExternalStringTable::ShrinkNewStrings(int position) {
   new_space_strings_.Rewind(position);
-  if (FLAG_verify_heap) {
-    Verify();
-  }
+  Verify();
 }
 
 
@@ -659,15 +643,15 @@
     case ATAN:
       return atan(input);
     case COS:
-      return fast_cos(input);
+      return cos(input);
     case EXP:
       return exp(input);
     case LOG:
-      return fast_log(input);
+      return log(input);
     case SIN:
-      return fast_sin(input);
+      return sin(input);
     case TAN:
-      return fast_tan(input);
+      return tan(input);
     default:
       return 0.0;  // Never happens.
   }
@@ -699,94 +683,20 @@
 }
 
 
-AlwaysAllocateScope::AlwaysAllocateScope() {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-  HEAP->always_allocate_scope_depth_++;
+Heap* _inline_get_heap_() {
+  return HEAP;
 }
 
 
-AlwaysAllocateScope::~AlwaysAllocateScope() {
-  HEAP->always_allocate_scope_depth_--;
-  ASSERT(HEAP->always_allocate_scope_depth_ == 0);
-}
-
-
-LinearAllocationScope::LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_++;
-}
-
-
-LinearAllocationScope::~LinearAllocationScope() {
-  HEAP->linear_allocation_scope_depth_--;
-  ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
-}
-
-
+void MarkCompactCollector::SetMark(HeapObject* obj) {
+  tracer_->increment_marked_count();
 #ifdef DEBUG
-void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
-  for (Object** current = start; current < end; current++) {
-    if ((*current)->IsHeapObject()) {
-      HeapObject* object = HeapObject::cast(*current);
-      ASSERT(HEAP->Contains(object));
-      ASSERT(object->map()->IsMap());
-    }
-  }
-}
+  UpdateLiveObjectCount(obj);
 #endif
-
-
-double GCTracer::SizeOfHeapObjects() {
-  return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+  obj->SetMark();
 }
 
 
-#ifdef DEBUG
-DisallowAllocationFailure::DisallowAllocationFailure() {
-  old_state_ = HEAP->disallow_allocation_failure_;
-  HEAP->disallow_allocation_failure_ = true;
-}
-
-
-DisallowAllocationFailure::~DisallowAllocationFailure() {
-  HEAP->disallow_allocation_failure_ = old_state_;
-}
-#endif
-
-
-#ifdef DEBUG
-AssertNoAllocation::AssertNoAllocation() {
-  old_state_ = HEAP->allow_allocation(false);
-}
-
-
-AssertNoAllocation::~AssertNoAllocation() {
-  HEAP->allow_allocation(old_state_);
-}
-
-
-DisableAssertNoAllocation::DisableAssertNoAllocation() {
-  old_state_ = HEAP->allow_allocation(true);
-}
-
-
-DisableAssertNoAllocation::~DisableAssertNoAllocation() {
-  HEAP->allow_allocation(old_state_);
-}
-
-#else
-
-AssertNoAllocation::AssertNoAllocation() { }
-AssertNoAllocation::~AssertNoAllocation() { }
-DisableAssertNoAllocation::DisableAssertNoAllocation() { }
-DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
-
-#endif
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_INL_H_
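A hedged restatement of the CALL_AND_RETRY ladder from the hunk above, unrolled into a plain function for readability. AllocateSomething stands in for FUNCTION_CALL and is hypothetical; the IsOutOfMemory fatal-error branch is elided.

    static Object* AllocateWithRetry(Isolate* isolate) {
      Object* object;
      MaybeObject* maybe = AllocateSomething();
      if (maybe->ToObject(&object)) return object;
      if (!maybe->IsRetryAfterGC()) return NULL;  // non-GC failure: give up
      // First retry: collect the space the failure came from.
      isolate->heap()->CollectGarbage(
          Failure::cast(maybe)->allocation_space());
      maybe = AllocateSomething();
      if (maybe->ToObject(&object)) return object;
      if (!maybe->IsRetryAfterGC()) return NULL;
      // Last resort: collect everything, then allocate inside the scope
      // that forbids a further allocation failure.
      isolate->heap()->CollectAllAvailableGarbage();
      AlwaysAllocateScope scope;
      maybe = AllocateSomething();
      return maybe->ToObject(&object) ? object : NULL;
    }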
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 8be6f27..7e613e9 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -51,7 +51,7 @@
 }
 
 
-void HeapProfiler::SetUp() {
+void HeapProfiler::Setup() {
   Isolate* isolate = Isolate::Current();
   if (isolate->heap_profiler() == NULL) {
     isolate->set_heap_profiler(new HeapProfiler());
@@ -114,6 +114,7 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
+      HEAP->CollectAllGarbage(true);
       HeapSnapshotGenerator generator(result, control);
       generation_completed = generator.GenerateSnapshot();
       break;
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index ef5c4f4..b1bc91c 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -48,7 +48,7 @@
 // to generate .hp files for use by the GHC/Valgrind tool hp2ps.
 class HeapProfiler {
  public:
-  static void SetUp();
+  static void Setup();
   static void TearDown();
 
   static HeapSnapshot* TakeSnapshot(const char* name,
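For context, a sketch of the embedder-visible lifecycle implied by the rename reverted above; TakeSnapshot's full parameter list lies outside this hunk, so it stays elided.

    static void ProfilerLifecycleSketch() {
      HeapProfiler::Setup();    // this rollback restores Setup (3.7: SetUp)
      // ... HeapProfiler::TakeSnapshot(...) calls would go here ...
      HeapProfiler::TearDown();
    }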
diff --git a/src/heap.cc b/src/heap.cc
index a1cccf6..c91f769 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,16 +36,13 @@
 #include "deoptimizer.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
-#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
-#include "objects-visiting-inl.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
-#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -60,7 +57,12 @@
 namespace v8 {
 namespace internal {
 
-static LazyMutex gc_initializer_mutex = LAZY_MUTEX_INITIALIZER;
+
+static const intptr_t kMinimumPromotionLimit = 2 * MB;
+static const intptr_t kMinimumAllocationLimit = 8 * MB;
+
+
+static Mutex* gc_initializer_mutex = OS::CreateMutex();
 
 
 Heap::Heap()
@@ -68,21 +70,27 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-#define LUMP_OF_MEMORY (128 * KB)
+      reserved_semispace_size_(2*MB),
+      max_semispace_size_(2*MB),
+      initial_semispace_size_(128*KB),
+      max_old_generation_size_(192*MB),
+      max_executable_size_(max_old_generation_size_),
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
-#define LUMP_OF_MEMORY (2 * MB)
+      reserved_semispace_size_(16*MB),
+      max_semispace_size_(16*MB),
+      initial_semispace_size_(1*MB),
+      max_old_generation_size_(1400*MB),
+      max_executable_size_(256*MB),
       code_range_size_(512*MB),
 #else
-#define LUMP_OF_MEMORY MB
+      reserved_semispace_size_(8*MB),
+      max_semispace_size_(8*MB),
+      initial_semispace_size_(512*KB),
+      max_old_generation_size_(700*MB),
+      max_executable_size_(128*MB),
       code_range_size_(0),
 #endif
-      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
-      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
-      initial_semispace_size_(Page::kPageSize),
-      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
-      max_executable_size_(256l * LUMP_OF_MEMORY),
-
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 // Will be 4 * reserved_semispace_size_ to ensure that young
@@ -92,8 +100,6 @@
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
-      global_ic_age_(0),
-      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
@@ -103,9 +109,9 @@
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
+      mc_count_(0),
       ms_count_(0),
       gc_count_(0),
-      remembered_unmapped_pages_index_(0),
       unflattened_strings_length_(0),
 #ifdef DEBUG
       allocation_allowed_(true),
@@ -113,16 +119,12 @@
       disallow_allocation_failure_(false),
       debug_utils_(NULL),
 #endif  // DEBUG
-      new_space_high_promotion_mode_active_(false),
       old_gen_promotion_limit_(kMinimumPromotionLimit),
       old_gen_allocation_limit_(kMinimumAllocationLimit),
-      old_gen_limit_factor_(1),
-      size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
-      store_buffer_rebuilder_(store_buffer()),
       hidden_symbol_(NULL),
       global_gc_prologue_callback_(NULL),
       global_gc_epilogue_callback_(NULL),
@@ -139,20 +141,12 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
-      store_buffer_(this),
-      marking_(this),
-      incremental_marking_(this),
+      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
-      idle_notification_will_schedule_next_gc_(false),
-      mark_sweeps_since_idle_round_started_(0),
-      ms_count_at_last_idle_notification_(0),
-      gc_count_at_last_idle_gc_(0),
-      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
-      promotion_queue_(this),
       configured_(false),
-      chunks_queued_for_free_(NULL) {
+      is_safe_to_read_maps_(true) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -177,7 +171,7 @@
 
 
 intptr_t Heap::Capacity() {
-  if (!HasBeenSetUp()) return 0;
+  if (!HasBeenSetup()) return 0;
 
   return new_space_.Capacity() +
       old_pointer_space_->Capacity() +
@@ -189,7 +183,7 @@
 
 
 intptr_t Heap::CommittedMemory() {
-  if (!HasBeenSetUp()) return 0;
+  if (!HasBeenSetup()) return 0;
 
   return new_space_.CommittedMemory() +
       old_pointer_space_->CommittedMemory() +
@@ -201,14 +195,14 @@
 }
 
 intptr_t Heap::CommittedMemoryExecutable() {
-  if (!HasBeenSetUp()) return 0;
+  if (!HasBeenSetup()) return 0;
 
   return isolate()->memory_allocator()->SizeExecutable();
 }
 
 
 intptr_t Heap::Available() {
-  if (!HasBeenSetUp()) return 0;
+  if (!HasBeenSetup()) return 0;
 
   return new_space_.Available() +
       old_pointer_space_->Available() +
@@ -219,7 +213,7 @@
 }
 
 
-bool Heap::HasBeenSetUp() {
+bool Heap::HasBeenSetup() {
   return old_pointer_space_ != NULL &&
          old_data_space_ != NULL &&
          code_space_ != NULL &&
@@ -230,26 +224,42 @@
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  if (IntrusiveMarking::IsMarked(object)) {
-    return IntrusiveMarking::SizeOfMarkedObject(object);
-  }
-  return object->SizeFromMap(object->map());
+  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
+  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
+  MapWord map_word = object->map_word();
+  map_word.ClearMark();
+  map_word.ClearOverflow();
+  return object->SizeFromMap(map_word.ToMap());
 }
 
 
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
-                                              const char** reason) {
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
+  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
+  uint32_t marker = Memory::uint32_at(object->address());
+  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+    return kIntSize;
+  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+    return Memory::int_at(object->address() + kIntSize);
+  } else {
+    MapWord map_word = object->map_word();
+    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
+    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+    return object->SizeFromMap(map);
+  }
+}
+
+
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
-    *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
     return MARK_COMPACTOR;
   }
 
@@ -257,7 +267,6 @@
   if (old_gen_exhausted_) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
-    *reason = "old generations exhausted";
     return MARK_COMPACTOR;
   }
 
@@ -273,12 +282,10 @@
   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
-    *reason = "scavenge might not succeed";
     return MARK_COMPACTOR;
   }
 
   // Default
-  *reason = NULL;
   return SCAVENGER;
 }
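A hedged condensation of the selection policy above into a standalone predicate; the boolean parameters are hypothetical stand-ins for the four checks in SelectGarbageCollector.

    static GarbageCollector SelectSketch(bool old_space_or_gc_global,
                                         bool promotion_limit_reached,
                                         bool old_gen_exhausted,
                                         bool scavenge_might_fail) {
      // Any of the four conditions forces a full mark-compact collection;
      // otherwise the cheap scavenger handles new space on its own.
      if (old_space_or_gc_global || promotion_limit_reached ||
          old_gen_exhausted || scavenge_might_fail) {
        return MARK_COMPACTOR;
      }
      return SCAVENGER;
    }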
 
@@ -393,7 +400,6 @@
 #endif  // DEBUG
 
   LiveObjectList::GCPrologue();
-  store_buffer()->GCPrologue();
 }
 
 intptr_t Heap::SizeOfObjects() {
@@ -406,7 +412,6 @@
 }
 
 void Heap::GarbageCollectionEpilogue() {
-  store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
@@ -438,20 +443,22 @@
 }
 
 
-void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
+void Heap::CollectAllGarbage(bool force_compaction) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
-  mark_compact_collector_.SetFlags(kNoGCFlags);
+  mark_compact_collector_.SetForceCompaction(force_compaction);
+  CollectGarbage(OLD_POINTER_SPACE);
+  mark_compact_collector_.SetForceCompaction(false);
 }
 
 
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+void Heap::CollectAllAvailableGarbage() {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
+  mark_compact_collector()->SetForceCompaction(true);
+
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC.  Therefore if we collect aggressively and weak handle callback
@@ -460,27 +467,17 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
-                                     kReduceMemoryFootprintMask);
-  isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
+    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
       break;
     }
   }
-  mark_compact_collector()->SetFlags(kNoGCFlags);
-  new_space_.Shrink();
-  UncommitFromSpace();
-  Shrink();
-  incremental_marking()->UncommitMarkingDeque();
+  mark_compact_collector()->SetForceCompaction(false);
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space,
-                          GarbageCollector collector,
-                          const char* gc_reason,
-                          const char* collector_reason) {
+bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
   // The VM is in the GC state until exiting this function.
   VMState state(isolate_, GC);
 
@@ -493,27 +490,9 @@
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
-  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Scavenge during marking.\n");
-    }
-  }
-
-  if (collector == MARK_COMPACTOR &&
-      !mark_compact_collector()->abort_incremental_marking_ &&
-      !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() &&
-      FLAG_incremental_marking_steps) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
-    }
-    collector = SCAVENGER;
-    collector_reason = "incremental marking delaying mark-sweep";
-  }
-
   bool next_gc_likely_to_collect_more = false;
 
-  { GCTracer tracer(this, gc_reason, collector_reason);
+  { GCTracer tracer(this);
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue.  Tell the tracer about
     // it.
@@ -533,24 +512,13 @@
     GarbageCollectionEpilogue();
   }
 
-  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
-  if (incremental_marking()->IsStopped()) {
-    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
-      incremental_marking()->Start();
-    }
-  }
-
   return next_gc_likely_to_collect_more;
 }
 
 
 void Heap::PerformScavenge() {
-  GCTracer tracer(this, NULL, NULL);
-  if (incremental_marking()->IsStopped()) {
-    PerformGarbageCollection(SCAVENGER, &tracer);
-  } else {
-    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
-  }
+  GCTracer tracer(this);
+  PerformGarbageCollection(SCAVENGER, &tracer);
 }
 
 
@@ -563,7 +531,7 @@
     for (Object** p = start; p < end; p++) {
       if ((*p)->IsHeapObject()) {
         // Check that the symbol is actually a symbol.
-        ASSERT((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
       }
     }
   }
@@ -579,17 +547,6 @@
 }
 
 
-static bool AbortIncrementalMarkingAndCollectGarbage(
-    Heap* heap,
-    AllocationSpace space,
-    const char* gc_reason = NULL) {
-  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
-  bool result = heap->CollectGarbage(space, gc_reason);
-  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
-  return result;
-}
-
-
 void Heap::ReserveSpace(
     int new_space_size,
     int pointer_space_size,
@@ -606,38 +563,30 @@
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
   bool gc_performed = true;
-  int counter = 0;
-  static const int kThreshold = 20;
-  while (gc_performed && counter++ < kThreshold) {
+  while (gc_performed) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE,
-                           "failed to reserve space in the new space");
+      Heap::CollectGarbage(NEW_SPACE);
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_POINTER_SPACE,
-          "failed to reserve space in the old pointer space");
+      Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, OLD_DATA_SPACE,
-          "failed to reserve space in the old data space");
+      Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CODE_SPACE,
-          "failed to reserve space in the code space");
+      Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, MAP_SPACE,
-          "failed to reserve space in the map space");
+      Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, CELL_SPACE,
-          "failed to reserve space in the cell space");
+      Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
@@ -649,16 +598,10 @@
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      AbortIncrementalMarkingAndCollectGarbage(this, LO_SPACE,
-          "failed to reserve space in the large object space");
+      Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
   }
-
-  if (gc_performed) {
-    // Failed to reserve the space after several attempts.
-    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
-  }
 }
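
This hunk swaps a bounded reservation loop for an unbounded one: the restored code keeps collecting in whichever space fails to reserve its quota until everything fits, while the deleted code capped the rounds and aborted the process on persistent failure. A sketch of the bounded (deleted) shape, with Space and TryReserve as illustrative stand-ins:

    // Sketch of the deleted bounded ReserveSpace shape: retry per-space
    // reservations with a GC on failure, and abort after kThreshold rounds.
    #include <cstdio>
    #include <cstdlib>

    struct Space {
      int free_bytes;
      bool TryReserve(int bytes) { return free_bytes >= bytes; }
      void CollectGarbage() { free_bytes += 64; }  // Pretend GC frees memory.
    };

    void ReserveSpaceBounded(Space* spaces, const int* quota, int n) {
      static const int kThreshold = 20;  // As in the deleted variant.
      bool gc_performed = true;
      int counter = 0;
      while (gc_performed && counter++ < kThreshold) {
        gc_performed = false;
        for (int i = 0; i < n; i++) {
          if (!spaces[i].TryReserve(quota[i])) {
            spaces[i].CollectGarbage();
            gc_performed = true;
          }
        }
      }
      if (gc_performed) {  // Still failing after kThreshold rounds.
        fprintf(stderr, "out of memory\n");
        abort();
      }
    }

    int main() {
      Space spaces[2] = {{100}, {10}};
      int quota[2] = {80, 120};
      ReserveSpaceBounded(spaces, quota, 2);
      puts("all reservations satisfied");
    }
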
 
 
@@ -667,6 +610,13 @@
 
   // Committing memory to from space failed.
   // Try shrinking and try again.
+  PagedSpaces spaces;
+  for (PagedSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    space->RelinkPageListInChunkOrder(true);
+  }
+
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;
 
@@ -681,17 +631,13 @@
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    // Get the caches for this context. GC can happen when the context
-    // is not fully initialized, so the caches can be undefined.
-    Object* caches_or_undefined =
-        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
-    if (!caches_or_undefined->IsUndefined()) {
-      FixedArray* caches = FixedArray::cast(caches_or_undefined);
-      // Clear the caches:
-      int length = caches->length();
-      for (int i = 0; i < length; i++) {
-        JSFunctionResultCache::cast(caches->get(i))->Clear();
-      }
+    // Get the caches for this context:
+    FixedArray* caches =
+      Context::cast(context)->jsfunction_result_caches();
+    // Clear the caches:
+    int length = caches->length();
+    for (int i = 0; i < length; i++) {
+      JSFunctionResultCache::cast(caches->get(i))->Clear();
     }
     // Get the next context:
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
@@ -701,42 +647,45 @@
 
 
 void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive() &&
-      !incremental_marking()->IsMarking()) {
-    return;
-  }
+  if (isolate_->bootstrapper()->IsActive()) return;
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the cache can be undefined.
-    Object* cache =
-        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
-      NormalizedMapCache::cast(cache)->Clear();
-    }
+    Context::cast(context)->normalized_map_cache()->Clear();
     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
   }
 }
 
 
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+  ALL_VALID,
+  ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+                                        PageWatermarkValidity validity) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  bool expected_value = (validity == ALL_VALID);
+  while (it.has_next()) {
+    Page* page = it.next();
+    ASSERT(page->IsWatermarkValid() == expected_value);
+  }
+}
+#endif
+
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
       start_new_space_size;
 
-  if (survival_rate > kYoungSurvivalRateHighThreshold) {
+  if (survival_rate > kYoungSurvivalRateThreshold) {
     high_survival_rate_period_length_++;
   } else {
     high_survival_rate_period_length_ = 0;
   }
 
-  if (survival_rate < kYoungSurvivalRateLowThreshold) {
-    low_survival_rate_period_length_++;
-  } else {
-    low_survival_rate_period_length_ = 0;
-  }
-
   double survival_rate_diff = survival_rate_ - survival_rate;
 
   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
@@ -758,9 +707,7 @@
     PROFILE(isolate_, CodeMovingGCEvent());
   }
 
-  if (FLAG_verify_heap) {
-    VerifySymbolTable();
-  }
+  VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
@@ -780,13 +727,6 @@
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
-  if (IsHighSurvivalRate()) {
-    // We speed up the incremental marker if it is running so that it
-    // does not fall behind the rate of promotion, which would cause a
-    // constantly growing old space.
-    incremental_marking()->NotifyOfHighPromotionRate();
-  }
-
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
@@ -796,7 +736,11 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+    intptr_t old_gen_size = PromotedSpaceSize();
+    old_gen_promotion_limit_ =
+        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+    old_gen_allocation_limit_ =
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -806,16 +750,10 @@
       // In this case we aggressively raise old generation memory limits to
       // postpone subsequent mark-sweep collection and thus trade memory
       // space for the mutation speed.
-      old_gen_limit_factor_ = 2;
-    } else {
-      old_gen_limit_factor_ = 1;
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
     }
 
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -825,37 +763,6 @@
     UpdateSurvivalRateTrend(start_new_space_size);
   }
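
The restored branch above recomputes the old-generation limits directly from the post-GC size: the next mark-sweep is deferred until the old generation grows by max(minimum, size/3) for promotions and max(minimum, size/2) for allocations, and both limits are doubled while survival rates stay high. A worked sketch of that arithmetic (the minimum constants are assumptions, not V8's values):

    // Worked sketch of the restored limit computation. Constants are
    // illustrative assumptions chosen only to make the arithmetic concrete.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kMinimumPromotionLimit = 2 * 1024 * 1024;   // assumed 2 MB
      const intptr_t kMinimumAllocationLimit = 8 * 1024 * 1024;  // assumed 8 MB
      intptr_t old_gen_size = 30 * 1024 * 1024;                  // 30 MB after GC

      intptr_t promotion_limit =
          old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
      intptr_t allocation_limit =
          old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);

      bool high_survival_and_stable_trend = true;  // Pretend the heuristic fired.
      if (high_survival_and_stable_trend) {
        promotion_limit *= 2;    // Trade memory for mutator speed.
        allocation_limit *= 2;
      }
      printf("promotion limit: %ld MB\n", (long)(promotion_limit >> 20));   // 80
      printf("allocation limit: %ld MB\n", (long)(allocation_limit >> 20)); // 90
    }
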
 
-  if (!new_space_high_promotion_mode_active_ &&
-      new_space_.Capacity() == new_space_.MaximumCapacity() &&
-      IsStableOrIncreasingSurvivalTrend() &&
-      IsHighSurvivalRate()) {
-    // Stable high survival rates even though young generation is at
-    // maximum capacity indicates that most objects will be promoted.
-    // To decrease scavenger pauses and final mark-sweep pauses, we
-    // have to limit maximal capacity of the young generation.
-    new_space_high_promotion_mode_active_ = true;
-    if (FLAG_trace_gc) {
-      PrintF("Limited new space size due to high promotion rate: %d MB\n",
-             new_space_.InitialCapacity() / MB);
-    }
-  } else if (new_space_high_promotion_mode_active_ &&
-      IsStableOrDecreasingSurvivalTrend() &&
-      IsLowSurvivalRate()) {
-    // Decreasing low survival rates might indicate that the above high
-    // promotion mode is over and we should allow the young generation
-    // to grow again.
-    new_space_high_promotion_mode_active_ = false;
-    if (FLAG_trace_gc) {
-      PrintF("Unlimited new space size due to low promotion rate: %d MB\n",
-             new_space_.MaximumCapacity() / MB);
-    }
-  }
-
-  if (new_space_high_promotion_mode_active_ &&
-      new_space_.Capacity() > new_space_.InitialCapacity()) {
-    new_space_.Shrink();
-  }
-
   isolate_->counters()->objs_since_last_young()->Set(0);
 
   gc_post_processing_depth_++;
@@ -875,7 +782,9 @@
         amount_of_external_allocated_memory_;
   }
 
-  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
+  GCCallbackFlags callback_flags = tracer->is_compacting()
+      ? kGCCallbackFlagCompacted
+      : kNoGCCallbackFlags;
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -887,9 +796,7 @@
     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_epilogue_callback_();
   }
-  if (FLAG_verify_heap) {
-    VerifySymbolTable();
-  }
+  VerifySymbolTable();
 
   return next_gc_likely_to_collect_more;
 }
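
The epilogue dispatch above filters registered callbacks by a gc_type bitmask and, in the restored version, advertises compaction through kGCCallbackFlagCompacted. A self-contained sketch of that registration-and-dispatch pattern, with simplified types:

    // Sketch of the epilogue-callback dispatch: callbacks register with a
    // gc_type bitmask and only run for matching collections; a compacting
    // full GC additionally advertises kGCCallbackFlagCompacted.
    #include <cstdio>
    #include <vector>

    enum GCType { kGCTypeScavenge = 1, kGCTypeMarkSweepCompact = 2 };
    enum GCCallbackFlags { kNoGCCallbackFlags = 0, kGCCallbackFlagCompacted = 1 };
    typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags);

    struct Registration { GCEpilogueCallback callback; int gc_type; };
    static std::vector<Registration> epilogue_callbacks;

    void CallEpilogueCallbacks(GCType gc_type, bool is_compacting) {
      GCCallbackFlags flags =
          is_compacting ? kGCCallbackFlagCompacted : kNoGCCallbackFlags;
      for (size_t i = 0; i < epilogue_callbacks.size(); ++i) {
        if (gc_type & epilogue_callbacks[i].gc_type)  // Bitmask filter.
          epilogue_callbacks[i].callback(gc_type, flags);
      }
    }

    static void OnFullGC(GCType, GCCallbackFlags flags) {
      printf("full GC done, compacted=%d\n", flags & kGCCallbackFlagCompacted);
    }

    int main() {
      Registration r = { OnFullGC, kGCTypeMarkSweepCompact };
      epilogue_callbacks.push_back(r);
      CallEpilogueCallbacks(kGCTypeScavenge, false);         // Filtered out.
      CallEpilogueCallbacks(kGCTypeMarkSweepCompact, true);  // Runs.
    }
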
@@ -901,26 +808,34 @@
 
   mark_compact_collector_.Prepare(tracer);
 
-  ms_count_++;
-  tracer->set_full_gc_count(ms_count_);
+  bool is_compacting = mark_compact_collector_.IsCompacting();
 
-  MarkCompactPrologue();
+  if (is_compacting) {
+    mc_count_++;
+  } else {
+    ms_count_++;
+  }
+  tracer->set_full_gc_count(mc_count_ + ms_count_);
 
+  MarkCompactPrologue(is_compacting);
+
+  is_safe_to_read_maps_ = false;
   mark_compact_collector_.CollectGarbage();
+  is_safe_to_read_maps_ = true;
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
   gc_state_ = NOT_IN_GC;
 
+  Shrink();
+
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
-
-  isolate_->set_context_exit_happened(false);
 }
 
 
-void Heap::MarkCompactPrologue() {
+void Heap::MarkCompactPrologue(bool is_compacting) {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
@@ -932,7 +847,7 @@
 
   CompletelyClearInstanceofCache();
 
-  FlushNumberStringCache();
+  if (is_compacting) FlushNumberStringCache();
   if (FLAG_cleanup_code_caches_at_gc) {
     polymorphic_code_cache()->set_cache(undefined_value());
   }
@@ -942,8 +857,13 @@
 
 
 Object* Heap::FindCodeObject(Address a) {
-  return isolate()->inner_pointer_to_code_cache()->
-      GcSafeFindCodeForInnerPointer(a);
+  Object* obj = NULL;  // Initialization to please compiler.
+  { MaybeObject* maybe_obj = code_space_->FindObject(a);
+    if (!maybe_obj->ToObject(&obj)) {
+      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
+    }
+  }
+  return obj;
 }
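
FindCodeObject above, like most allocation paths in this file, uses the MaybeObject convention: a call either yields an object via ToObject() or a failure value that the caller propagates or retries. A minimal model of the convention (not V8's actual types):

    // Minimal model of the MaybeObject convention: a call returns either a
    // real object or a failure sentinel, and ToObject() only succeeds in the
    // former case. This is a sketch, not V8's actual types.
    #include <cstdio>

    struct Object {};

    struct MaybeObject {
      Object* value;  // NULL encodes a (retryable) failure.
      bool ToObject(Object** out) {
        if (value == NULL) return false;
        *out = value;
        return true;
      }
    };

    MaybeObject TryAllocate(bool succeed) {
      static Object the_object;
      MaybeObject result = { succeed ? &the_object : NULL };
      return result;
    }

    int main() {
      Object* obj = NULL;
      MaybeObject maybe = TryAllocate(false);
      if (!maybe.ToObject(&obj)) {
        // Caller propagates the failure, exactly like the
        // "if (!maybe_obj->ToObject(&obj)) return maybe_obj;" pattern above.
        puts("allocation failed; would retry after GC");
      }
    }
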
 
 
@@ -991,29 +911,23 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(HEAP->code_space());
-  for (HeapObject* object = code_it.Next();
-       object != NULL; object = code_it.Next())
+  for (HeapObject* object = code_it.next();
+       object != NULL; object = code_it.next())
     object->Iterate(&v);
 
-  // The old data space is normally swept conservatively, so the object
-  // iterator does not work there; in that case we skip the next bit.
-  if (!HEAP->old_data_space()->was_swept_conservatively()) {
-    HeapObjectIterator data_it(HEAP->old_data_space());
-    for (HeapObject* object = data_it.Next();
-         object != NULL; object = data_it.Next())
-      object->Iterate(&v);
-  }
+  HeapObjectIterator data_it(HEAP->old_data_space());
+  for (HeapObject* object = data_it.next();
+       object != NULL; object = data_it.next())
+    object->Iterate(&v);
 }
 #endif
 
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.Capacity() &&
-      !new_space_high_promotion_mode_active_) {
-    // Grow the size of new space if there is room to grow, enough data
-    // has survived scavenge since the last expansion and we are not in
-    // high promotion mode.
+      survived_since_last_expansion_ > new_space_.Capacity()) {
+    // Grow the size of new space if there is room to grow and enough
+    // data has survived scavenge since the last expansion.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
   }
@@ -1026,107 +940,29 @@
 }
 
 
-void Heap::ScavengeStoreBufferCallback(
-    Heap* heap,
-    MemoryChunk* page,
-    StoreBufferEvent event) {
-  heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
-  if (event == kStoreBufferStartScanningPagesEvent) {
-    start_of_current_page_ = NULL;
-    current_page_ = NULL;
-  } else if (event == kStoreBufferScanningPageEvent) {
-    if (current_page_ != NULL) {
-      // If this page already overflowed the store buffer during this iteration.
-      if (current_page_->scan_on_scavenge()) {
-        // Then we should wipe out the entries that have been added for it.
-        store_buffer_->SetTop(start_of_current_page_);
-      } else if (store_buffer_->Top() - start_of_current_page_ >=
-                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
-        // Did we find too many pointers in the previous page?  The heuristic is
-        // that no page can take more than 1/4 of the remaining slots in the
-        // store buffer (note the >> 2 above).
-        current_page_->set_scan_on_scavenge(true);
-        store_buffer_->SetTop(start_of_current_page_);
-      } else {
-        // In this case the page we scanned took a reasonable number of slots in
-        // the store buffer.  It has now been rehabilitated and is no longer
-        // marked scan_on_scavenge.
-        ASSERT(!current_page_->scan_on_scavenge());
-      }
-    }
-    start_of_current_page_ = store_buffer_->Top();
-    current_page_ = page;
-  } else if (event == kStoreBufferFullEvent) {
-    // The current page overflowed the store buffer again.  Wipe out its entries
-    // in the store buffer and mark it scan-on-scavenge again.  This may happen
-    // several times while scanning.
-    if (current_page_ == NULL) {
-      // Store Buffer overflowed while scanning promoted objects.  These are not
-      // in any particular page, though they are likely to be clustered by the
-      // allocation routines.
-      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
-    } else {
-      // Store Buffer overflowed while scanning a particular old space page for
-      // pointers to new space.
-      ASSERT(current_page_ == page);
-      ASSERT(page != NULL);
-      current_page_->set_scan_on_scavenge(true);
-      ASSERT(start_of_current_page_ != store_buffer_->Top());
-      store_buffer_->SetTop(start_of_current_page_);
-    }
-  } else {
-    UNREACHABLE();
-  }
-}
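
The deleted rebuilder above applies a budget heuristic: while rescanning a page, if the entries it contributed reach a quarter of the slots still free in the store buffer (the >> 2), they are wiped and the page is marked scan-on-scavenge instead. A small arithmetic sketch of that test, with plain indices in place of pointers:

    // Arithmetic sketch of the deleted store-buffer budget test. Pointer
    // values are simulated with plain slot indices.
    #include <cstdio>

    bool PageOverflowsBudget(int start_of_page, int top, int limit) {
      int entries_from_page = top - start_of_page;   // Slots this page consumed.
      int remaining = limit - top;                   // Slots still free.
      return entries_from_page >= (remaining >> 2);  // The 1/4 budget test.
    }

    int main() {
      // 1000-slot buffer; the page added entries from index 700 up to 950.
      printf("%d\n", PageOverflowsBudget(700, 950, 1000));  // 1: 250 >= 50/4
      printf("%d\n", PageOverflowsBudget(700, 710, 1000));  // 0: 10 < 290/4
    }
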
-
-
-void PromotionQueue::Initialize() {
-  // Assumes that a NewSpacePage exactly fits a number of promotion queue
-  // entries (where each is a pair of intptr_t). This allows us to simplify
-  // the test for when to switch pages.
-  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
-         == 0);
-  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
-  front_ = rear_ =
-      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
-  emergency_stack_ = NULL;
-  guard_ = false;
-}
-
-
-void PromotionQueue::RelocateQueueHead() {
-  ASSERT(emergency_stack_ == NULL);
-
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  intptr_t* head_start = rear_;
-  intptr_t* head_end =
-      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
-
-  int entries_count =
-      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
-
-  emergency_stack_ = new List<Entry>(2 * entries_count);
-
-  while (head_start != head_end) {
-    int size = static_cast<int>(*(head_start++));
-    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
-    emergency_stack_->Add(Entry(obj, size));
-  }
-  rear_ = head_end;
-}
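
The deleted PromotionQueue code stores its entries as pairs of pointer-sized words written downward from the end of to-space, so the queue and the bump allocator share the semispace from opposite ends; when they are about to collide, the head is relocated to a heap-allocated emergency stack. A sketch of the pair layout, with a local array standing in for the semispace:

    // Sketch of the deleted promotion-queue layout: (object, size) pairs of
    // pointer-sized words written downward from the end of to-space. Here the
    // semispace is just a local array.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      intptr_t semispace[16];            // Pretend to-space.
      intptr_t* limit = semispace;       // ToSpaceStart: collision boundary.
      intptr_t* rear = semispace + 16;   // ToSpaceEnd: queue grows downward.
      intptr_t* front = semispace + 16;

      // insert(target, size): push two words at the rear.
      *(--rear) = 0x1234;                // The promoted object's "address".
      *(--rear) = 24;                    // Its size in bytes.
      assert(rear >= limit);             // Otherwise spill to an emergency stack.

      // remove(&target, &size): pop two words at the front (FIFO order).
      intptr_t target = *(--front);
      intptr_t size = *(--front);
      printf("object=%lx size=%ld\n", (long)target, (long)size);
    }
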
-
-
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
 
+  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
+  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
+#ifdef DEBUG
+  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+  // We do not update the allocation watermark of the top page during linear
+  // allocation, to avoid overhead. So to maintain the watermark invariant
+  // we have to manually cache the watermark and mark the top page as having an
+  // invalid watermark. This guarantees that dirty regions iteration will use a
+  // correct watermark even if a linear allocation happens.
+  old_pointer_space_->FlushTopPageWatermark();
+  map_space_->FlushTopPageWatermark();
+
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
@@ -1134,16 +970,10 @@
   isolate_->descriptor_lookup_cache()->Clear();
 
   // Used for updating survived_since_last_expansion_ at function end.
-  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+  intptr_t survived_watermark = PromotedSpaceSize();
 
   CheckNewSpaceExpansionCriteria();
 
-  SelectScavengingVisitorsTable();
-
-  incremental_marking()->PrepareForScavenge();
-
-  AdvanceSweepers(static_cast<int>(new_space_.Size()));
-
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -1166,29 +996,32 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceStart();
-  promotion_queue_.Initialize();
+  Address new_space_front = new_space_.ToSpaceLow();
+  promotion_queue_.Initialize(new_space_.ToSpaceHigh());
 
-#ifdef DEBUG
-  store_buffer()->Clean();
-#endif
-
+  is_safe_to_read_maps_ = false;
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.
-  {
-    StoreBufferRebuildScope scope(this,
-                                  store_buffer(),
-                                  &ScavengeStoreBufferCallback);
-    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
-  }
+  // Copy objects reachable from the old generation.  By definition,
+  // there are no intergenerational pointers in code or data spaces.
+  IterateDirtyRegions(old_pointer_space_,
+                      &Heap::IteratePointersInDirtyRegion,
+                      &ScavengePointer,
+                      WATERMARK_CAN_BE_INVALID);
+
+  IterateDirtyRegions(map_space_,
+                      &IteratePointersInDirtyMapsRegion,
+                      &ScavengePointer,
+                      WATERMARK_CAN_BE_INVALID);
+
+  lo_space_->IterateDirtyRegions(&ScavengePointer);
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL; cell = cell_iterator.Next()) {
+  for (HeapObject* cell = cell_iterator.next();
+       cell != NULL; cell = cell_iterator.next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1207,34 +1040,27 @@
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
+
   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
-  promotion_queue_.Destroy();
-
   LiveObjectList::UpdateReferencesForScavengeGC();
-  if (!FLAG_watch_ic_patching) {
-    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
-  }
-  incremental_marking()->UpdateMarkingDequeAfterScavenge();
+  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
+  is_safe_to_read_maps_ = true;
+
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
-  new_space_.LowerInlineAllocationLimit(
-      new_space_.inline_allocation_limit_step());
-
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
-
-  scavenges_since_last_idle_round_++;
 }
 
 
@@ -1255,9 +1081,7 @@
 
 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     ExternalStringTableUpdaterCallback updater_func) {
-  if (FLAG_verify_heap) {
-    external_string_table_.Verify();
-  }
+  external_string_table_.Verify();
 
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
@@ -1288,56 +1112,35 @@
 }
 
 
-void Heap::UpdateReferencesInExternalStringTable(
-    ExternalStringTableUpdaterCallback updater_func) {
-
-  // Update old space string references.
-  if (external_string_table_.old_space_strings_.length() > 0) {
-    Object** start = &external_string_table_.old_space_strings_[0];
-    Object** end = start + external_string_table_.old_space_strings_.length();
-    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
-  }
-
-  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
-}
-
-
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
                                              WeakObjectRetainer* retainer) {
-  Object* undefined = heap->undefined_value();
-  Object* head = undefined;
+  Object* head = heap->undefined_value();
   JSFunction* tail = NULL;
   Object* candidate = function;
-  while (candidate != undefined) {
+  while (candidate != heap->undefined_value()) {
     // Check whether to keep the candidate in the list.
     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined) {
+      if (head == heap->undefined_value()) {
         // First element in the list.
-        head = retain;
+        head = candidate_function;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
-        tail->set_next_function_link(retain);
+        tail->set_next_function_link(candidate_function);
       }
       // Retained function is new tail.
-      candidate_function = reinterpret_cast<JSFunction*>(retain);
       tail = candidate_function;
-
-      ASSERT(retain->IsUndefined() || retain->IsJSFunction());
-
-      if (retain == undefined) break;
     }
-
     // Move to next element in the list.
     candidate = candidate_function->next_function_link();
   }
 
   // Terminate the list if there is one or more elements.
   if (tail != NULL) {
-    tail->set_next_function_link(undefined);
+    tail->set_next_function_link(heap->undefined_value());
   }
 
   return head;
@@ -1345,32 +1148,28 @@
 
 
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  Object* undefined = undefined_value();
-  Object* head = undefined;
+  Object* head = undefined_value();
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-  while (candidate != undefined) {
+  while (candidate != undefined_value()) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined) {
+      if (head == undefined_value()) {
         // First element in the list.
-        head = retain;
+        head = candidate_context;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_unchecked(this,
                             Context::NEXT_CONTEXT_LINK,
-                            retain,
+                            candidate_context,
                             UPDATE_WRITE_BARRIER);
       }
       // Retained context is new tail.
-      candidate_context = reinterpret_cast<Context*>(retain);
       tail = candidate_context;
 
-      if (retain == undefined) break;
-
       // Process the weak list of optimized functions for the context.
       Object* function_list_head =
           ProcessFunctionWeakReferences(
@@ -1382,7 +1181,6 @@
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
     }
-
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
   }
@@ -1400,28 +1198,6 @@
 }
 
 
-void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
-  AssertNoAllocation no_allocation;
-
-  class VisitorAdapter : public ObjectVisitor {
-   public:
-    explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
-        : visitor_(visitor) {}
-    virtual void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) {
-        if ((*p)->IsExternalString()) {
-          visitor_->VisitExternalString(Utils::ToLocal(
-              Handle<String>(String::cast(*p))));
-        }
-      }
-    }
-   private:
-    v8::ExternalResourceVisitor* visitor_;
-  } visitor_adapter(visitor);
-  external_string_table_.Iterate(&visitor_adapter);
-}
-
-
 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
  public:
   static inline void VisitPointer(Heap* heap, Object** p) {
@@ -1436,45 +1212,35 @@
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
-    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    ASSERT(new_space_front <= new_space_.top());
+
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front != new_space_.top()) {
-      if (!NewSpacePage::IsAtEnd(new_space_front)) {
-        HeapObject* object = HeapObject::FromAddress(new_space_front);
-        new_space_front +=
-          NewSpaceScavenger::IterateBody(object->map(), object);
-      } else {
-        new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
-      }
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
     }
 
     // Promote and process all the to-be-promoted objects.
-    {
-      StoreBufferRebuildScope scope(this,
-                                    store_buffer(),
-                                    &ScavengeStoreBufferCallback);
-      while (!promotion_queue()->is_empty()) {
-        HeapObject* target;
-        int size;
-        promotion_queue()->remove(&target, &size);
+    while (!promotion_queue_.is_empty()) {
+      HeapObject* target;
+      int size;
+      promotion_queue_.remove(&target, &size);
 
-        // A promoted object might already be partially visited
-        // during old space pointer iteration. Thus we search specifically
-        // for pointers to the from-semispace instead of looking for pointers
-        // to new space.
-        ASSERT(!target->IsMap());
-        IterateAndMarkPointersToFromSpace(target->address(),
-                                          target->address() + size,
-                                          &ScavengeObject);
-      }
+      // A promoted object might already be partially visited
+      // during dirty regions iteration. Thus we search specifically
+      // for pointers to the from-semispace instead of looking for pointers
+      // to new space.
+      ASSERT(!target->IsMap());
+      IterateAndMarkPointersToFromSpace(target->address(),
+                                        target->address() + size,
+                                        &ScavengePointer);
     }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front != new_space_.top());
+  } while (new_space_front < new_space_.top());
 
   return new_space_front;
 }
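
DoScavenge above is the scan loop of Cheney's copying algorithm: the region between new_space_front and the allocation top is a queue of copied-but-unscanned objects, and scanning one object may copy more, advancing the top. A compact standalone model of that loop, with two-child nodes instead of heap objects:

    // Compact model of the Cheney scan driving DoScavenge: objects between a
    // scan pointer and the allocation top form a queue of copied-but-unscanned
    // objects; scanning one may copy more, moving the top forward.
    #include <cstdio>

    struct Node { Node* child[2]; Node* forward; };

    static Node to_space[64];
    static int top = 0;

    Node* Copy(Node* n) {
      if (n == 0) return 0;
      if (n->forward) return n->forward;      // Already evacuated.
      to_space[top] = *n;                     // Copy the object to to-space.
      n->forward = &to_space[top++];          // Leave a forwarding pointer.
      return n->forward;
    }

    int main() {
      Node c = {{0, 0}, 0}, b = {{&c, 0}, 0}, a = {{&b, &c}, 0};
      int scan = 0;                           // new_space_front analogue.
      Copy(&a);                               // Copy the root.
      while (scan < top) {                    // Queue is (scan, top).
        for (int i = 0; i < 2; i++)           // Scanning may grow top.
          to_space[scan].child[i] = Copy(to_space[scan].child[i]);
        scan++;
      }
      printf("copied %d objects\n", top);     // 3: each object copied once.
    }
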
@@ -1486,11 +1252,26 @@
 };
 
 
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
 
 
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
+static Atomic32 scavenging_visitors_table_mode_;
+static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+
+INLINE(static void DoScavengeObject(Map* map,
+                                    HeapObject** slot,
+                                    HeapObject* obj));
+
+
+void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+}
+
+
+template<LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1525,13 +1306,9 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
-    if (marks_handling == IGNORE_MARKS) {
-      table_.Register(kVisitJSFunction,
-                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                          template VisitSpecialized<JSFunction::kSize>);
-    } else {
-      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
-    }
+    table_.Register(kVisitJSFunction,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                        template VisitSpecialized<JSFunction::kSize>);
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1572,10 +1349,10 @@
   // Helper function used by CopyObject to copy a source object to an
   // allocated target object and update the forwarding pointer in the source
   // object.  Returns the target object.
-  INLINE(static void MigrateObject(Heap* heap,
-                                   HeapObject* source,
-                                   HeapObject* target,
-                                   int size)) {
+  INLINE(static HeapObject* MigrateObject(Heap* heap,
+                                          HeapObject* source,
+                                          HeapObject* target,
+                                          int size)) {
     // Copy the content of source to target.
     heap->CopyBlock(target->address(), source->address(), size);
 
@@ -1596,30 +1373,26 @@
       }
     }
 
-    if (marks_handling == TRANSFER_MARKS) {
-      if (Marking::TransferColor(source, target)) {
-        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
-      }
-    }
+    return target;
   }
 
+
   template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
                                     HeapObject* object,
                                     int object_size) {
-    SLOW_ASSERT((size_restriction != SMALL) ||
-                (object_size <= Page::kMaxNonCodeHeapObjectSize));
-    SLOW_ASSERT(object->Size() == object_size);
+    ASSERT((size_restriction != SMALL) ||
+           (object_size <= Page::kMaxHeapObjectSize));
+    ASSERT(object->Size() == object_size);
 
-    Heap* heap = map->GetHeap();
+    Heap* heap = map->heap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRaw(object_size,
-                                                     NOT_EXECUTABLE);
+          (object_size > Page::kMaxHeapObjectSize)) {
+        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
       } else {
         if (object_contents == DATA_OBJECT) {
           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1631,12 +1404,7 @@
       Object* result = NULL;  // Initialization to please compiler.
       if (maybe_result->ToObject(&result)) {
         HeapObject* target = HeapObject::cast(result);
-
-        // Order is important: slot might be inside of the target if target
-        // was allocated over a dead object and slot comes from the store
-        // buffer.
-        *slot = target;
-        MigrateObject(heap, object, target, object_size);
+        *slot = MigrateObject(heap, object, target, object_size);
 
         if (object_contents == POINTER_OBJECT) {
           heap->promotion_queue()->insert(target, object_size);
@@ -1646,42 +1414,13 @@
         return;
       }
     }
-    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
-    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-    Object* result = allocation->ToObjectUnchecked();
-    HeapObject* target = HeapObject::cast(result);
-
-    // Order is important: slot might be inside of the target if target
-    // was allocated over a dead object and slot comes from the store
-    // buffer.
-    *slot = target;
-    MigrateObject(heap, object, target, object_size);
+    Object* result =
+        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
+    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
     return;
   }
 
 
-  static inline void EvacuateJSFunction(Map* map,
-                                        HeapObject** slot,
-                                        HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::
-        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
-
-    HeapObject* target = *slot;
-    MarkBit mark_bit = Marking::MarkBitFrom(target);
-    if (Marking::IsBlack(mark_bit)) {
-      // This object is black and it might not be rescanned by marker.
-      // We should explicitly record code entry slot for compaction because
-      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
-      // miss it as it is not HeapObject-tagged.
-      Address code_entry_slot =
-          target->address() + JSFunction::kCodeEntryOffset;
-      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
-      map->GetHeap()->mark_compact_collector()->
-          RecordCodeEntrySlot(code_entry_slot, code);
-    }
-  }
-
-
   static inline void EvacuateFixedArray(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
@@ -1740,17 +1479,14 @@
                                                HeapObject* object) {
     ASSERT(IsShortcutCandidate(map->instance_type()));
 
-    Heap* heap = map->GetHeap();
-
-    if (marks_handling == IGNORE_MARKS &&
-        ConsString::cast(object)->unchecked_second() ==
-        heap->empty_string()) {
+    if (ConsString::cast(object)->unchecked_second() ==
+        map->heap()->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
       *slot = first;
 
-      if (!heap->InNewSpace(first)) {
+      if (!map->heap()->InNewSpace(first)) {
         object->set_map_word(MapWord::FromForwardingAddress(first));
         return;
       }
@@ -1764,7 +1500,7 @@
         return;
       }
 
-      heap->DoScavengeObject(first->map(), slot, first);
+      DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
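
EvacuateShortcutCandidate above implements cons-string shortcutting: a cons string whose second half is the empty string is replaced by its first half during evacuation, so the indirection disappears. A sketch of the idea with plain structs (forwarding words and write barriers omitted):

    // Sketch of cons-string shortcutting: when the right half of a cons string
    // is empty, forward references directly to the left half instead of
    // copying the wrapper. Strings here are plain structs.
    #include <cstdio>

    struct String {
      const char* chars;     // NULL for a cons wrapper.
      String* first;         // Left half (cons only).
      String* second;        // Right half (cons only).
    };

    static String empty_string = { "", 0, 0 };

    String* Shortcut(String* s) {
      // Follow cons wrappers whose second half is the empty string.
      while (s->chars == 0 && s->second == &empty_string) s = s->first;
      return s;
    }

    int main() {
      String flat = { "hello", 0, 0 };
      String cons = { 0, &flat, &empty_string };  // "hello" + "".
      printf("%s\n", Shortcut(&cons)->chars);     // hello: wrapper skipped.
    }
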
@@ -1795,70 +1531,55 @@
 };
 
 
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
+template<LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+    ScavengingVisitor<logging_and_profiling_mode>::table_;
 
 
 static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  scavenging_visitors_table_.CopyFrom(
+      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
+  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
 }
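
The restored initialization instantiates one visitor table per compile-time mode and selects one at runtime by copying it into the shared table, so the per-object dispatch itself carries no mode check. A sketch of that template-specialized table pattern, with simplified visitor ids:

    // Sketch of the template-specialized dispatch-table pattern: one static
    // table of function pointers per compile-time mode, selected at runtime
    // by copying the chosen table into the shared one.
    #include <cstdio>
    #include <cstring>

    enum Mode { PROFILING_DISABLED, PROFILING_ENABLED };
    enum VisitorId { kVisitA, kVisitB, kNumVisitorIds };
    typedef void (*Callback)(int object);

    template <Mode mode>
    struct Visitor {
      static Callback table[kNumVisitorIds];
      static void VisitA(int o) {
        if (mode == PROFILING_ENABLED) printf("log A %d\n", o);  // Folded away
      }                                                          // when disabled.
      static void VisitB(int o) {
        if (mode == PROFILING_ENABLED) printf("log B %d\n", o);
      }
      static void Initialize() {
        table[kVisitA] = &VisitA;
        table[kVisitB] = &VisitB;
      }
    };
    template <Mode mode> Callback Visitor<mode>::table[kNumVisitorIds];

    static Callback shared_table[kNumVisitorIds];

    int main() {
      Visitor<PROFILING_DISABLED>::Initialize();
      Visitor<PROFILING_ENABLED>::Initialize();
      // Start in the cheap mode; profiling can swap the table in later.
      memcpy(shared_table, Visitor<PROFILING_DISABLED>::table,
             sizeof(shared_table));
      shared_table[kVisitA](42);  // Dispatch through the shared table: silent.
      memcpy(shared_table, Visitor<PROFILING_ENABLED>::table,
             sizeof(shared_table));   // Profiling got turned on: swap tables.
      shared_table[kVisitA](7);       // Now logs: "log A 7".
    }
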
 
 
-void Heap::SelectScavengingVisitorsTable() {
-  bool logging_and_profiling =
-      isolate()->logger()->is_logging() ||
+void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
+  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
+    // Table was already updated by some isolate.
+    return;
+  }
+
+  if (isolate()->logger()->is_logging() ||
       CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_profiling());
+       isolate()->heap_profiler()->is_profiling())) {
+    // If one of the isolates is performing a scavenge at this moment,
+    // it might see this table in an inconsistent state when
+    // some of the callbacks point to
+    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
+    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
+    // However, this does not lead to any bugs, because such an isolate does
+    // not have profiling enabled and any isolate with profiling enabled is
+    // guaranteed to see the table in a consistent state.
+    scavenging_visitors_table_.CopyFrom(
+        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
 
-  if (!incremental_marking()->IsMarking()) {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-  } else {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-
-    if (incremental_marking()->IsCompacting()) {
-      // When compacting forbid short-circuiting of cons-strings.
-      // Scavenging code relies on the fact that new space object
-      // can't be evacuated into evacuation candidate but
-      // short-circuiting violates this assumption.
-      scavenging_visitors_table_.Register(
-          StaticVisitorBase::kVisitShortcutCandidate,
-          scavenging_visitors_table_.GetVisitorById(
-              StaticVisitorBase::kVisitConsString));
-    }
+    // We use Release_Store to prevent reordering of this write before writes
+    // to the table.
+    Release_Store(&scavenging_visitors_table_mode_,
+                  LOGGING_AND_PROFILING_ENABLED);
   }
 }
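
The restored switch above is a one-way upgrade: fill the shared table first, then publish the new mode with a release store, so that any reader that observes LOGGING_AND_PROFILING_ENABLED also observes a fully-copied table. The same ordering idea expressed with std::atomic (a sketch, not V8's Atomic32 API):

    // Sketch of the one-way table upgrade using std::atomic in place of V8's
    // Release_Store: fill the table first, then publish the mode with a
    // release store so a reader that sees ENABLED (acquire) also sees the table.
    #include <atomic>
    #include <cstdio>

    enum Mode { DISABLED, ENABLED };
    static int shared_table[4];                    // Stand-in dispatch table.
    static std::atomic<int> table_mode(DISABLED);

    void UpgradeIfNeeded(bool profiling) {
      if (table_mode.load(std::memory_order_acquire) == ENABLED) return;
      if (!profiling) return;
      for (int i = 0; i < 4; i++) shared_table[i] = 100 + i;  // Fill the table...
      // ...then publish: the release store cannot be reordered before the fills.
      table_mode.store(ENABLED, std::memory_order_release);
    }

    int main() {
      UpgradeIfNeeded(true);
      if (table_mode.load(std::memory_order_acquire) == ENABLED)
        printf("table[0]=%d\n", shared_table[0]);  // Guaranteed to print 100.
    }
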
 
 
 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  SLOW_ASSERT(HEAP->InFromSpace(object));
+  ASSERT(HEAP->InFromSpace(object));
   MapWord first_word = object->map_word();
-  SLOW_ASSERT(!first_word.IsForwardingAddress());
+  ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  map->GetHeap()->DoScavengeObject(map, p, object);
+  DoScavengeObject(map, p, object);
 }
 
 
@@ -1884,31 +1605,29 @@
 }
 
 
-MaybeObject* Heap::AllocateMap(InstanceType instance_type,
-                               int instance_size,
-                               ElementsKind elements_kind) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
   Map* map = reinterpret_cast<Map*>(result);
-  map->set_map_no_write_barrier(meta_map());
+  map->set_map(meta_map());
   map->set_instance_type(instance_type);
   map->set_visitor_id(
       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
-  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
-  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
+  map->set_prototype(null_value());
+  map->set_constructor(null_value());
   map->set_instance_size(instance_size);
   map->set_inobject_properties(0);
   map->set_pre_allocated_property_fields(0);
   map->init_instance_descriptors();
-  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->set_prototype_transitions(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->set_code_cache(empty_fixed_array());
+  map->set_prototype_transitions(empty_fixed_array());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  map->set_elements_kind(elements_kind);
+  map->set_elements_kind(FAST_ELEMENTS);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1921,12 +1640,13 @@
 
 
 MaybeObject* Heap::AllocateCodeCache() {
-  CodeCache* code_cache;
-  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
-    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
+  Object* result;
+  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
+  CodeCache* code_cache = CodeCache::cast(result);
+  code_cache->set_default_cache(empty_fixed_array());
+  code_cache->set_normal_type_cache(undefined_value());
   return code_cache;
 }
 
@@ -1936,40 +1656,6 @@
 }
 
 
-MaybeObject* Heap::AllocateAccessorPair() {
-  AccessorPair* accessors;
-  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
-    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
-  }
-  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
-  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
-  return accessors;
-}
-
-
-MaybeObject* Heap::AllocateTypeFeedbackInfo() {
-  TypeFeedbackInfo* info;
-  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
-    if (!maybe_info->To(&info)) return maybe_info;
-  }
-  info->set_ic_total_count(0);
-  info->set_ic_with_typeinfo_count(0);
-  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
-                                SKIP_WRITE_BARRIER);
-  return info;
-}
-
-
-MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
-  AliasedArgumentsEntry* entry;
-  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
-    if (!maybe_entry->To(&entry)) return maybe_entry;
-  }
-  entry->set_aliased_context_slot(aliased_context_slot);
-  return entry;
-}
-
-
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
   {type, size, k##camel_name##MapRootIndex},
@@ -2021,19 +1707,12 @@
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_null_value(Oddball::cast(obj));
+  set_null_value(obj);
   Oddball::cast(obj)->set_kind(Oddball::kNull);
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
-    if (!maybe_obj->ToObject(&obj)) return false;
-  }
-  set_undefined_value(Oddball::cast(obj));
-  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
-  ASSERT(!InNewSpace(undefined_value()));
-
   // Allocate the empty descriptor array.
   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2074,7 +1753,7 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_scope_info_map(Map::cast(obj));
+  set_serialized_scope_info_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2119,12 +1798,6 @@
   }
   set_byte_array_map(Map::cast(obj));
 
-  { MaybeObject* maybe_obj =
-        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
-    if (!maybe_obj->ToObject(&obj)) return false;
-  }
-  set_free_space_map(Map::cast(obj));
-
   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -2253,12 +1926,6 @@
         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_module_context_map(Map::cast(obj));
-
-  { MaybeObject* maybe_obj =
-        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
-    if (!maybe_obj->ToObject(&obj)) return false;
-  }
   Map* global_context_map = Map::cast(obj);
   global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
   set_global_context_map(global_context_map);
@@ -2283,7 +1950,7 @@
 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
   Object* result;
@@ -2292,7 +1959,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
+  HeapObject::cast(result)->set_map(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -2304,13 +1971,13 @@
 
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
+  HeapObject::cast(result)->set_map(heap_number_map());
   HeapNumber::cast(result)->set_value(value);
   return result;
 }
@@ -2321,8 +1988,7 @@
   { MaybeObject* maybe_result = AllocateRawCell();
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  HeapObject::cast(result)->set_map_no_write_barrier(
-      global_property_cell_map());
+  HeapObject::cast(result)->set_map(global_property_cell_map());
   JSGlobalPropertyCell::cast(result)->set_value(value);
   return result;
 }
@@ -2332,7 +1998,7 @@
                                  Object* to_number,
                                  byte kind) {
   Object* result;
-  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
+  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2345,13 +2011,7 @@
   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  // Don't use Smi-only elements optimizations for objects with the neander
-  // map. There are too many cases where element values are set directly,
-  // with no bottleneck at which to trap the Smi-only -> fast elements
-  // transition, and there appears to be no benefit in optimizing this case.
-  Map* new_neander_map = Map::cast(obj);
-  new_neander_map->set_elements_kind(FAST_ELEMENTS);
-  set_neander_map(new_neander_map);
+  set_neander_map(Map::cast(obj));
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2396,12 +2056,6 @@
   // To workaround the problem, make separate functions without inlining.
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
-
-  // Create stubs that should be there, so we don't unexpectedly have to
-  // create them if we need them during the creation of another stub.
-  // Stub creation mixes raw pointers and handles in an unsafe manner so
-  // we cannot create stubs while we are creating stubs.
-  CodeStub::GenerateStubsAheadOfTime();
 }
 
 
@@ -2412,22 +2066,20 @@
   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_minus_zero_value(HeapNumber::cast(obj));
+  set_minus_zero_value(obj);
   ASSERT(signbit(minus_zero_value()->Number()) != 0);
 
   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_nan_value(HeapNumber::cast(obj));
+  set_nan_value(obj);
 
-  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_infinity_value(HeapNumber::cast(obj));
-
-  // The hole has not been created yet, but we want to put something
-  // predictable in the gaps in the symbol table, so lets make that Smi zero.
-  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
+  set_undefined_value(obj);
+  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+  ASSERT(!InNewSpace(undefined_value()));
 
   // Allocate initial symbol table.
   { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
@@ -2436,17 +2088,19 @@
   // Don't use set_symbol_table() due to asserts.
   roots_[kSymbolTableRootIndex] = obj;
 
-  // Finish initializing oddballs after creating the symbol table.
-  { MaybeObject* maybe_obj =
-        undefined_value()->Initialize("undefined",
-                                      nan_value(),
-                                      Oddball::kUndefined);
-    if (!maybe_obj->ToObject(&obj)) return false;
+  // Assign the print strings for oddballs after creating the symbol table.
+  Object* symbol;
+  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
+    if (!maybe_symbol->ToObject(&symbol)) return false;
   }
+  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
+  Oddball::cast(undefined_value())->set_to_number(nan_value());
 
-  // Initialize the null_value.
+  // Allocate the null_value.
   { MaybeObject* maybe_obj =
-        null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
+        Oddball::cast(null_value())->Initialize("null",
+                                                Smi::FromInt(0),
+                                                Oddball::kNull);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
 
@@ -2455,28 +2109,28 @@
                                            Oddball::kTrue);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_true_value(Oddball::cast(obj));
+  set_true_value(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("false",
                                            Smi::FromInt(0),
                                            Oddball::kFalse);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_false_value(Oddball::cast(obj));
+  set_false_value(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("hole",
                                            Smi::FromInt(-1),
                                            Oddball::kTheHole);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_the_hole_value(Oddball::cast(obj));
+  set_the_hole_value(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                            Smi::FromInt(-4),
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_arguments_marker(Oddball::cast(obj));
+  set_arguments_marker(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                            Smi::FromInt(-2),
@@ -2532,7 +2186,6 @@
   }
   set_code_stubs(UnseededNumberDictionary::cast(obj));
 
-
   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
   // is set to avoid expanding the dictionary during bootstrapping.
   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64);
@@ -2561,10 +2214,7 @@
   }
   set_intrinsic_function_names(StringDictionary::cast(obj));
 
-  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
-    if (!maybe_obj->ToObject(&obj)) return false;
-  }
-  set_number_string_cache(FixedArray::cast(obj));
+  if (InitializeNumberStringCache()->IsFailure()) return false;
 
   // Allocate cache for single character ASCII strings.
   { MaybeObject* maybe_obj =
@@ -2663,7 +2313,7 @@
       }
     }
   }
-  array->set_map_no_write_barrier(heap->fixed_cow_array_map());
+  array->set_map(heap->fixed_cow_array_map());
 }
 
 
@@ -2674,41 +2324,17 @@
 }
 
 
-MaybeObject* Heap::AllocateInitialNumberStringCache() {
-  MaybeObject* maybe_obj =
-      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
-  return maybe_obj;
-}
-
-
-int Heap::FullSizeNumberStringCacheLength() {
-  // Compute the size of the number string cache based on the max newspace size.
-  // The number string cache has a minimum size based on twice the initial cache
-  // size to ensure that it is bigger after being made 'full size'.
+MaybeObject* Heap::InitializeNumberStringCache() {
+  // Compute the size of the number string cache based on the max heap size.
+  // max_semispace_size_ == 512 KB => number_string_cache_size = 1K.
+  // max_semispace_size_ ==   8 MB => number_string_cache_size = 16K.
   int number_string_cache_size = max_semispace_size_ / 512;
-  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
-                                 Min(0x4000, number_string_cache_size));
-  // There is a string and a number per entry so the length is twice the number
-  // of entries.
-  return number_string_cache_size * 2;
-}
-
-
-void Heap::AllocateFullSizeNumberStringCache() {
-  // The idea is to have a small number string cache in the snapshot to keep
-  // boot-time memory usage down.  If we expand the number string cache already
-  // while creating the snapshot then that didn't work out.
-  ASSERT(!Serializer::enabled());
+  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
+  Object* obj;
   MaybeObject* maybe_obj =
-      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
-  Object* new_cache;
-  if (maybe_obj->ToObject(&new_cache)) {
-    // We don't bother to repopulate the cache with entries from the old cache.
-    // It will be repopulated soon enough with new strings.
-    set_number_string_cache(FixedArray::cast(new_cache));
-  }
-  // If allocation fails then we just return without doing anything.  It is only
-  // a cache, so best effort is OK here.
+      AllocateFixedArray(number_string_cache_size * 2, TENURED);
+  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
+  return maybe_obj;
 }
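
Worked through, the heuristic above divides the semispace budget (in bytes)
by 512 and clamps the result, so 512 KB yields 1K entries and 8 MB hits the
16K cap. A standalone sketch of the same computation, assuming KB = 1024 as
in V8's globals.h:

  // Entry count for the number string cache; the backing FixedArray is
  // twice this long, holding a (number, string) pair per entry.
  static int NumberStringCacheEntries(int max_semispace_size_in_bytes) {
    int entries = max_semispace_size_in_bytes / 512;
    if (entries < 32) entries = 32;                // floor for tiny heaps
    if (entries > 16 * 1024) entries = 16 * 1024;  // cap at 16K entries
    return entries;
  }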
 
 
@@ -2757,17 +2383,11 @@
   int mask = (number_string_cache()->length() >> 1) - 1;
   if (number->IsSmi()) {
     hash = smi_get_hash(Smi::cast(number)) & mask;
+    number_string_cache()->set(hash * 2, Smi::cast(number));
   } else {
     hash = double_get_hash(number->Number()) & mask;
+    number_string_cache()->set(hash * 2, number);
   }
-  if (number_string_cache()->get(hash * 2) != undefined_value() &&
-      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
-    // The first time we have a hash collision, we move to the full sized
-    // number string cache.
-    AllocateFullSizeNumberStringCache();
-    return;
-  }
-  number_string_cache()->set(hash * 2, number);
   number_string_cache()->set(hash * 2 + 1, string);
 }
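
The restored SetNumberStringCache is a direct-mapped cache: a colliding
entry is simply overwritten, with no probing and no growth (the reverted
version instead grew to a full-size cache on the first collision). A
simplified model, assuming flat native arrays and a cruder hash than V8's
smi_get_hash/double_get_hash:

  // Entry i lives in slots 2*i (the number) and 2*i + 1 (its string);
  // masking works because the entry count is a power of two.
  struct NumberStringCacheModel {
    static const int kEntries = 32;  // must be a power of two
    double numbers[kEntries];
    const char* strings[kEntries];
    void Put(double number, const char* str) {
      int hash = static_cast<int>(number) & (kEntries - 1);
      numbers[hash] = number;        // colliding entry is overwritten
      strings[hash] = str;
    }
  };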
 
@@ -2802,15 +2422,6 @@
 }
 
 
-MaybeObject* Heap::Uint32ToString(uint32_t value,
-                                  bool check_number_string_cache) {
-  Object* number;
-  MaybeObject* maybe = NumberFromUint32(value);
-  if (!maybe->To<Object>(&number)) return maybe;
-  return NumberToString(number, check_number_string_cache);
-}
-
-
 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
 }
@@ -2867,12 +2478,14 @@
 
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Foreign* result;
-  MaybeObject* maybe_result = Allocate(foreign_map(), space);
-  if (!maybe_result->To(&result)) return maybe_result;
-  result->set_foreign_address(address);
+  Object* result;
+  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+
+  Foreign::cast(result)->set_address(address);
   return result;
 }
 
@@ -2886,20 +2499,18 @@
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
-  share->set_scope_info(ScopeInfo::Empty());
+  share->set_scope_info(SerializedScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
   share->set_construct_stub(construct_stub);
   share->set_instance_class_name(Object_symbol());
-  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
-  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_this_property_assignments(undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_deopt_counter(FLAG_deopt_every_n_times);
-  share->set_profiler_ticks(0);
-  share->set_ast_node_count(0);
+  share->set_function_data(undefined_value());
+  share->set_script(undefined_value());
+  share->set_debug_info(undefined_value());
+  share->set_inferred_name(empty_string());
+  share->set_initial_map(undefined_value());
+  share->set_this_property_assignments(undefined_value());
+  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
 
   // Set integer fields (smi or int, depending on the architecture).
   share->set_length(0);
@@ -2930,8 +2541,8 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   JSMessageObject* message = JSMessageObject::cast(result);
-  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
-  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
+  message->set_properties(Heap::empty_fixed_array());
+  message->set_elements(Heap::empty_fixed_array());
   message->set_type(type);
   message->set_arguments(arguments);
   message->set_start_position(start_position);
@@ -3022,8 +2633,8 @@
   bool is_ascii_data_in_two_byte_string = false;
   if (!is_ascii) {
     // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ASCII strings below, but
-    // we can try to save memory if all chars actually fit in ASCII.
+    // can't use the fast case code for short ascii strings below, but
+    // we can try to save memory if all chars actually fit in ascii.
     is_ascii_data_in_two_byte_string =
         first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
     if (is_ascii_data_in_two_byte_string) {
@@ -3032,9 +2643,9 @@
   }
 
   // If the resulting string is small make a flat string.
-  if (length < ConsString::kMinLength) {
+  if (length < String::kMinNonFlatLength) {
     // Note that neither of the two inputs can be a slice because:
-    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
+    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
     ASSERT(first->IsFlat());
     ASSERT(second->IsFlat());
     if (is_ascii) {
@@ -3047,14 +2658,14 @@
       // Copy first part.
       const char* src;
       if (first->IsExternalString()) {
-        src = ExternalAsciiString::cast(first)->GetChars();
+        src = ExternalAsciiString::cast(first)->resource()->data();
       } else {
         src = SeqAsciiString::cast(first)->GetChars();
       }
       for (int i = 0; i < first_length; i++) *dest++ = src[i];
       // Copy second part.
       if (second->IsExternalString()) {
-        src = ExternalAsciiString::cast(second)->GetChars();
+        src = ExternalAsciiString::cast(second)->resource()->data();
       } else {
         src = SeqAsciiString::cast(second)->GetChars();
       }
@@ -3110,7 +2721,7 @@
                                      int end,
                                      PretenureFlag pretenure) {
   int length = end - start;
-  if (length <= 0) {
+  if (length == 0) {
     return empty_string();
   } else if (length == 1) {
     return LookupSingleCharacterStringFromCode(buffer->Get(start));
@@ -3126,23 +2737,25 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
+  // TODO(1626): For now slicing external strings is not supported.  However,
+  // a flat cons string can have an external string as first part in some cases.
+  // Therefore we have to single out this case as well.
   if (!FLAG_string_slices ||
-      !buffer->IsFlat() ||
+      (buffer->IsConsString() &&
+        (!buffer->IsFlat() ||
+         !ConsString::cast(buffer)->first()->IsSeqString())) ||
+      buffer->IsExternalString() ||
       length < SlicedString::kMinLength ||
       pretenure == TENURED) {
     Object* result;
-    // WriteToFlat takes care of the case when an indirect string has a
-    // different encoding from its underlying string.  These encodings may
-    // differ because of externalization.
-    bool is_ascii = buffer->IsAsciiRepresentation();
-    { MaybeObject* maybe_result = is_ascii
-                                  ? AllocateRawAsciiString(length, pretenure)
-                                  : AllocateRawTwoByteString(length, pretenure);
+    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
+                     ? AllocateRawAsciiString(length, pretenure)
+                     : AllocateRawTwoByteString(length, pretenure);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     String* string_result = String::cast(result);
     // Copy the characters into the new object.
-    if (is_ascii) {
+    if (buffer->IsAsciiRepresentation()) {
       ASSERT(string_result->IsAsciiRepresentation());
       char* dest = SeqAsciiString::cast(string_result)->GetChars();
       String::WriteToFlat(buffer, dest, start, end);
@@ -3155,19 +2768,12 @@
   }
 
   ASSERT(buffer->IsFlat());
+  ASSERT(!buffer->IsExternalString());
 #if DEBUG
-  if (FLAG_verify_heap) {
-    buffer->StringVerify();
-  }
+  buffer->StringVerify();
 #endif
 
   Object* result;
-  // When slicing an indirect string we use its encoding for a newly created
-  // slice and don't check the encoding of the underlying string.  This is safe
-  // even if the encodings are different because of externalization.  If an
-  // indirect ASCII string is pointing to a two-byte string, the two-byte char
-  // codes of the underlying string must still fit into ASCII (because
-  // externalization must not change char codes).
   { Map* map = buffer->IsAsciiRepresentation()
                  ? sliced_ascii_string_map()
                  : sliced_string_map();
@@ -3193,14 +2799,13 @@
     sliced_string->set_parent(buffer);
     sliced_string->set_offset(start);
   }
-  ASSERT(sliced_string->parent()->IsSeqString() ||
-         sliced_string->parent()->IsExternalString());
+  ASSERT(sliced_string->parent()->IsSeqString());
   return result;
 }
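
Collected in one place, the copy-versus-slice decision restored above reads
roughly as the predicate below; the names are taken from the surrounding
diff, but this helper itself is an illustration, not a V8 API:

  // True when AllocateSubString must copy characters instead of
  // pointing a SlicedString at the parent buffer.
  static bool MustCopySubstring(String* buffer, int length, bool tenured) {
    if (!FLAG_string_slices) return true;          // slicing disabled
    if (buffer->IsExternalString()) return true;   // TODO(1626) above
    if (buffer->IsConsString() &&
        (!buffer->IsFlat() ||
         !ConsString::cast(buffer)->first()->IsSeqString())) {
      return true;                                 // unsuitable cons string
    }
    if (length < SlicedString::kMinLength) return true;  // too short
    return tenured;                                // TENURED means copy
  }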
 
 
 MaybeObject* Heap::AllocateExternalStringFromAscii(
-    const ExternalAsciiString::Resource* resource) {
+    ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -3223,7 +2828,7 @@
 
 
 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
-    const ExternalTwoByteString::Resource* resource) {
+    ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -3285,14 +2890,13 @@
   }
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
+  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
                    ? old_data_space_->AllocateRaw(size)
-                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+                   : lo_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
-      byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -3304,14 +2908,13 @@
   }
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
+      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
-      byte_array_map());
+  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
   reinterpret_cast<ByteArray*>(result)->set_length(length);
   return result;
 }
@@ -3321,12 +2924,12 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map_no_write_barrier(one_pointer_filler_map());
+    filler->set_map(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_no_write_barrier(two_pointer_filler_map());
+    filler->set_map(two_pointer_filler_map());
   } else {
-    filler->set_map_no_write_barrier(free_space_map());
-    FreeSpace::cast(filler)->set_size(size);
+    filler->set_map(byte_array_map());
+    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
   }
 }
 
@@ -3343,7 +2946,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
+  reinterpret_cast<ExternalArray*>(result)->set_map(
       MapForExternalArrayType(array_type));
   reinterpret_cast<ExternalArray*>(result)->set_length(length);
   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -3359,9 +2962,10 @@
                               bool immovable) {
   // Allocate ByteArray before the Code object, so that we do not risk
   // leaving uninitialized Code object (and breaking the heap).
-  ByteArray* reloc_info;
-  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
-  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
+  Object* reloc_info;
+  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
+    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
+  }
 
   // Compute size.
   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
@@ -3370,8 +2974,8 @@
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > code_space()->AreaSize() || immovable) {
-    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
+    maybe_result = lo_space_->AllocateRawCode(obj_size);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3380,21 +2984,18 @@
   if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // Initialize the object
-  HeapObject::cast(result)->set_map_no_write_barrier(code_map());
+  HeapObject::cast(result)->set_map(code_map());
   Code* code = Code::cast(result);
   ASSERT(!isolate_->code_range()->exists() ||
       isolate_->code_range()->contains(code->address()));
   code->set_instruction_size(desc.instr_size);
-  code->set_relocation_info(reloc_info);
+  code->set_relocation_info(ByteArray::cast(reloc_info));
   code->set_flags(flags);
   if (code->is_call_stub() || code->is_keyed_call_stub()) {
     code->set_check_type(RECEIVER_MAP_CHECK);
   }
-  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
-  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_gc_metadata(Smi::FromInt(0));
-  code->set_ic_age(global_ic_age_);
+  code->set_deoptimization_data(empty_fixed_array());
+  code->set_next_code_flushing_candidate(undefined_value());
   // Allow self references to created code object by patching the handle to
   // point to the newly allocated Code object.
   if (!self_reference.is_null()) {
@@ -3408,9 +3009,7 @@
   code->CopyFrom(desc);
 
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    code->Verify();
-  }
+  code->Verify();
 #endif
   return code;
 }
@@ -3420,8 +3019,8 @@
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > code_space()->AreaSize()) {
-    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
+  if (obj_size > MaxObjectSizeInPagedSpace()) {
+    maybe_result = lo_space_->AllocateRawCode(obj_size);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3463,8 +3062,8 @@
       static_cast<size_t>(code->instruction_end() - old_addr);
 
   MaybeObject* maybe_result;
-  if (new_obj_size > code_space()->AreaSize()) {
-    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
+  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
+    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -3490,9 +3089,7 @@
   new_code->Relocate(new_addr - old_addr);
 
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    code->Verify();
-  }
+  code->Verify();
 #endif
   return new_code;
 }
@@ -3510,15 +3107,14 @@
         AllocateRaw(map->instance_size(), space, retry_space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  // No need for write barrier since object is white and map is in old space.
-  HeapObject::cast(result)->set_map_no_write_barrier(map);
+  HeapObject::cast(result)->set_map(map);
   return result;
 }
 
 
-void Heap::InitializeFunction(JSFunction* function,
-                              SharedFunctionInfo* shared,
-                              Object* prototype) {
+MaybeObject* Heap::InitializeFunction(JSFunction* function,
+                                      SharedFunctionInfo* shared,
+                                      Object* prototype) {
   ASSERT(!prototype->IsMap());
   function->initialize_properties();
   function->initialize_elements();
@@ -3526,8 +3122,9 @@
   function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
-  function->set_literals_or_bindings(empty_fixed_array());
+  function->set_literals(empty_fixed_array());
   function->set_next_function_link(undefined_value());
+  return function;
 }
 
 
@@ -3537,18 +3134,8 @@
   // different context.
   JSFunction* object_function =
       function->context()->global_context()->object_function();
-
-  // Each function prototype gets a copy of the object function map.
-  // This avoid unwanted sharing of maps between prototypes of different
-  // constructors.
-  Map* new_map;
-  ASSERT(object_function->has_initial_map());
-  { MaybeObject* maybe_map =
-        object_function->initial_map()->CopyDropTransitions();
-    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
-  }
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
+  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   }
   // When creating the prototype for the function we must set its
@@ -3573,8 +3160,7 @@
   { MaybeObject* maybe_result = Allocate(function_map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  InitializeFunction(JSFunction::cast(result), shared, prototype);
-  return result;
+  return InitializeFunction(JSFunction::cast(result), shared, prototype);
 }
 
 
@@ -3585,7 +3171,7 @@
   JSObject* boilerplate;
   int arguments_object_size;
   bool strict_mode_callee = callee->IsJSFunction() &&
-      !JSFunction::cast(callee)->shared()->is_classic_mode();
+                            JSFunction::cast(callee)->shared()->strict_mode();
   if (strict_mode_callee) {
     boilerplate =
         isolate()->context()->global_context()->
@@ -3691,22 +3277,22 @@
       // Inline constructor can only handle inobject properties.
       fun->shared()->ForbidInlineConstructor();
     } else {
-      DescriptorArray* descriptors;
+      Object* descriptors_obj;
       { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
-        if (!maybe_descriptors_obj->To<DescriptorArray>(&descriptors)) {
+        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
           return maybe_descriptors_obj;
         }
       }
-      DescriptorArray::WhitenessWitness witness(descriptors);
+      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
       for (int i = 0; i < count; i++) {
         String* name = fun->shared()->GetThisPropertyAssignmentName(i);
         ASSERT(name->IsSymbol());
         FieldDescriptor field(name, i, NONE);
         field.SetEnumerationIndex(i);
-        descriptors->Set(i, &field, witness);
+        descriptors->Set(i, &field);
       }
       descriptors->SetNextEnumerationIndex(count);
-      descriptors->SortUnchecked(witness);
+      descriptors->SortUnchecked();
 
       // The descriptors may contain duplicates because the compiler does not
       // guarantee the uniqueness of property names (it would have required
@@ -3736,17 +3322,14 @@
   // TODO(1240798): Initialize the object's body using valid initial values
   // according to the object's initial map.  For example, if the map's
   // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
-  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
+  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (eg, Heap::empty_fixed_array()).  Currently, the object
   // verification code has to cope with (temporarily) invalid objects.  See
   // for example, JSArray::JSArrayVerify).
   Object* filler;
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
-  // Pre-allocated fields need to be initialized with undefined_value as well
-  // so that object accesses before the constructor completes (e.g. in the
-  // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
       JSFunction::cast(map->constructor())->shared()->
           IsInobjectSlackTrackingInProgress()) {
@@ -3756,7 +3339,7 @@
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map, Heap::undefined_value(), filler);
+  obj->InitializeBody(map->instance_size(), filler);
 }
 
 
@@ -3784,7 +3367,7 @@
   // Allocate the JSObject.
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
   Object* obj;
   { MaybeObject* maybe_obj = Allocate(map, space);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -3794,8 +3377,7 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
-         JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
@@ -3812,8 +3394,8 @@
     Map::cast(initial_map)->set_constructor(constructor);
   }
   // Allocate the object based on the constructors initial map.
-  MaybeObject* result = AllocateJSObjectFromMap(
-      constructor->initial_map(), pretenure);
+  MaybeObject* result =
+      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
 #ifdef DEBUG
   // Make sure result is NOT a global object if valid.
   Object* non_failure;
@@ -3823,64 +3405,6 @@
 }
 
 
-MaybeObject* Heap::AllocateJSArrayAndStorage(
-    ElementsKind elements_kind,
-    int length,
-    int capacity,
-    ArrayStorageAllocationMode mode,
-    PretenureFlag pretenure) {
-  ASSERT(capacity >= length);
-  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
-  JSArray* array;
-  if (!maybe_array->To(&array)) return maybe_array;
-
-  if (capacity == 0) {
-    array->set_length(Smi::FromInt(0));
-    array->set_elements(empty_fixed_array());
-    return array;
-  }
-
-  FixedArrayBase* elms;
-  MaybeObject* maybe_elms = NULL;
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
-      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
-    } else {
-      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
-      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
-    }
-  } else {
-    ASSERT(elements_kind == FAST_ELEMENTS ||
-           elements_kind == FAST_SMI_ONLY_ELEMENTS);
-    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
-      maybe_elms = AllocateUninitializedFixedArray(capacity);
-    } else {
-      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
-      maybe_elms = AllocateFixedArrayWithHoles(capacity);
-    }
-  }
-  if (!maybe_elms->To(&elms)) return maybe_elms;
-
-  array->set_elements(elms);
-  array->set_length(Smi::FromInt(length));
-  return array;
-}
-
-
-MaybeObject* Heap::AllocateJSArrayWithElements(
-    FixedArrayBase* elements,
-    ElementsKind elements_kind,
-    PretenureFlag pretenure) {
-  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
-  JSArray* array;
-  if (!maybe_array->To(&array)) return maybe_array;
-
-  array->set_elements(elements);
-  array->set_length(Smi::FromInt(elements->length()));
-  return array;
-}
-
-
 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
   // Allocate map.
   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
@@ -3896,7 +3420,6 @@
   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
-  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   return result;
 }
 
@@ -3920,7 +3443,6 @@
   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
-  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
   result->set_call_trap(call_trap);
   result->set_construct_trap(construct_trap);
   return result;
@@ -3988,7 +3510,7 @@
   }
   Map* new_map = Map::cast(obj);
 
-  // Set up the global object as a normalized object.
+  // Setup the global object as a normalized object.
   global->set_map(new_map);
   global->map()->clear_instance_descriptors();
   global->set_properties(dictionary);
@@ -4003,15 +3525,13 @@
 MaybeObject* Heap::CopyJSObject(JSObject* source) {
   // Never used to copy functions.  If functions need to be copied we
   // have to be careful to clear the literals array.
-  SLOW_ASSERT(!source->IsJSFunction());
+  ASSERT(!source->IsJSFunction());
 
   // Make the clone.
   Map* map = source->map();
   int object_size = map->instance_size();
   Object* clone;
 
-  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
   // If we're forced to always allocate, we use the general allocation
   // functions which may leave us with an object in old space.
   if (always_allocate()) {
@@ -4028,11 +3548,10 @@
                  JSObject::kHeaderSize,
                  (object_size - JSObject::kHeaderSize) / kPointerSize);
   } else {
-    wb_mode = SKIP_WRITE_BARRIER;
     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
     }
-    SLOW_ASSERT(InNewSpace(clone));
+    ASSERT(InNewSpace(clone));
     // Since we know the clone is allocated in new space, we can copy
     // the contents without worrying about updating the write barrier.
     CopyBlock(HeapObject::cast(clone)->address(),
@@ -4040,8 +3559,6 @@
               object_size);
   }
 
-  SLOW_ASSERT(
-      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -4057,7 +3574,7 @@
       }
       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
     }
-    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
+    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
   }
   // Update properties if necessary.
   if (properties->length() > 0) {
@@ -4065,7 +3582,7 @@
     { MaybeObject* maybe_prop = CopyFixedArray(properties);
       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
     }
-    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
+    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
   return clone;
@@ -4074,13 +3591,13 @@
 
 MaybeObject* Heap::ReinitializeJSReceiver(
     JSReceiver* object, InstanceType type, int size) {
-  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
+  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Map* map;
-  MaybeObject* maybe = AllocateMap(type, size);
-  if (!maybe->To<Map>(&map)) return maybe;
+  MaybeObject* maybe_map_obj = AllocateMap(type, size);
+  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
 
   // Check that the receiver has at least the size of the fresh object.
   int size_difference = object->map()->instance_size() - map->instance_size();
@@ -4091,35 +3608,30 @@
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
   Object* properties;
-  maybe = AllocateFixedArray(prop_size, TENURED);
-  if (!maybe->ToObject(&properties)) return maybe;
-
-  // Functions require some allocation, which might fail here.
-  SharedFunctionInfo* shared = NULL;
-  if (type == JS_FUNCTION_TYPE) {
-    String* name;
-    maybe = LookupAsciiSymbol("<freezing call trap>");
-    if (!maybe->To<String>(&name)) return maybe;
-    maybe = AllocateSharedFunctionInfo(name);
-    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
+  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
+    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
   }
 
-  // Because of possible retries of this function after failure,
-  // we must NOT fail after this point, where we have changed the type!
-
   // Reset the map for the object.
   object->set_map(map);
-  JSObject* jsobj = JSObject::cast(object);
 
   // Reinitialize the object from the constructor map.
-  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
+  InitializeJSObjectFromMap(JSObject::cast(object),
+                            FixedArray::cast(properties), map);
 
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
-    map->set_function_with_prototype(true);
-    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
-    JSFunction::cast(object)->set_context(
-        isolate()->context()->global_context());
+    String* name;
+    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe_name->To<String>(&name)) return maybe_name;
+    SharedFunctionInfo* shared;
+    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
+    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
+    JSFunction* func;
+    MaybeObject* maybe_func =
+        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
+    func->set_context(isolate()->context()->global_context());
   }
 
   // Put in filler if the new object is smaller than the old.
@@ -4180,6 +3692,8 @@
 
 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                               PretenureFlag pretenure) {
+  // V8 only supports characters in the Basic Multilingual Plane.
+  const uc32 kMaxSupportedChar = 0xFFFF;
   // Count the number of characters in the UTF-8 string and check if
   // it is an ASCII string.
   Access<UnicodeCache::Utf8Decoder>
@@ -4187,12 +3701,8 @@
   decoder->Reset(string.start(), string.length());
   int chars = 0;
   while (decoder->has_more()) {
-    uint32_t r = decoder->GetNext();
-    if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      chars++;
-    } else {
-      chars += 2;
-    }
+    decoder->GetNext();
+    chars++;
   }
 
   Object* result;
@@ -4203,15 +3713,10 @@
   // Convert and copy the characters into the new object.
   String* string_result = String::cast(result);
   decoder->Reset(string.start(), string.length());
-  int i = 0;
-  while (i < chars) {
-    uint32_t r = decoder->GetNext();
-    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      string_result->Set(i++, unibrow::Utf16::LeadSurrogate(r));
-      string_result->Set(i++, unibrow::Utf16::TrailSurrogate(r));
-    } else {
-      string_result->Set(i++, r);
-    }
+  for (int i = 0; i < chars; i++) {
+    uc32 r = decoder->GetNext();
+    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
+    string_result->Set(i, r);
   }
   return result;
 }
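
As a concrete example of the clamp restored above: a four-byte UTF-8
sequence such as U+1F600 decodes to a single code point above 0xFFFF, so
this path stores the replacement character instead of the surrogate pair
(0xD83D, 0xDE00) that the reverted code emitted as two 16-bit units:

  uc32 r = 0x1F600;                    // from decoder->GetNext()
  if (r > kMaxSupportedChar) {         // kMaxSupportedChar == 0xFFFF
    r = unibrow::Utf8::kBadChar;       // U+FFFD, a single 16-bit unit
  }
  string_result->Set(i, r);            // never two units per character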
@@ -4244,22 +3749,31 @@
   if (InNewSpace(string)) return NULL;
 
   // Find the corresponding symbol map for strings.
-  switch (string->map()->instance_type()) {
-    case STRING_TYPE: return symbol_map();
-    case ASCII_STRING_TYPE: return ascii_symbol_map();
-    case CONS_STRING_TYPE: return cons_symbol_map();
-    case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
-    case EXTERNAL_STRING_TYPE: return external_symbol_map();
-    case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
-    case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
-      return external_symbol_with_ascii_data_map();
-    case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
-    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
-      return short_external_ascii_symbol_map();
-    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
-      return short_external_symbol_with_ascii_data_map();
-    default: return NULL;  // No match found.
+  Map* map = string->map();
+  if (map == ascii_string_map()) {
+    return ascii_symbol_map();
   }
+  if (map == string_map()) {
+    return symbol_map();
+  }
+  if (map == cons_string_map()) {
+    return cons_symbol_map();
+  }
+  if (map == cons_ascii_string_map()) {
+    return cons_ascii_symbol_map();
+  }
+  if (map == external_string_map()) {
+    return external_symbol_map();
+  }
+  if (map == external_ascii_string_map()) {
+    return external_ascii_symbol_map();
+  }
+  if (map == external_string_with_ascii_data_map()) {
+    return external_symbol_with_ascii_data_map();
+  }
+
+  // No match found.
+  return NULL;
 }
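
The point of this lookup is that symbolization can then happen in place; in
this era of the code the symbol table swapped the map on the existing string
rather than copying it. A hedged usage sketch, with the surrounding
symbol-table plumbing elided:

  Map* symbol_map = heap->SymbolMapForString(string);
  if (symbol_map != NULL) {
    // Same layout and contents; only the map changes, so the string
    // becomes a symbol without any new allocation.
    string->set_map(symbol_map);
  }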
 
 
@@ -4268,8 +3782,8 @@
                                           uint32_t hash_field) {
   ASSERT(chars >= 0);
   // Ensure the chars matches the number of characters in the buffer.
-  ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
-  // Determine whether the string is ASCII.
+  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
+  // Determine whether the string is ascii.
   bool is_ascii = true;
   while (buffer->has_more()) {
     if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
@@ -4299,13 +3813,13 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
-                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
+  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+                   ? lo_space_->AllocateRaw(size)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
+  reinterpret_cast<HeapObject*>(result)->set_map(map);
   // Set length and hash fields of the allocated string.
   String* answer = String::cast(result);
   answer->set_length(chars);
@@ -4314,15 +3828,8 @@
   ASSERT_EQ(size, answer->Size());
 
   // Fill in the characters.
-  int i = 0;
-  while (i < chars) {
-    uint32_t character = buffer->GetNext();
-    if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
-      answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
-    } else {
-      answer->Set(i++, character);
-    }
+  for (int i = 0; i < chars; i++) {
+    answer->Set(i, buffer->GetNext());
   }
   return answer;
 }
@@ -4343,12 +3850,11 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
+    } else if (size > MaxObjectSizeInPagedSpace()) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4357,7 +3863,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
+  HeapObject::cast(result)->set_map(ascii_string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4379,12 +3885,11 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
+    } else if (size > MaxObjectSizeInPagedSpace()) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4393,7 +3898,7 @@
   }
 
   // Partially initialize the object.
-  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
+  HeapObject::cast(result)->set_map(string_map());
   String::cast(result)->set_length(length);
   String::cast(result)->set_hash_field(String::kEmptyHashField);
   ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4401,25 +3906,6 @@
 }
 
 
-MaybeObject* Heap::AllocateJSArray(
-    ElementsKind elements_kind,
-    PretenureFlag pretenure) {
-  Context* global_context = isolate()->context()->global_context();
-  JSFunction* array_function = global_context->array_function();
-  Map* map = array_function->initial_map();
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    map = Map::cast(global_context->double_js_array_map());
-  } else if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
-    map = Map::cast(global_context->object_js_array_map());
-  } else {
-    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-    ASSERT(map == global_context->smi_js_array_map());
-  }
-
-  return AllocateJSObjectFromMap(map, pretenure);
-}
-
-
 MaybeObject* Heap::AllocateEmptyFixedArray() {
   int size = FixedArray::SizeFor(0);
   Object* result;
@@ -4428,8 +3914,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
-      fixed_array_map());
+  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
   reinterpret_cast<FixedArray*>(result)->set_length(0);
   return result;
 }
@@ -4446,7 +3931,7 @@
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
+      : lo_space_->AllocateRawFixedArray(size);
 }
 
 
@@ -4458,13 +3943,13 @@
   }
   if (InNewSpace(obj)) {
     HeapObject* dst = HeapObject::cast(obj);
-    dst->set_map_no_write_barrier(map);
+    dst->set_map(map);
     CopyBlock(dst->address() + kPointerSize,
               src->address() + kPointerSize,
               FixedArray::SizeFor(len) - kPointerSize);
     return obj;
   }
-  HeapObject::cast(obj)->set_map_no_write_barrier(map);
+  HeapObject::cast(obj)->set_map(map);
   FixedArray* result = FixedArray::cast(obj);
   result->set_length(len);
 
@@ -4484,7 +3969,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   HeapObject* dst = HeapObject::cast(obj);
-  dst->set_map_no_write_barrier(map);
+  dst->set_map(map);
   CopyBlock(
       dst->address() + FixedDoubleArray::kLengthOffset,
       src->address() + FixedDoubleArray::kLengthOffset,
@@ -4502,7 +3987,7 @@
   }
   // Initialize header.
   FixedArray* array = reinterpret_cast<FixedArray*>(result);
-  array->set_map_no_write_barrier(fixed_array_map());
+  array->set_map(fixed_array_map());
   array->set_length(length);
   // Initialize body.
   ASSERT(!InNewSpace(undefined_value()));
@@ -4523,13 +4008,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+             size > MaxObjectSizeInPagedSpace()) {
     // Too big for old pointer space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4550,7 +4035,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
+  HeapObject::cast(result)->set_map(heap->fixed_array_map());
   FixedArray* array = FixedArray::cast(result);
   array->set_length(length);
   MemsetPointer(array->data_start(), filler, length);
@@ -4583,8 +4068,7 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
-      fixed_array_map());
+  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
   FixedArray::cast(obj)->set_length(length);
   return obj;
 }
@@ -4598,7 +4082,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   // Initialize the object.
-  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
+  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
       fixed_double_array_map());
   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
   return result;
@@ -4608,38 +4092,16 @@
 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
     int length,
     PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_array();
+  if (length == 0) return empty_fixed_double_array();
 
-  Object* elements_object;
-  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
-  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
-  FixedDoubleArray* elements =
-      reinterpret_cast<FixedDoubleArray*>(elements_object);
-
-  elements->set_map_no_write_barrier(fixed_double_array_map());
-  elements->set_length(length);
-  return elements;
-}
-
-
-MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
-    int length,
-    PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_array();
-
-  Object* elements_object;
-  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
-  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
-  FixedDoubleArray* elements =
-      reinterpret_cast<FixedDoubleArray*>(elements_object);
-
-  for (int i = 0; i < length; ++i) {
-    elements->set_the_hole(i);
+  Object* obj;
+  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
-  elements->set_map_no_write_barrier(fixed_double_array_map());
-  elements->set_length(length);
-  return elements;
+  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
+  FixedDoubleArray::cast(obj)->set_length(length);
+  return obj;
 }
 
 
@@ -4656,13 +4118,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > Page::kMaxNonCodeHeapObjectSize) {
+             size > MaxObjectSizeInPagedSpace()) {
     // Too big for old data space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4673,8 +4135,7 @@
   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
-      hash_table_map());
+  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
   ASSERT(result->IsHashTable());
   return result;
 }
@@ -4687,10 +4148,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(global_context_map());
-  context->set_smi_js_array_map(undefined_value());
-  context->set_double_js_array_map(undefined_value());
-  context->set_object_js_array_map(undefined_value());
+  context->set_map(global_context_map());
   ASSERT(context->IsGlobalContext());
   ASSERT(result->IsContext());
   return result;
@@ -4704,7 +4162,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(function_context_map());
+  context->set_map(function_context_map());
   context->set_closure(function);
   context->set_previous(function->context());
   context->set_extension(NULL);
@@ -4724,7 +4182,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(catch_context_map());
+  context->set_map(catch_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(name);
@@ -4742,7 +4200,7 @@
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(with_context_map());
+  context->set_map(with_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(extension);
@@ -4753,14 +4211,14 @@
 
 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                         Context* previous,
-                                        ScopeInfo* scope_info) {
+                                        SerializedScopeInfo* scope_info) {
   Object* result;
   { MaybeObject* maybe_result =
-        AllocateFixedArrayWithHoles(scope_info->ContextLength());
+        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   Context* context = reinterpret_cast<Context*>(result);
-  context->set_map_no_write_barrier(block_context_map());
+  context->set_map(block_context_map());
   context->set_closure(function);
   context->set_previous(previous);
   context->set_extension(scope_info);
@@ -4769,11 +4227,14 @@
 }
 
 
-MaybeObject* Heap::AllocateScopeInfo(int length) {
-  FixedArray* scope_info;
-  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
-  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
-  scope_info->set_map_no_write_barrier(scope_info_map());
+MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
+  Object* result;
+  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  SerializedScopeInfo* scope_info =
+      reinterpret_cast<SerializedScopeInfo*>(result);
+  scope_info->set_map(serialized_scope_info_map());
   return scope_info;
 }
 
@@ -4791,7 +4252,7 @@
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
+      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4801,127 +4262,7 @@
 }
 
 
-bool Heap::IsHeapIterable() {
-  return (!old_pointer_space()->was_swept_conservatively() &&
-          !old_data_space()->was_swept_conservatively());
-}
-
-
-void Heap::EnsureHeapIsIterable() {
-  ASSERT(IsAllocationAllowed());
-  if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
-  }
-  ASSERT(IsHeapIterable());
-}
-
-
-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  // This flag prevents incremental marking from requesting GC via stack guard
-  idle_notification_will_schedule_next_gc_ = true;
-  incremental_marking()->Step(step_size);
-  idle_notification_will_schedule_next_gc_ = false;
-
-  if (incremental_marking()->IsComplete()) {
-    bool uncommit = false;
-    if (gc_count_at_last_idle_gc_ == gc_count_) {
-      // No GC since the last full GC, the mutator is probably not active.
-      isolate_->compilation_cache()->Clear();
-      uncommit = true;
-    }
-    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
-    gc_count_at_last_idle_gc_ = gc_count_;
-    if (uncommit) {
-      new_space_.Shrink();
-      UncommitFromSpace();
-    }
-  }
-}
-
-
-bool Heap::IdleNotification(int hint) {
-  const int kMaxHint = 1000;
-  intptr_t size_factor = Min(Max(hint, 30), kMaxHint) / 10;
-  // The size factor is in range [3..100].
-  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
-
-  if (contexts_disposed_ > 0) {
-    if (hint >= kMaxHint) {
-      // The embedder is requesting a lot of GC work after context disposal,
-      // we age inline caches so that they don't keep objects from
-      // the old context alive.
-      AgeInlineCaches();
-    }
-    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
-    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
-        incremental_marking()->IsStopped()) {
-      HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kReduceMemoryFootprintMask,
-                        "idle notification: contexts disposed");
-    } else {
-      AdvanceIdleIncrementalMarking(step_size);
-      contexts_disposed_ = 0;
-    }
-    // Make sure that we have no pending context disposals.
-    // Take into account that we might have decided to delay full collection
-    // because incremental marking is in progress.
-    ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
-    return false;
-  }
-
-  if (hint >= kMaxHint || !FLAG_incremental_marking ||
-      FLAG_expose_gc || Serializer::enabled()) {
-    return IdleGlobalGC();
-  }
-
-  // By doing small chunks of GC work in each IdleNotification,
-  // perform a round of incremental GCs and after that wait until
-  // the mutator creates enough garbage to justify a new round.
-  // An incremental GC progresses as follows:
-  // 1. many incremental marking steps,
-  // 2. one old space mark-sweep-compact,
-  // 3. many lazy sweep steps.
-  // Use mark-sweep-compact events to count incremental GCs in a round.
-
-
-  if (incremental_marking()->IsStopped()) {
-    if (!IsSweepingComplete() &&
-        !AdvanceSweepers(static_cast<int>(step_size))) {
-      return false;
-    }
-  }
-
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
-    if (EnoughGarbageSinceLastIdleRound()) {
-      StartIdleRound();
-    } else {
-      return true;
-    }
-  }
-
-  int new_mark_sweeps = ms_count_ - ms_count_at_last_idle_notification_;
-  mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
-  ms_count_at_last_idle_notification_ = ms_count_;
-
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
-    FinishIdleRound();
-    return true;
-  }
-
-  if (incremental_marking()->IsStopped()) {
-    if (!WorthStartingGCWhenIdle()) {
-      FinishIdleRound();
-      return true;
-    }
-    incremental_marking()->Start();
-  }
-
-  AdvanceIdleIncrementalMarking(step_size);
-  return false;
-}
-
-
-bool Heap::IdleGlobalGC() {
+bool Heap::IdleNotification() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
   static const int kIdlesBeforeMarkCompact = 8;
@@ -4949,7 +4290,12 @@
   }
 
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
-    CollectGarbage(NEW_SPACE, "idle notification");
+    if (contexts_disposed_ > 0) {
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      CollectAllGarbage(false);
+    } else {
+      CollectGarbage(NEW_SPACE);
+    }
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
@@ -4958,16 +4304,32 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
+    CollectAllGarbage(false);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
+    CollectAllGarbage(true);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
     finished = true;
+  } else if (contexts_disposed_ > 0) {
+    if (FLAG_expose_gc) {
+      contexts_disposed_ = 0;
+    } else {
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      CollectAllGarbage(false);
+      last_idle_notification_gc_count_ = gc_count_;
+    }
+    // If this is the first idle notification, we reset the
+    // notification count to avoid letting idle notifications for
+    // context disposal garbage collections start a potentially too
+    // aggressive idle GC cycle.
+    if (number_idle_notifications_ <= 1) {
+      number_idle_notifications_ = 0;
+      uncommit = false;
+    }
   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
     // If we have received more than kIdlesBeforeMarkCompact idle
     // notifications we do not perform any cleanup because we don't
@@ -4975,8 +4337,10 @@
     finished = true;
   }
 
+  // Make sure that we have no pending context disposals and
+  // conditionally uncommit from space.
+  ASSERT(contexts_disposed_ == 0);
   if (uncommit) UncommitFromSpace();
-
   return finished;
 }
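
The restored IdleNotification is driven by the count of consecutive idle
notifications; setting aside the context-disposal and uncommit handling, the
escalation ladder reduces to the model below (a sketch of the policy, not
the method itself):

  enum IdleAction { kNothing, kScavenge, kMarkSweep, kMarkCompact };

  // Thresholds copied from the constants above: 4, 7 and 8 consecutive
  // notifications trigger progressively heavier collections.
  static IdleAction ActionForIdleCount(int idles) {
    if (idles == 4) return kScavenge;     // kIdlesBeforeScavenge
    if (idles == 7) return kMarkSweep;    // also clears compilation cache
    if (idles == 8) return kMarkCompact;  // counter resets, finished = true
    return kNothing;                      // below or beyond the ladder
  }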
 
@@ -4984,7 +4348,7 @@
 #ifdef DEBUG
 
 void Heap::Print() {
-  if (!HasBeenSetUp()) return;
+  if (!HasBeenSetup()) return;
   isolate()->PrintStack();
   AllSpaces spaces;
   for (Space* space = spaces.next(); space != NULL; space = spaces.next())
@@ -5010,11 +4374,11 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
+  PrintF("mark-compact GC : %d\n", mc_count_);
   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_promotion_limit_);
   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_allocation_limit_);
-  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -5049,7 +4413,7 @@
 
 bool Heap::Contains(Address addr) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
-  return HasBeenSetUp() &&
+  return HasBeenSetup() &&
     (new_space_.ToSpaceContains(addr) ||
      old_pointer_space_->Contains(addr) ||
      old_data_space_->Contains(addr) ||
@@ -5067,7 +4431,7 @@
 
 bool Heap::InSpace(Address addr, AllocationSpace space) {
   if (OS::IsOutsideAllocatedSpace(addr)) return false;
-  if (!HasBeenSetUp()) return false;
+  if (!HasBeenSetup()) return false;
 
   switch (space) {
     case NEW_SPACE:
@@ -5091,18 +4455,69 @@
 
 
 #ifdef DEBUG
-void Heap::Verify() {
-  ASSERT(HasBeenSetUp());
+static void DummyScavengePointer(HeapObject** p) {
+}
 
-  store_buffer()->Verify();
+
+static void VerifyPointersUnderWatermark(
+    PagedSpace* space,
+    DirtyRegionCallback visit_dirty_region) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    Address start = page->ObjectAreaStart();
+    Address end = page->AllocationWatermark();
+
+    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+                              start,
+                              end,
+                              visit_dirty_region,
+                              &DummyScavengePointer);
+  }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC, the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy it point into the
+        // active semispace.
+        HEAP->InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+
+
+void Heap::Verify() {
+  ASSERT(HasBeenSetup());
 
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
 
   new_space_.Verify();
 
-  old_pointer_space_->Verify(&visitor);
-  map_space_->Verify(&visitor);
+  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+  old_pointer_space_->Verify(&dirty_regions_visitor);
+  map_space_->Verify(&dirty_regions_visitor);
+
+  VerifyPointersUnderWatermark(old_pointer_space_,
+                               &IteratePointersInDirtyRegion);
+  VerifyPointersUnderWatermark(map_space_,
+                               &IteratePointersInDirtyMapsRegion);
+  VerifyPointersUnderWatermark(lo_space_);
+
+  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -5110,36 +4525,6 @@
   cell_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
-
-  VerifyNoAccessorPairSharing();
-}
-
-
-void Heap::VerifyNoAccessorPairSharing() {
-  // Verification is done in 2 phases: First we mark all AccessorPairs, checking
-  // that we mark only unmarked pairs, then we clear all marks, restoring the
-  // initial state. We use the Smi tag of the AccessorPair's getter as the
-  // marking bit, because we can never see a Smi as the getter.
-  for (int phase = 0; phase < 2; phase++) {
-    HeapObjectIterator iter(map_space());
-    for (HeapObject* obj = iter.Next(); obj != NULL; obj = iter.Next()) {
-      if (obj->IsMap()) {
-        DescriptorArray* descs = Map::cast(obj)->instance_descriptors();
-        for (int i = 0; i < descs->number_of_descriptors(); i++) {
-          if (descs->GetType(i) == CALLBACKS &&
-              descs->GetValue(i)->IsAccessorPair()) {
-            AccessorPair* accessors = AccessorPair::cast(descs->GetValue(i));
-            uintptr_t before = reinterpret_cast<intptr_t>(accessors->getter());
-            uintptr_t after = (phase == 0) ?
-                ((before & ~kSmiTagMask) | kSmiTag) :
-                ((before & ~kHeapObjectTag) | kHeapObjectTag);
-            CHECK(before != after);
-            accessors->set_getter(reinterpret_cast<Object*>(after));
-          }
-        }
-      }
-    }
-  }
 }
 #endif  // DEBUG
 
@@ -5236,221 +4621,275 @@
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  NewSpacePageIterator it(new_space_.FromSpaceStart(),
-                          new_space_.FromSpaceEnd());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    for (Address cursor = page->area_start(), limit = page->area_end();
-         cursor < limit;
-         cursor += kPointerSize) {
-      Memory::Address_at(cursor) = kFromSpaceZapValue;
-    }
+  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
+  for (Address a = new_space_.FromSpaceLow();
+       a < new_space_.FromSpaceHigh();
+       a += kPointerSize) {
+    Memory::Address_at(a) = kFromSpaceZapValue;
   }
 }
 #endif  // DEBUG
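
ZapFromSpace above fills the inactive semispace with kFromSpaceZapValue so that any stale pointer into from-space is immediately recognizable in a debugger. The same zap-and-detect idea in a self-contained sketch (the sentinel value here is made up):

#include <cassert>
#include <cstdint>

static const uintptr_t kZapValue = 0xdeadbeef;  // Illustrative sentinel.

void Zap(uintptr_t* start, uintptr_t* end) {
  for (uintptr_t* p = start; p < end; p++) *p = kZapValue;
}

int main() {
  uintptr_t semispace[64];
  Zap(semispace, semispace + 64);
  // Any later read of a "freed" slot is recognizably garbage.
  assert(semispace[17] == kZapValue);
}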
 
 
+bool Heap::IteratePointersInDirtyRegion(Heap* heap,
+                                        Address start,
+                                        Address end,
+                                        ObjectSlotCallback copy_object_func) {
+  Address slot_address = start;
+  bool pointers_to_new_space_found = false;
+
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap->InNewSpace(*slot)) {
+      ASSERT((*slot)->IsHeapObject());
+      copy_object_func(reinterpret_cast<HeapObject**>(slot));
+      if (heap->InNewSpace(*slot)) {
+        ASSERT((*slot)->IsHeapObject());
+        pointers_to_new_space_found = true;
+      }
+    }
+    slot_address += kPointerSize;
+  }
+  return pointers_to_new_space_found;
+}
+
+
+// Compute the start address of the first map following the given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute the end address of the first map preceding the given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
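
Both helpers translate an address into an offset from the page's object area, round it to a multiple of Map::kSize, and translate back. A standalone check of that arithmetic, using made-up values for the page base and map size:

#include <cassert>

// Hypothetical stand-ins for page->ObjectAreaStart() and Map::kSize.
const unsigned kPageBase = 0x1000;
const unsigned kMapSize = 88;  // Map::kSize depends on pointer size; 88 is illustrative.

unsigned MapStartAlign(unsigned addr) {
  return kPageBase + ((addr - kPageBase) + (kMapSize - 1)) / kMapSize * kMapSize;
}

unsigned MapEndAlign(unsigned addr) {
  return kPageBase + (addr - kPageBase) / kMapSize * kMapSize;
}

int main() {
  assert(MapStartAlign(kPageBase + 1) == kPageBase + kMapSize);            // Rounds up.
  assert(MapEndAlign(kPageBase + kMapSize + 1) == kPageBase + kMapSize);   // Rounds down.
  assert(MapStartAlign(kPageBase) == kPageBase);                           // Already aligned.
}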
+
+
+static bool IteratePointersInDirtyMaps(Address start,
+                                       Address end,
+                                       ObjectSlotCallback copy_object_func) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  bool pointers_to_new_space_found = false;
+
+  Heap* heap = HEAP;
+  while (map_address < end) {
+    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    if (Heap::IteratePointersInDirtyRegion(heap,
+                                           pointer_fields_start,
+                                           pointer_fields_end,
+                                           copy_object_func)) {
+      pointers_to_new_space_found = true;
+    }
+
+    map_address += Map::kSize;
+  }
+
+  return pointers_to_new_space_found;
+}
+
+
+bool Heap::IteratePointersInDirtyMapsRegion(
+    Heap* heap,
+    Address start,
+    Address end,
+    ObjectSlotCallback copy_object_func) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end   = MapEndAlign(end);
+
+  bool contains_pointers_to_new_space = false;
+
+  if (map_aligned_start != start) {
+    Address prev_map = map_aligned_start - Map::kSize;
+    ASSERT(Memory::Object_at(prev_map)->IsMap());
+
+    Address pointer_fields_start =
+        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
+
+    Address pointer_fields_end =
+        Min(prev_map + Map::kPointerFieldsEndOffset, end);
+
+    contains_pointers_to_new_space =
+      IteratePointersInDirtyRegion(heap,
+                                   pointer_fields_start,
+                                   pointer_fields_end,
+                                   copy_object_func)
+        || contains_pointers_to_new_space;
+  }
+
+  contains_pointers_to_new_space =
+    IteratePointersInDirtyMaps(map_aligned_start,
+                               map_aligned_end,
+                               copy_object_func)
+      || contains_pointers_to_new_space;
+
+  if (map_aligned_end != end) {
+    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+    Address pointer_fields_start =
+        map_aligned_end + Map::kPointerFieldsBeginOffset;
+
+    Address pointer_fields_end =
+        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
+
+    contains_pointers_to_new_space =
+      IteratePointersInDirtyRegion(heap,
+                                   pointer_fields_start,
+                                   pointer_fields_end,
+                                   copy_object_func)
+        || contains_pointers_to_new_space;
+  }
+
+  return contains_pointers_to_new_space;
+}
+
+
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                              Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
+  Page* page = Page::FromAddress(start);
 
-  // We are not collecting slots on new space objects during mutation,
-  // thus we have to scan for pointers to evacuation candidates when we
-  // promote objects. But we should not record any slots in non-black
-  // objects. Grey objects' slots would be rescanned.
-  // A white object might not survive until the end of the collection,
-  // so it would be a violation of the invariant to record its slots.
-  bool record_slots = false;
-  if (incremental_marking()->IsCompacting()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
-    record_slots = Marking::IsBlack(mark_bit);
-  }
+  uint32_t marks = page->GetRegionMarks();
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
-    // If the store buffer becomes overfull, we mark pages as being exempt from
-    // the store buffer.  These pages are scanned to find pointers that point
-    // to the new space.  In that case we may hit newly promoted objects and
-    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
-    if (object->IsHeapObject()) {
-      if (Heap::InFromSpace(object)) {
-        callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(object));
-        Object* new_object = *slot;
-        if (InNewSpace(new_object)) {
-          SLOW_ASSERT(Heap::InToSpace(new_object));
-          SLOW_ASSERT(new_object->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
-        }
-        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
-      } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
-        mark_compact_collector()->RecordSlot(slot, slot, object);
+    if (InFromSpace(*slot)) {
+      ASSERT((*slot)->IsHeapObject());
+      callback(reinterpret_cast<HeapObject**>(slot));
+      if (InNewSpace(*slot)) {
+        ASSERT((*slot)->IsHeapObject());
+        marks |= page->GetRegionMaskForAddress(slot_address);
       }
     }
     slot_address += kPointerSize;
   }
+
+  page->SetRegionMarks(marks);
 }
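
This pre-store-buffer V8 tracks old-to-new pointers with per-page region marks: each page carries a 32-bit word in which bit i means "region i may contain a pointer into new space", and the loop above rebuilds those bits as it scans. A toy version of the mask computation, with hypothetical page geometry:

#include <cstdint>
#include <cstdio>

// Hypothetical geometry: a 4K page split into 32 regions tracked by one
// bit each, in the spirit of Page::GetRegionMaskForAddress().
const uintptr_t kPageSize = 4096;
const uintptr_t kRegionSize = kPageSize / 32;  // 128 bytes per region.

uint32_t RegionMaskForOffset(uintptr_t offset_in_page) {
  return 1u << (offset_in_page / kRegionSize);
}

int main() {
  uint32_t marks = 0;
  // Pretend slots at these page offsets still point into new space.
  const uintptr_t dirty_offsets[] = {0, 130, 4000};
  for (uintptr_t off : dirty_offsets) marks |= RegionMaskForOffset(off);
  printf("region marks: 0x%08x\n", marks);  // Bits 0, 1 and 31 set.
}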
 
 
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
+uint32_t Heap::IterateDirtyRegions(
+    uint32_t marks,
+    Address area_start,
+    Address area_end,
+    DirtyRegionCallback visit_dirty_region,
+    ObjectSlotCallback copy_object_func) {
+  uint32_t newmarks = 0;
+  uint32_t mask = 1;
 
+  if (area_start >= area_end) {
+    return newmarks;
+  }
 
-bool IsAMapPointerAddress(Object** addr) {
-  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
-  int mod = a % Map::kSize;
-  return mod >= Map::kPointerFieldsBeginOffset &&
-         mod < Map::kPointerFieldsEndOffset;
-}
+  Address region_start = area_start;
 
+  // area_start does not necessarily coincide with the start of the first
+  // region. Thus, to calculate the beginning of the next region, we have
+  // to align area_start to Page::kRegionSize.
+  Address second_region =
+      reinterpret_cast<Address>(
+          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+          ~Page::kRegionAlignmentMask);
 
-bool EverythingsAPointer(Object** addr) {
-  return true;
-}
+  // The next region might extend beyond area_end.
+  Address region_end = Min(second_region, area_end);
 
-
-static void CheckStoreBuffer(Heap* heap,
-                             Object** current,
-                             Object** limit,
-                             Object**** store_buffer_position,
-                             Object*** store_buffer_top,
-                             CheckStoreBufferFilter filter,
-                             Address special_garbage_start,
-                             Address special_garbage_end) {
-  Map* free_space_map = heap->free_space_map();
-  for ( ; current < limit; current++) {
-    Object* o = *current;
-    Address current_address = reinterpret_cast<Address>(current);
-    // Skip free space.
-    if (o == free_space_map) {
-      Address current_address = reinterpret_cast<Address>(current);
-      FreeSpace* free_space =
-          FreeSpace::cast(HeapObject::FromAddress(current_address));
-      int skip = free_space->Size();
-      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
-      ASSERT(skip > 0);
-      current_address += skip - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    // Skip the current linear allocation space between top and limit which is
-    // unmarked with the free space map, but can contain junk.
-    if (current_address == special_garbage_start &&
-        special_garbage_end != special_garbage_start) {
-      current_address = special_garbage_end - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    if (!(*filter)(current)) continue;
-    ASSERT(current_address < special_garbage_start ||
-           current_address >= special_garbage_end);
-    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
-    // We have to check that the pointer does not point into new space
-    // without trying to cast it to a heap object since the hash field of
-    // a string can contain values like 1 and 3 which are tagged null
-    // pointers.
-    if (!heap->InNewSpace(o)) continue;
-    while (**store_buffer_position < current &&
-           *store_buffer_position < store_buffer_top) {
-      (*store_buffer_position)++;
-    }
-    if (**store_buffer_position != current ||
-        *store_buffer_position == store_buffer_top) {
-      Object** obj_start = current;
-      while (!(*obj_start)->IsMap()) obj_start--;
-      UNREACHABLE();
+  if (marks & mask) {
+    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
+      newmarks |= mask;
     }
   }
-}
+  mask <<= 1;
 
+  // Iterate subsequent regions which lie fully inside [area_start, area_end).
+  region_start = region_end;
+  region_end = region_start + Page::kRegionSize;
 
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
-  OldSpace* space = old_pointer_space();
-  PageIterator pages(space);
+  while (region_end <= area_end) {
+    if (marks & mask) {
+      if (visit_dirty_region(this,
+                             region_start,
+                             region_end,
+                             copy_object_func)) {
+        newmarks |= mask;
+      }
+    }
 
-  store_buffer()->SortUniq();
+    region_start = region_end;
+    region_end = region_start + Page::kRegionSize;
 
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &EverythingsAPointer,
-                     space->top(),
-                     space->limit());
+    mask <<= 1;
   }
-}
 
-
-void Heap::MapSpaceCheckStoreBuffer() {
-  MapSpace* space = map_space();
-  PageIterator pages(space);
-
-  store_buffer()->SortUniq();
-
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &IsAMapPointerAddress,
-                     space->top(),
-                     space->limit());
-  }
-}
-
-
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
-  LargeObjectIterator it(lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Object*** store_buffer_position = store_buffer()->Start();
-      Object*** store_buffer_top = store_buffer()->Top();
-      Object** current = reinterpret_cast<Object**>(object->address());
-      Object** limit =
-          reinterpret_cast<Object**>(object->address() + object->Size());
-      CheckStoreBuffer(this,
-                       current,
-                       limit,
-                       &store_buffer_position,
-                       store_buffer_top,
-                       &EverythingsAPointer,
-                       NULL,
-                       NULL);
+  if (region_start != area_end) {
+    // A small piece of the area is left unvisited because area_end does
+    // not coincide with a region end. Check whether the region covering
+    // the last part of the area is dirty.
+    if (marks & mask) {
+      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
+        newmarks |= mask;
+      }
     }
   }
+
+  return newmarks;
 }
-#endif
+
+
+void Heap::IterateDirtyRegions(
+    PagedSpace* space,
+    DirtyRegionCallback visit_dirty_region,
+    ObjectSlotCallback copy_object_func,
+    ExpectedPageWatermarkState expected_page_watermark_state) {
+
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    uint32_t marks = page->GetRegionMarks();
+
+    if (marks != Page::kAllRegionsCleanMarks) {
+      Address start = page->ObjectAreaStart();
+
+      // Do not try to visit pointers beyond the page allocation watermark.
+      // The page can contain garbage pointers there.
+      Address end;
+
+      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+          page->IsWatermarkValid()) {
+        end = page->AllocationWatermark();
+      } else {
+        end = page->CachedAllocationWatermark();
+      }
+
+      ASSERT(space == old_pointer_space_ ||
+             (space == map_space_ &&
+              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+      page->SetRegionMarks(IterateDirtyRegions(marks,
+                                               start,
+                                               end,
+                                               visit_dirty_region,
+                                               copy_object_func));
+    }
+
+    // Mark page watermark as invalid to maintain watermark validity invariant.
+    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
+    page->InvalidateWatermark(true);
+  }
+}
 
 
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
@@ -5461,29 +4900,29 @@
 
 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  v->Synchronize(VisitorSynchronization::kSymbolTable);
+  v->Synchronize("symbol_table");
   if (mode != VISIT_ALL_IN_SCAVENGE &&
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
   }
-  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
+  v->Synchronize("external_string_table");
 }
 
 
 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  v->Synchronize(VisitorSynchronization::kStrongRootList);
+  v->Synchronize("strong_root_list");
 
   v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
-  v->Synchronize(VisitorSynchronization::kSymbol);
+  v->Synchronize("symbol");
 
   isolate_->bootstrapper()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kBootstrapper);
+  v->Synchronize("bootstrapper");
   isolate_->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kTop);
+  v->Synchronize("top");
   Relocatable::Iterate(v);
-  v->Synchronize(VisitorSynchronization::kRelocatable);
+  v->Synchronize("relocatable");
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->Iterate(v);
@@ -5491,21 +4930,22 @@
     isolate_->deoptimizer_data()->Iterate(v);
   }
 #endif
-  v->Synchronize(VisitorSynchronization::kDebug);
+  v->Synchronize("debug");
   isolate_->compilation_cache()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kCompilationCache);
+  v->Synchronize("compilationcache");
 
   // Iterate over local handles in handle scopes.
   isolate_->handle_scope_implementer()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kHandleScope);
+  v->Synchronize("handlescope");
 
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE &&
+      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
-  v->Synchronize(VisitorSynchronization::kBuiltins);
+  v->Synchronize("builtins");
 
   // Iterate over global handles.
   switch (mode) {
@@ -5520,11 +4960,11 @@
       isolate_->global_handles()->IterateAllRoots(v);
       break;
   }
-  v->Synchronize(VisitorSynchronization::kGlobalHandles);
+  v->Synchronize("globalhandles");
 
   // Iterate over pointers being held by inactive threads.
   isolate_->thread_manager()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kThreadManager);
+  v->Synchronize("threadmanager");
 
   // Iterate over the pointers the Serialization/Deserialization code is
   // holding.
@@ -5546,20 +4986,11 @@
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
 bool Heap::ConfigureHeap(int max_semispace_size,
-                         intptr_t max_old_gen_size,
-                         intptr_t max_executable_size) {
-  if (HasBeenSetUp()) return false;
+                         int max_old_gen_size,
+                         int max_executable_size) {
+  if (HasBeenSetup()) return false;
 
-  if (max_semispace_size > 0) {
-    if (max_semispace_size < Page::kPageSize) {
-      max_semispace_size = Page::kPageSize;
-      if (FLAG_trace_gc) {
-        PrintF("Max semispace size cannot be less than %dkbytes\n",
-               Page::kPageSize >> 10);
-      }
-    }
-    max_semispace_size_ = max_semispace_size;
-  }
+  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
 
   if (Snapshot::IsEnabled()) {
     // If we are using a snapshot we always reserve the default amount
@@ -5569,10 +5000,6 @@
     // than the default reserved semispace size.
     if (max_semispace_size_ > reserved_semispace_size_) {
       max_semispace_size_ = reserved_semispace_size_;
-      if (FLAG_trace_gc) {
-        PrintF("Max semispace size cannot be more than %dkbytes\n",
-               reserved_semispace_size_ >> 10);
-      }
     }
   } else {
     // If we are not using snapshots we reserve space for the actual
@@ -5598,12 +5025,8 @@
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   external_allocation_limit_ = 10 * max_semispace_size_;
 
-  // The old generation is paged and needs at least one page for each space.
-  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
-                                                       Page::kPageSize),
-                                 RoundUp(max_old_generation_size_,
-                                         Page::kPageSize));
+  // The old generation is paged.
+  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
 
   configured_ = true;
   return true;
@@ -5611,9 +5034,9 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
-                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
-                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
+  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
+                       FLAG_max_old_space_size * MB,
+                       FLAG_max_executable_size * MB);
 }
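
One consequence of reverting these parameters from intptr_t to int is that the products are again computed in 32-bit int arithmetic, which overflows for flag values of 2048 MB and up. A small illustration of why the removed static_casts widened first (the flag value is an example):

#include <cstdint>
#include <cstdio>

const int MB = 1 << 20;

int main() {
  int flag_max_old_space = 4096;  // Example flag value, in MB.
  // Computed in int, 4096 * (1 << 20) wraps around: the product needs
  // 33 bits. Widening before multiplying keeps the value exact:
  int64_t limit = static_cast<int64_t>(flag_max_old_space) * MB;
  printf("%lld\n", static_cast<long long>(limit));  // 4294967296
}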
 
 
@@ -5622,15 +5045,15 @@
   *stats->end_marker = HeapStats::kEndMarker;
   *stats->new_space_size = new_space_.SizeAsInt();
   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
-  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
+  *stats->old_pointer_space_size = old_pointer_space_->Size();
   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
-  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
+  *stats->old_data_space_size = old_data_space_->Size();
   *stats->old_data_space_capacity = old_data_space_->Capacity();
-  *stats->code_space_size = code_space_->SizeOfObjects();
+  *stats->code_space_size = code_space_->Size();
   *stats->code_space_capacity = code_space_->Capacity();
-  *stats->map_space_size = map_space_->SizeOfObjects();
+  *stats->map_space_size = map_space_->Size();
   *stats->map_space_capacity = map_space_->Capacity();
-  *stats->cell_space_size = cell_space_->SizeOfObjects();
+  *stats->cell_space_size = cell_space_->Size();
   *stats->cell_space_capacity = cell_space_->Capacity();
   *stats->lo_space_size = lo_space_->Size();
   isolate_->global_handles()->RecordStats(stats);
@@ -5641,7 +5064,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator;
+    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -5664,16 +5087,6 @@
 }
 
 
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
-  return old_pointer_space_->SizeOfObjects()
-      + old_data_space_->SizeOfObjects()
-      + code_space_->SizeOfObjects()
-      + map_space_->SizeOfObjects()
-      + cell_space_->SizeOfObjects()
-      + lo_space_->SizeOfObjects();
-}
-
-
 int Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
@@ -5734,7 +5147,7 @@
 
     Address map_addr = map_p->address();
 
-    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
+    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
     MarkObjectRecursively(&map);
 
@@ -5781,7 +5194,7 @@
 
     HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-    obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
+    obj->set_map(reinterpret_cast<Map*>(map_p));
 
     UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
 
@@ -5847,9 +5260,8 @@
 
 #endif
 
-bool Heap::SetUp(bool create_heap_objects) {
+bool Heap::Setup(bool create_heap_objects) {
 #ifdef DEBUG
-  allocation_timeout_ = FLAG_gc_interval;
   debug_utils_ = new HeapDebugUtils(this);
 #endif
 
@@ -5857,7 +5269,7 @@
   // goes wrong, just return false. The caller should check the results and
   // call Heap::TearDown() to release allocated memory.
   //
-  // If the heap is not yet configured (e.g. through the API), configure it.
+  // If the heap is not yet configured (e.g., through the API), configure it.
   // Configuration is based on the flags new-space-size (really the semispace
   // size) and old-space-size if set or the initial values of semispace_size_
   // and old_generation_size_ otherwise.
@@ -5865,24 +5277,34 @@
     if (!ConfigureHeapDefault()) return false;
   }
 
-  gc_initializer_mutex.Pointer()->Lock();
+  gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
-      initialized_gc = true;
-      InitializeScavengingVisitorsTables();
-      NewSpaceScavenger::Initialize();
-      MarkCompactCollector::Initialize();
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
   }
-  gc_initializer_mutex.Pointer()->Unlock();
+  gc_initializer_mutex->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
-  // Set up memory allocator.
-  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
+  // Set up the memory allocator and reserve a chunk of memory for new
+  // space.  The chunk is double the size of the requested reserved
+  // new space size to ensure that we can find a pair of semispaces that
+  // are contiguous and aligned to their size.
+  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
       return false;
+  void* chunk =
+      isolate_->memory_allocator()->ReserveInitialChunk(
+          4 * reserved_semispace_size_);
+  if (chunk == NULL) return false;
 
-  // Set up new space.
-  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
+  // Align the pair of semispaces to their size, which must be a power
+  // of 2.
+  Address new_space_start =
+      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
     return false;
   }
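
Reserving extra memory and rounding the start address up is the standard way to obtain a block aligned to its own power-of-two size, which is what the doubled reservation above is for. A self-contained sketch under made-up sizes:

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  // Example size standing in for 2 * reserved_semispace_size_; must be a
  // power of 2 for the mask arithmetic below.
  const uintptr_t kPairSize = 1 << 20;
  void* raw = malloc(2 * kPairSize);  // Over-reserve by 2x.
  assert(raw != NULL);
  uintptr_t p = reinterpret_cast<uintptr_t>(raw);
  uintptr_t aligned = (p + kPairSize - 1) & ~(kPairSize - 1);  // RoundUp.
  // An aligned block of kPairSize bytes always fits in the reservation.
  assert(aligned % kPairSize == 0);
  assert(aligned + kPairSize <= p + 2 * kPairSize);
  free(raw);
}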
 
@@ -5893,7 +5315,7 @@
                    OLD_POINTER_SPACE,
                    NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->SetUp()) return false;
+  if (!old_pointer_space_->Setup(NULL, 0)) return false;
 
   // Initialize old data space.
   old_data_space_ =
@@ -5902,14 +5324,14 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->SetUp()) return false;
+  if (!old_data_space_->Setup(NULL, 0)) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
   // On 64-bit platform(s), we put all code objects in a 2 GB range of
   // virtual address space, so that they can call each other with near calls.
   if (code_range_size_ > 0) {
-    if (!isolate_->code_range()->SetUp(code_range_size_)) {
+    if (!isolate_->code_range()->Setup(code_range_size_)) {
       return false;
     }
   }
@@ -5917,26 +5339,30 @@
   code_space_ =
       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
-  if (!code_space_->SetUp()) return false;
+  if (!code_space_->Setup(NULL, 0)) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
+  map_space_ = new MapSpace(this, FLAG_use_big_map_space
+      ? max_old_generation_size_
+      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+      FLAG_max_map_space_pages,
+      MAP_SPACE);
   if (map_space_ == NULL) return false;
-  if (!map_space_->SetUp()) return false;
+  if (!map_space_->Setup(NULL, 0)) return false;
 
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->SetUp()) return false;
+  if (!cell_space_->Setup(NULL, 0)) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
+  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (lo_space_ == NULL) return false;
-  if (!lo_space_->SetUp()) return false;
+  if (!lo_space_->Setup()) return false;
 
-  // Set up the seed that is used to randomize the string hash function.
+  // Set up the seed that is used to randomize the string hash function.
   ASSERT(hash_seed() == 0);
   if (FLAG_randomize_hashes) {
     if (FLAG_hash_seed == 0) {
@@ -5961,8 +5387,6 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
-  store_buffer()->SetUp();
-
   return true;
 }
 
@@ -5989,6 +5413,7 @@
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
+    PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -6038,9 +5463,6 @@
     lo_space_ = NULL;
   }
 
-  store_buffer()->TearDown();
-  incremental_marking()->TearDown();
-
   isolate_->memory_allocator()->TearDown();
 
 #ifdef DEBUG
@@ -6053,11 +5475,8 @@
 void Heap::Shrink() {
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->ReleaseAllUnusedPages();
-  }
+  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+    space->Shrink();
 }
 
 
@@ -6260,54 +5679,98 @@
 };
 
 
-class UnreachableObjectsFilter : public HeapObjectsFilter {
+class FreeListNodesFilter : public HeapObjectsFilter {
  public:
-  UnreachableObjectsFilter() {
-    MarkReachableObjects();
-  }
-
-  ~UnreachableObjectsFilter() {
-    Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
+  FreeListNodesFilter() {
+    MarkFreeListNodes();
   }
 
   bool SkipObject(HeapObject* object) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    return !mark_bit.Get();
+    if (object->IsMarked()) {
+      object->ClearMark();
+      return true;
+    } else {
+      return false;
+    }
   }
 
  private:
-  class MarkingVisitor : public ObjectVisitor {
+  void MarkFreeListNodes() {
+    Heap* heap = HEAP;
+    heap->old_pointer_space()->MarkFreeListNodes();
+    heap->old_data_space()->MarkFreeListNodes();
+    MarkCodeSpaceFreeListNodes(heap);
+    heap->map_space()->MarkFreeListNodes();
+    heap->cell_space()->MarkFreeListNodes();
+  }
+
+  void MarkCodeSpaceFreeListNodes(Heap* heap) {
+    // For code space, using FreeListNode::IsFreeListNode is OK.
+    HeapObjectIterator iter(heap->code_space());
+    for (HeapObject* obj = iter.next_object();
+         obj != NULL;
+         obj = iter.next_object()) {
+      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
+    }
+  }
+
+  AssertNoAllocation no_alloc;
+};
+
+
+class UnreachableObjectsFilter : public HeapObjectsFilter {
+ public:
+  UnreachableObjectsFilter() {
+    MarkUnreachableObjects();
+  }
+
+  bool SkipObject(HeapObject* object) {
+    if (object->IsMarked()) {
+      object->ClearMark();
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+ private:
+  class UnmarkingVisitor : public ObjectVisitor {
    public:
-    MarkingVisitor() : marking_stack_(10) {}
+    UnmarkingVisitor() : list_(10) {}
 
     void VisitPointers(Object** start, Object** end) {
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        if (!mark_bit.Get()) {
-          mark_bit.Set();
-          marking_stack_.Add(obj);
+        if (obj->IsMarked()) {
+          obj->ClearMark();
+          list_.Add(obj);
         }
       }
     }
 
-    void TransitiveClosure() {
-      while (!marking_stack_.is_empty()) {
-        HeapObject* obj = marking_stack_.RemoveLast();
-        obj->Iterate(this);
-      }
+    bool can_process() { return !list_.is_empty(); }
+
+    void ProcessNext() {
+      HeapObject* obj = list_.RemoveLast();
+      obj->Iterate(this);
     }
 
    private:
-    List<HeapObject*> marking_stack_;
+    List<HeapObject*> list_;
   };
 
-  void MarkReachableObjects() {
-    Heap* heap = Isolate::Current()->heap();
-    MarkingVisitor visitor;
-    heap->IterateRoots(&visitor, VISIT_ALL);
-    visitor.TransitiveClosure();
+  void MarkUnreachableObjects() {
+    HeapIterator iterator;
+    for (HeapObject* obj = iterator.next();
+         obj != NULL;
+         obj = iterator.next()) {
+      obj->SetMark();
+    }
+    UnmarkingVisitor visitor;
+    HEAP->IterateRoots(&visitor, VISIT_ALL);
+    while (visitor.can_process())
+      visitor.ProcessNext();
   }
 
   AssertNoAllocation no_alloc;
@@ -6335,8 +5798,12 @@
 
 void HeapIterator::Init() {
   // Start the iteration.
-  space_iterator_ = new SpaceIterator;
+  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
+      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
   switch (filtering_) {
+    case kFilterFreeListNodes:
+      filter_ = new FreeListNodesFilter;
+      break;
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
       break;
@@ -6472,11 +5939,6 @@
 }
 
 
-static bool SafeIsGlobalContext(HeapObject* obj) {
-  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
-}
-
-
 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   if (!(*p)->IsHeapObject()) return;
 
@@ -6495,14 +5957,14 @@
     return;
   }
 
-  bool is_global_context = SafeIsGlobalContext(obj);
+  bool is_global_context = obj->IsGlobalContext();
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
 
   Address map_addr = map_p->address();
 
-  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
+  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
 
   // Scan the object body.
   if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -6544,7 +6006,7 @@
 
   HeapObject* map_p = HeapObject::FromAddress(map_addr);
 
-  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
+  obj->set_map(reinterpret_cast<Map*>(map_p));
 
   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
 
@@ -6603,30 +6065,31 @@
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
-    holes_size += space->Waste() + space->Available();
+    holes_size += space->Waste() + space->AvailableFree();
   }
   return holes_size;
 }
 
 
-GCTracer::GCTracer(Heap* heap,
-                   const char* gc_reason,
-                   const char* collector_reason)
+GCTracer::GCTracer(Heap* heap)
     : start_time_(0.0),
-      start_object_size_(0),
-      start_memory_size_(0),
+      start_size_(0),
       gc_count_(0),
       full_gc_count_(0),
+      is_compacting_(false),
+      marked_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
-      heap_(heap),
-      gc_reason_(gc_reason),
-      collector_reason_(collector_reason) {
+      heap_(heap) {
+  // These two fields reflect the state of the previous full collection.
+  // Set them before they are changed by the collector.
+  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
+  previous_marked_count_ =
+      heap_->mark_compact_collector_.previous_marked_count();
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
-  start_object_size_ = heap_->SizeOfObjects();
-  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
+  start_size_ = heap_->SizeOfObjects();
 
   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
     scopes_[i] = 0;
@@ -6640,14 +6103,6 @@
   if (heap_->last_gc_end_timestamp_ > 0) {
     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   }
-
-  steps_count_ = heap_->incremental_marking()->steps_count();
-  steps_took_ = heap_->incremental_marking()->steps_took();
-  longest_step_ = heap_->incremental_marking()->longest_step();
-  steps_count_since_last_gc_ =
-      heap_->incremental_marking()->steps_count_since_last_gc();
-  steps_took_since_last_gc_ =
-      heap_->incremental_marking()->steps_took_since_last_gc();
 }
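
GCTracer follows the same RAII pattern as HistogramTimerScope: sample a clock in the constructor, attribute the elapsed time in the destructor. A generic version in standard C++, not the V8 classes themselves:

#include <chrono>
#include <cstdio>

class ScopedTimer {
 public:
  explicit ScopedTimer(const char* label)
      : label_(label), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - start_).count();
    printf("%s: %lld ms\n", label_, static_cast<long long>(ms));
  }
 private:
  const char* label_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  ScopedTimer t("gc_context");  // Reported automatically at scope exit.
  volatile long x = 0;
  for (long i = 0; i < 10000000; i++) x += i;
}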
 
 
@@ -6673,46 +6128,16 @@
     }
   }
 
-  PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
-
   if (!FLAG_trace_gc_nvp) {
     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
 
-    double end_memory_size_mb =
-        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
-
-    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
+    PrintF("%s %.1f -> %.1f MB, ",
            CollectorString(),
-           static_cast<double>(start_object_size_) / MB,
-           static_cast<double>(start_memory_size_) / MB,
-           SizeOfHeapObjects(),
-           end_memory_size_mb);
+           static_cast<double>(start_size_) / MB,
+           SizeOfHeapObjects());
 
     if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%d ms", time);
-    if (steps_count_ > 0) {
-      if (collector_ == SCAVENGER) {
-        PrintF(" (+ %d ms in %d steps since last GC)",
-               static_cast<int>(steps_took_since_last_gc_),
-               steps_count_since_last_gc_);
-      } else {
-        PrintF(" (+ %d ms in %d steps since start of marking, "
-                   "biggest step %f ms)",
-               static_cast<int>(steps_took_),
-               steps_count_,
-               longest_step_);
-      }
-    }
-
-    if (gc_reason_ != NULL) {
-      PrintF(" [%s]", gc_reason_);
-    }
-
-    if (collector_reason_ != NULL) {
-      PrintF(" [%s]", collector_reason_);
-    }
-
-    PrintF(".\n");
+    PrintF("%d ms.\n", time);
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
@@ -6724,7 +6149,8 @@
         PrintF("s");
         break;
       case MARK_COMPACTOR:
-        PrintF("ms");
+        PrintF("%s",
+               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
         break;
       default:
         UNREACHABLE();
@@ -6735,21 +6161,9 @@
     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
-    PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
-    PrintF("new_new=%d ",
-           static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
-    PrintF("root_new=%d ",
-           static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
-    PrintF("old_new=%d ",
-           static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
-    PrintF("compaction_ptrs=%d ",
-           static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
-    PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
-        Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
-    PrintF("misc_compaction=%d ",
-           static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
+    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
 
-    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
+    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
            in_free_list_or_wasted_before_gc_);
@@ -6758,14 +6172,6 @@
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
-    if (collector_ == SCAVENGER) {
-      PrintF("stepscount=%d ", steps_count_since_last_gc_);
-      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
-    } else {
-      PrintF("stepscount=%d ", steps_count_);
-      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
-    }
-
     PrintF("\n");
   }
 
@@ -6778,7 +6184,8 @@
     case SCAVENGER:
       return "Scavenge";
     case MARK_COMPACTOR:
-      return "Mark-sweep";
+      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
+                                                           : "Mark-sweep";
   }
   return "Unknown GC";
 }
@@ -6793,12 +6200,10 @@
 
 
 int KeyedLookupCache::Lookup(Map* map, String* name) {
-  int index = (Hash(map, name) & kHashMask);
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    if ((key.map == map) && key.name->Equals(name)) {
-      return field_offsets_[index + i];
-    }
+  int index = Hash(map, name);
+  Key& key = keys_[index];
+  if ((key.map == map) && key.name->Equals(name)) {
+    return field_offsets_[index];
   }
   return kNotFound;
 }
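
The reverted Lookup is a direct-mapped cache: the hash picks exactly one slot, both key components must match, and colliding updates simply evict each other (the multi-way probing removed above traded that simplicity for fewer conflict misses). A self-contained sketch with the key types reduced to ints:

#include <cstdio>

const int kLength = 64;  // Stand-in for KeyedLookupCache::kLength.
const int kNotFound = -1;

struct Key { int map; int name; };
static Key keys[kLength];
static int field_offsets[kLength];

int Hash(int map, int name) { return (map ^ name) & (kLength - 1); }

int Lookup(int map, int name) {
  int index = Hash(map, name);
  Key& key = keys[index];
  if (key.map == map && key.name == name) return field_offsets[index];
  return kNotFound;  // Miss, or the slot was overwritten by a colliding key.
}

void Update(int map, int name, int field_offset) {
  int index = Hash(map, name);  // Colliding entries evict each other.
  keys[index].map = map;
  keys[index].name = name;
  field_offsets[index] = field_offset;
}

int main() {
  Update(7, 42, 3);
  printf("%d %d\n", Lookup(7, 42), Lookup(7, 43));  // 3 -1
}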
@@ -6807,29 +6212,7 @@
 void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
-    int index = (Hash(map, symbol) & kHashMask);
-    // After a GC there will be free slots, so we use them in order (this may
-    // help to get the most frequently used one in position 0).
-    for (int i = 0; i < kEntriesPerBucket; i++) {
-      Key& key = keys_[index + i];
-      Object* free_entry_indicator = NULL;
-      if (key.map == free_entry_indicator) {
-        key.map = map;
-        key.name = symbol;
-        field_offsets_[index + i] = field_offset;
-        return;
-      }
-    }
-    // No free entry found in this bucket, so we move them all down one and
-    // put the new entry at position zero.
-    for (int i = kEntriesPerBucket - 1; i > 0; i--) {
-      Key& key = keys_[index + i];
-      Key& key2 = keys_[index + i - 1];
-      key = key2;
-      field_offsets_[index + i] = field_offsets_[index + i - 1];
-    }
-
-    // Write the new first entry.
+    int index = Hash(map, symbol);
     Key& key = keys_[index];
     key.map = map;
     key.name = symbol;
@@ -6884,9 +6267,7 @@
 void ExternalStringTable::CleanUp() {
   int last = 0;
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
-      continue;
-    }
+    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
     if (heap_->InNewSpace(new_space_strings_[i])) {
       new_space_strings_[last++] = new_space_strings_[i];
     } else {
@@ -6896,16 +6277,12 @@
   new_space_strings_.Rewind(last);
   last = 0;
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
-      continue;
-    }
+    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
     old_space_strings_[last++] = old_space_strings_[i];
   }
   old_space_strings_.Rewind(last);
-  if (FLAG_verify_heap) {
-    Verify();
-  }
+  Verify();
 }
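
CleanUp uses the classic filter-in-place idiom: copy survivors down to index `last`, then truncate. The same pattern on a std::vector, with a made-up hole sentinel:

#include <cstdio>
#include <vector>

int main() {
  const int kTheHole = -1;  // Stand-in for the cleared-entry sentinel.
  std::vector<int> strings = {5, kTheHole, 9, kTheHole, 2};
  size_t last = 0;
  for (size_t i = 0; i < strings.size(); ++i) {
    if (strings[i] == kTheHole) continue;  // Skip cleared entries.
    strings[last++] = strings[i];          // Keep survivors, order preserved.
  }
  strings.resize(last);                    // Analogous to Rewind(last).
  for (int s : strings) printf("%d ", s);  // 5 9 2
}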
 
 
@@ -6915,72 +6292,4 @@
 }
 
 
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
-  chunk->set_next_chunk(chunks_queued_for_free_);
-  chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
-  if (chunks_queued_for_free_ == NULL) return;
-  MemoryChunk* next;
-  MemoryChunk* chunk;
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-
-    if (chunk->owner()->identity() == LO_SPACE) {
-      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
-      // If FromAnyPointerAddress encounters a slot that belongs to a large
-      // chunk queued for deletion, it will fail to find the chunk, because
-      // it tries to search the list of pages owned by the large object
-      // space, and queued chunks were detached from that list.
-      // To work around this we split the large chunk into normal kPageSize
-      // aligned pieces and initialize the size, owner and flags fields of
-      // every piece.
-      // If FromAnyPointerAddress encounters a slot that belongs to one of
-      // these smaller pieces, it will treat it as a slot on a normal Page.
-      Address chunk_end = chunk->address() + chunk->size();
-      MemoryChunk* inner = MemoryChunk::FromAddress(
-          chunk->address() + Page::kPageSize);
-      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
-      while (inner <= inner_last) {
-        // Size of a large chunk is always a multiple of
-        // OS::AllocateAlignment() so there is always
-        // enough space for a fake MemoryChunk header.
-        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
-        // Guard against overflow.
-        if (area_end < inner->address()) area_end = chunk_end;
-        inner->SetArea(inner->address(), area_end);
-        inner->set_size(Page::kPageSize);
-        inner->set_owner(lo_space());
-        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-        inner = MemoryChunk::FromAddress(
-            inner->address() + Page::kPageSize);
-      }
-    }
-  }
-  isolate_->heap()->store_buffer()->Compact();
-  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    isolate_->memory_allocator()->Free(chunk);
-  }
-  chunks_queued_for_free_ = NULL;
-}
-
-
-void Heap::RememberUnmappedPage(Address page, bool compacted) {
-  uintptr_t p = reinterpret_cast<uintptr_t>(page);
-  // Tag the page pointer to make it findable in the dump file.
-  if (compacted) {
-    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
-  } else {
-    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
-  }
-  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
-      reinterpret_cast<Address>(p);
-  remembered_unmapped_pages_index_++;
-  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
-}
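
The removed helper XORs a distinctive pattern into the page address: only bits below the page size are flipped, so in a crash dump the value still points near the unmapped page yet is recognizable, and the pattern itself records whether the page had been compacted. The tagging in isolation, with a hypothetical page size:

#include <cstdint>
#include <cstdio>

const uintptr_t kPageSize = 1 << 20;  // Illustrative; not V8's page size.

uintptr_t TagUnmappedPage(uintptr_t page, bool compacted) {
  // Only bits below the page size are flipped, so the tagged value still
  // lands inside the page it describes.
  return page ^ ((compacted ? 0xc1ead   // "Cleared".
                            : 0x1d1ed)  // "I died".
                 & (kPageSize - 1));
}

int main() {
  const uintptr_t page = 0x40100000;  // Example page-aligned address.
  printf("compacted: %lx\n", (unsigned long)TagUnmappedPage(page, true));
  printf("died:      %lx\n", (unsigned long)TagUnmappedPage(page, false));
}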
-
 } }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index 2bd037f..b1948a9 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,55 +32,46 @@
 
 #include "allocation.h"
 #include "globals.h"
-#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
-#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
-#include "store-buffer.h"
 #include "v8-counters.h"
-#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
 
+// TODO(isolates): remove HEAP here
+#define HEAP (_inline_get_heap_())
+class Heap;
+inline Heap* _inline_get_heap_();
+
+
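
STRONG_ROOT_LIST below is an X-macro: every client supplies its own definition of V(type, name, CamelName) and expands the list, so a single table drives root indices, accessors and debug names alike. A miniature self-contained demonstration of the pattern (not V8's actual expansions):

#include <cstdio>

// A miniature root list in the same V(type, name, CamelName) shape.
#define DEMO_ROOT_LIST(V)      \
  V(int, answer, Answer)       \
  V(double, pi, Pi)

// One expansion builds an enum of root indices...
#define ROOT_INDEX(type, name, camel) k##camel##RootIndex,
enum RootIndex { DEMO_ROOT_LIST(ROOT_INDEX) kRootCount };
#undef ROOT_INDEX

// ...another builds a parallel table of printable names.
#define ROOT_NAME(type, name, camel) #name,
static const char* kRootNames[] = { DEMO_ROOT_LIST(ROOT_NAME) };
#undef ROOT_NAME

int main() {
  for (int i = 0; i < kRootCount; i++) printf("%d: %s\n", i, kRootNames[i]);
}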
 // Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V)                                                    \
+#define STRONG_ROOT_LIST(V)                                      \
+  /* Put the byte array map early.  We need it to be in place by the time   */ \
+  /* the deserializer hits the next page, since it wants to put a byte      */ \
+  /* array in the unused space at the end of the page.                      */ \
   V(Map, byte_array_map, ByteArrayMap)                                         \
-  V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Smi, store_buffer_top, StoreBufferTop)                                     \
-  V(Oddball, undefined_value, UndefinedValue)                                  \
-  V(Oddball, the_hole_value, TheHoleValue)                                     \
-  V(Oddball, null_value, NullValue)                                            \
-  V(Oddball, true_value, TrueValue)                                            \
-  V(Oddball, false_value, FalseValue)                                          \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
-  V(Map, meta_map, MetaMap)                                                    \
-  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
-  V(Map, ascii_string_map, AsciiStringMap)                                     \
+  V(Object, undefined_value, UndefinedValue)                                   \
+  V(Object, the_hole_value, TheHoleValue)                                      \
+  V(Object, null_value, NullValue)                                             \
+  V(Object, true_value, TrueValue)                                             \
+  V(Object, false_value, FalseValue)                                           \
+  V(Object, arguments_marker, ArgumentsMarker)                                 \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, code_map, CodeMap)                                                    \
-  V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, serialized_scope_info_map, SerializedScopeInfoMap)                    \
   V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
   V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
   V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel)       \
+  V(Map, meta_map, MetaMap)                                                    \
   V(Map, hash_table_map, HashTableMap)                                         \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
-  V(String, empty_string, EmptyString)                                         \
-  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Smi, stack_limit, StackLimit)                                              \
-  V(Oddball, arguments_marker, ArgumentsMarker)                                \
-  /* The first 32 roots above this line should be boring from a GC point of */ \
-  /* view.  This means they are never in new space and never on a page that */ \
-  /* is being compacted.                                                    */ \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -89,12 +80,19 @@
   V(FixedArray, string_split_cache, StringSplitCache)                          \
   V(Object, termination_exception, TerminationException)                       \
   V(Smi, hash_seed, HashSeed)                                                  \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(FixedDoubleArray, empty_fixed_double_array, EmptyFixedDoubleArray)         \
+  V(String, empty_string, EmptyString)                                         \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   V(Map, string_map, StringMap)                                                \
+  V(Map, ascii_string_map, AsciiStringMap)                                     \
   V(Map, symbol_map, SymbolMap)                                                \
   V(Map, cons_string_map, ConsStringMap)                                       \
   V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
   V(Map, sliced_string_map, SlicedStringMap)                                   \
   V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
+  V(Map, ascii_symbol_map, AsciiSymbolMap)                                     \
   V(Map, cons_symbol_map, ConsSymbolMap)                                       \
   V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap)                            \
   V(Map, external_symbol_map, ExternalSymbolMap)                               \
@@ -103,16 +101,6 @@
   V(Map, external_string_map, ExternalStringMap)                               \
   V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap)  \
   V(Map, external_ascii_string_map, ExternalAsciiStringMap)                    \
-  V(Map, short_external_symbol_map, ShortExternalSymbolMap)                    \
-  V(Map,                                                                       \
-    short_external_symbol_with_ascii_data_map,                                 \
-    ShortExternalSymbolWithAsciiDataMap)                                       \
-  V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap)         \
-  V(Map, short_external_string_map, ShortExternalStringMap)                    \
-  V(Map,                                                                       \
-    short_external_string_with_ascii_data_map,                                 \
-    ShortExternalStringWithAsciiDataMap)                                       \
-  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
   V(Map, undetectable_string_map, UndetectableStringMap)                       \
   V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap)            \
   V(Map, external_pixel_array_map, ExternalPixelArrayMap)                      \
@@ -129,13 +117,14 @@
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
   V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, code_map, CodeMap)                                                    \
   V(Map, oddball_map, OddballMap)                                              \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
-  V(HeapNumber, nan_value, NanValue)                                           \
-  V(HeapNumber, infinity_value, InfinityValue)                                 \
-  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
+  V(Object, nan_value, NanValue)                                               \
+  V(Object, minus_zero_value, MinusZeroValue)                                  \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
   V(Foreign, prototype_accessors, PrototypeAccessors)                          \
@@ -149,8 +138,6 @@
   V(Script, empty_script, EmptyScript)                                         \
   V(Smi, real_stack_limit, RealStackLimit)                                     \
   V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames)        \
-  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)
 
 #define ROOT_LIST(V)                                  \
   STRONG_ROOT_LIST(V)                                 \
@@ -177,7 +164,6 @@
   V(eval_symbol, "eval")                                                 \
   V(function_symbol, "function")                                         \
   V(length_symbol, "length")                                             \
-  V(module_symbol, "module")                                             \
   V(name_symbol, "name")                                                 \
   V(native_symbol, "native")                                             \
   V(null_symbol, "null")                                                 \
@@ -206,10 +192,12 @@
   V(InitializeConstGlobal_symbol, "InitializeConstGlobal")               \
   V(KeyedLoadElementMonomorphic_symbol,                                  \
     "KeyedLoadElementMonomorphic")                                       \
+  V(KeyedLoadElementPolymorphic_symbol,                                  \
+    "KeyedLoadElementPolymorphic")                                       \
   V(KeyedStoreElementMonomorphic_symbol,                                 \
     "KeyedStoreElementMonomorphic")                                      \
-  V(KeyedStoreAndGrowElementMonomorphic_symbol,                          \
-    "KeyedStoreAndGrowElementMonomorphic")                               \
+  V(KeyedStoreElementPolymorphic_symbol,                                 \
+    "KeyedStoreElementPolymorphic")                                      \
   V(stack_overflow_symbol, "kStackOverflowBoilerplate")                  \
   V(illegal_access_symbol, "illegal access")                             \
   V(out_of_memory_symbol, "out-of-memory")                               \
@@ -239,11 +227,7 @@
   V(closure_symbol, "(closure)")                                         \
   V(use_strict, "use strict")                                            \
   V(dot_symbol, ".")                                                     \
-  V(anonymous_function_symbol, "(anonymous function)")                   \
-  V(compare_ic_symbol, ".compare_ic")                                    \
-  V(infinity_symbol, "Infinity")                                         \
-  V(minus_infinity_symbol, "-Infinity")                                  \
-  V(hidden_stack_trace_symbol, "v8::hidden_stack_trace")
+  V(anonymous_function_symbol, "(anonymous function)")
 
 // Forward declarations.
 class GCTracer;
@@ -255,26 +239,10 @@
 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                       Object** pointer);
 
-class StoreBufferRebuilder {
- public:
-  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer) {
-  }
-
-  void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
-  StoreBuffer* store_buffer_;
-
-  // We record in this variable how full the store buffer was when we started
-  // iterating over the current page, finding pointers to new space.  If the
-  // store buffer overflows again we can exempt the page from the store buffer
-  // by rewinding to this point instead of having to search the store buffer.
-  Object*** start_of_current_page_;
-  // The current page we are scanning in the store buffer iterator.
-  MemoryChunk* current_page_;
-};
-
+typedef bool (*DirtyRegionCallback)(Heap* heap,
+                                    Address start,
+                                    Address end,
+                                    ObjectSlotCallback copy_object_func);
 
 
 // The all static Heap captures the interface to the global object heap.
@@ -289,103 +257,32 @@
 // by its size to avoid dereferencing a map pointer for scanning.
 class PromotionQueue {
  public:
-  explicit PromotionQueue(Heap* heap)
-      : front_(NULL),
-        rear_(NULL),
-        limit_(NULL),
-        emergency_stack_(0),
-        heap_(heap) { }
+  PromotionQueue() : front_(NULL), rear_(NULL) { }
 
-  void Initialize();
-
-  void Destroy() {
-    ASSERT(is_empty());
-    delete emergency_stack_;
-    emergency_stack_ = NULL;
+  void Initialize(Address start_address) {
+    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
   }
 
-  inline void ActivateGuardIfOnTheSamePage();
-
-  Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  }
-
-  void SetNewLimit(Address limit) {
-    if (!guard_) {
-      return;
-    }
-
-    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
-    limit_ = reinterpret_cast<intptr_t*>(limit);
-
-    if (limit_ <= rear_) {
-      return;
-    }
-
-    RelocateQueueHead();
-  }
-
-  bool is_empty() {
-    return (front_ == rear_) &&
-        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
-  }
+  bool is_empty() { return front_ <= rear_; }
 
   inline void insert(HeapObject* target, int size);
 
   void remove(HeapObject** target, int* size) {
-    ASSERT(!is_empty());
-    if (front_ == rear_) {
-      Entry e = emergency_stack_->RemoveLast();
-      *target = e.obj_;
-      *size = e.size_;
-      return;
-    }
-
-    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
-      NewSpacePage* front_page =
-          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-      ASSERT(!front_page->prev_page()->is_anchor());
-      front_ =
-          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
-    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
-                                reinterpret_cast<Address>(front_));
+    ASSERT(front_ >= rear_);
   }
 
  private:
-  // The front of the queue is higher in the memory page chain than the rear.
+  // The front of the queue is higher in memory than the rear.
   intptr_t* front_;
   intptr_t* rear_;
-  intptr_t* limit_;
-
-  bool guard_;
-
-  static const int kEntrySizeInWords = 2;
-
-  struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
-
-    HeapObject* obj_;
-    int size_;
-  };
-  List<Entry>* emergency_stack_;
-
-  Heap* heap_;
-
-  void RelocateQueueHead();
 
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
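// Editor's illustrative sketch (assumed types, not from this patch): the
// reverted PromotionQueue keeps (object, size) pairs between two cursors
// that both move downward from the top of to-space, so remove() consumes
// the oldest pair first (FIFO). A self-contained model of the same scheme:
#include <cassert>
#include <cstdint>

class TwoPointerQueue {
 public:
  // 'top' is the high end of a word-aligned buffer; both cursors grow down.
  explicit TwoPointerQueue(intptr_t* top) : front_(top), rear_(top) {}

  bool is_empty() const { return front_ <= rear_; }

  void insert(void* object, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(object);
    *(--rear_) = size;
  }

  void remove(void** object, int* size) {
    *object = reinterpret_cast<void*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    assert(front_ >= rear_);  // no underflow past unconsumed entries
  }

 private:
  intptr_t* front_;  // higher in memory than rear_
  intptr_t* rear_;
};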
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
-
-
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
@@ -426,24 +323,19 @@
 };
 
 
-enum ArrayStorageAllocationMode {
-  DONT_INITIALIZE_ARRAY_ELEMENTS,
-  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-};
-
 class Heap {
  public:
   // Configure heap size before setup. Return false if the heap has been
-  // set up already.
+  // setup already.
   bool ConfigureHeap(int max_semispace_size,
-                     intptr_t max_old_gen_size,
-                     intptr_t max_executable_size);
+                     int max_old_gen_size,
+                     int max_executable_size);
   bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
   // also creates the basic non-mutable objects.
   // Returns whether it succeeded.
-  bool SetUp(bool create_heap_objects);
+  bool Setup(bool create_heap_objects);
 
   // Destroys all memory allocated by the heap.
   void TearDown();
@@ -453,8 +345,8 @@
   // jslimit_/real_jslimit_ variable in the StackGuard.
   void SetStackLimits();
 
-  // Returns whether SetUp has been called.
-  bool HasBeenSetUp();
+  // Returns whether Setup has been called.
+  bool HasBeenSetup();
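// Editor's note (hypothetical usage; Heap is v8-internal): after this
// revert, heap bring-up goes back to the pre-rename spellings and plain
// int sizes. A sketch, assuming a Heap* obtained from the Isolate:
//
//   Heap* heap = isolate->heap();          // assumed accessor
//   heap->ConfigureHeap(8 * MB,            // max_semispace_size
//                       256 * MB,          // max_old_gen_size
//                       128 * MB);         // max_executable_size
//   if (!heap->HasBeenSetup()) heap->Setup(true);  // create root objects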
 
   // Returns the maximum amount of memory reserved for the heap.  For
   // the young generation, we reserve 4 times the amount needed for a
@@ -485,6 +377,9 @@
   // all available bytes. Check MaxHeapObjectSize() instead.
   intptr_t Available();
 
+  // Returns the maximum object size in paged space.
+  inline int MaxObjectSizeInPagedSpace();
+
   // Returns the size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
@@ -529,30 +424,6 @@
   MUST_USE_RESULT MaybeObject* AllocateJSObject(
       JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocate a JSArray with no elements
-  MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
-      ElementsKind elements_kind,
-      PretenureFlag pretenure = NOT_TENURED) {
-    return AllocateJSArrayAndStorage(elements_kind, 0, 0,
-                                     DONT_INITIALIZE_ARRAY_ELEMENTS,
-                                     pretenure);
-  }
-
-  // Allocate a JSArray with a specified length but elements that are left
-  // uninitialized.
-  MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
-      ElementsKind elements_kind,
-      int length,
-      int capacity,
-      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
-      PretenureFlag pretenure = NOT_TENURED);
-
-  // Allocate a JSArray with no elements
-  MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
-      FixedArrayBase* array_base,
-      ElementsKind elements_kind,
-      PretenureFlag pretenure = NOT_TENURED);
-
   // Allocates and initializes a new global object based on a constructor.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -586,7 +457,6 @@
   // size, but keeping the original prototype.  The receiver must have at least
   // the size of the new object.  The object is reinitialized and behaves as an
   // object that has been freshly allocated.
-  // Returns failure if an error occured, otherwise object.
   MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                       InstanceType type,
                                                       int size);
@@ -615,10 +485,8 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateMap(
-      InstanceType instance_type,
-      int instance_size,
-      ElementsKind elements_kind = FAST_ELEMENTS);
+  MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
+                                           int instance_size);
 
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -631,20 +499,11 @@
   MUST_USE_RESULT MaybeObject* AllocateCodeCache();
 
   // Allocates a serialized scope info.
-  MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
+  MUST_USE_RESULT MaybeObject* AllocateSerializedScopeInfo(int length);
 
   // Allocates an empty PolymorphicCodeCache.
   MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
 
-  // Allocates a pre-tenured empty AccessorPair.
-  MUST_USE_RESULT MaybeObject* AllocateAccessorPair();
-
-  // Allocates an empty TypeFeedbackInfo.
-  MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
-
-  // Allocates an AliasedArgumentsEntry.
-  MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
-
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
@@ -717,7 +576,7 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Computes a single character string where the character has code.
-  // A cache is used for ASCII codes.
+  // A cache is used for ascii codes.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed. Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* LookupSingleCharacterStringFromCode(
@@ -805,13 +664,6 @@
       int length,
       PretenureFlag pretenure = NOT_TENURED);
 
-  // Allocates a fixed double array with hole values. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
-
   // AllocateHashTable is identical to AllocateFixedArray except
   // that the resulting object has hash_table_map as map.
   MUST_USE_RESULT MaybeObject* AllocateHashTable(
@@ -837,7 +689,7 @@
   // Allocate a block context.
   MUST_USE_RESULT MaybeObject* AllocateBlockContext(JSFunction* function,
                                                     Context* previous,
-                                                    ScopeInfo* info);
+                                                    SerializedScopeInfo* info);
 
   // Allocates a new utility object in the old generation.
   MUST_USE_RESULT MaybeObject* AllocateStruct(InstanceType type);
@@ -886,15 +738,13 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
-      int32_t value, PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
 
   // Converts an int into either a Smi or a HeapNumber object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
-      uint32_t value, PretenureFlag pretenure = NOT_TENURED);
+  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
 
   // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
@@ -947,9 +797,9 @@
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
-      const ExternalAsciiString::Resource* resource);
+      ExternalAsciiString::Resource* resource);
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
-      const ExternalTwoByteString::Resource* resource);
+      ExternalTwoByteString::Resource* resource);
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
@@ -1028,41 +878,19 @@
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  bool CollectGarbage(AllocationSpace space,
-                      GarbageCollector collector,
-                      const char* gc_reason,
-                      const char* collector_reason);
+  bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
 
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  inline bool CollectGarbage(AllocationSpace space,
-                             const char* gc_reason = NULL);
+  inline bool CollectGarbage(AllocationSpace space);
 
-  static const int kNoGCFlags = 0;
-  static const int kSweepPreciselyMask = 1;
-  static const int kReduceMemoryFootprintMask = 2;
-  static const int kAbortIncrementalMarkingMask = 4;
-
-  // Making the heap iterable requires us to sweep precisely and abort any
-  // incremental marking as well.
-  static const int kMakeHeapIterableMask =
-      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
-
-  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
-  // non-zero, then the slower precise sweeper is used, which leaves the heap
-  // in a state where we can iterate over the heap visiting all objects.
-  void CollectAllGarbage(int flags, const char* gc_reason = NULL);
+  // Performs a full garbage collection. Force compaction if the
+  // parameter is true.
+  void CollectAllGarbage(bool force_compaction);
 
   // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
-  // Check whether the heap is currently iterable.
-  bool IsHeapIterable();
-
-  // Ensure that we have swept all spaces in such a way that we can iterate
-  // over all objects.  May cause a GC.
-  void EnsureHeapIsIterable();
+  void CollectAllAvailableGarbage();
 
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
@@ -1071,20 +899,6 @@
   // ensure correct callback for weak global handles.
   void PerformScavenge();
 
-  inline void increment_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_++;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  inline void decrement_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_--;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
 #ifdef DEBUG
@@ -1111,8 +925,6 @@
 
   // Heap root getters.  We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
-  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
-  // not corrupt the map.
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   type* name() {                                                               \
     return type::cast(roots_[k##camel_name##RootIndex]);                       \
@@ -1146,9 +958,6 @@
   }
   Object* global_contexts_list() { return global_contexts_list_; }
 
-  // Number of mark-sweeps.
-  int ms_count() { return ms_count_; }
-
   // Iterates over all roots in the heap.
   void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
@@ -1156,16 +965,60 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
+  enum ExpectedPageWatermarkState {
+    WATERMARK_SHOULD_BE_VALID,
+    WATERMARK_CAN_BE_INVALID
+  };
+
+  // For each dirty region on a page in use from an old space call
+  // visit_dirty_region callback.
+  // If either visit_dirty_region or callback can cause an allocation
+  // in old space (and thus change the allocation watermark), then
+  // can_preallocate_during_iteration should be set to true.
+  // All pages will be marked as having invalid watermark upon
+  // iteration completion.
+  void IterateDirtyRegions(
+      PagedSpace* space,
+      DirtyRegionCallback visit_dirty_region,
+      ObjectSlotCallback callback,
+      ExpectedPageWatermarkState expected_page_watermark_state);
+
+  // Interpret marks as a bitvector of dirty marks for regions of size
+  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
+  // the memory interval from start to end. For each dirty region call the
+  // visit_dirty_region callback. Return updated bitvector of dirty marks.
+  uint32_t IterateDirtyRegions(uint32_t marks,
+                               Address start,
+                               Address end,
+                               DirtyRegionCallback visit_dirty_region,
+                               ObjectSlotCallback callback);
+
   // Iterate pointers into the from-semispace of new space found in the
   // memory interval from start to end.
+  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
 
+  // Iterate pointers to new space found in memory interval from start to end.
+  // Return true if pointers to new space were found.
+  static bool IteratePointersInDirtyRegion(Heap* heap,
+                                           Address start,
+                                           Address end,
+                                           ObjectSlotCallback callback);
+
+
+  // Iterate pointers to new space found in memory interval from start to end.
+  // This interval is considered to belong to the map space.
+  // Return true if pointers to new space were found.
+  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
+                                               Address start,
+                                               Address end,
+                                               ObjectSlotCallback callback);
+
+
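// Editor's sketch (assumed names, not from this patch): the bitvector
// flavour of IterateDirtyRegions above treats each bit of 'marks' as one
// dirty region of Page::kRegionSize bytes. A self-contained loop over such
// a mark word, with an illustrative region size and callback type:
#include <cstdint>

typedef void (*RegionVisitor)(uintptr_t region_start, uintptr_t region_end);

const uintptr_t kAssumedRegionSize = 0x400;  // stand-in for Page::kRegionSize

void VisitDirtyRegions(uint32_t marks, uintptr_t start, RegionVisitor visit) {
  for (int bit = 0; bit < 32; ++bit) {
    if (marks & (1u << bit)) {  // region number 'bit' is dirty
      uintptr_t region_start = start + bit * kAssumedRegionSize;
      visit(region_start, region_start + kAssumedRegionSize);
    }
  }
}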
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
-  inline bool InNewSpace(Address addr);
-  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
@@ -1204,19 +1057,11 @@
     roots_[kEmptyScriptRootIndex] = script;
   }
 
-  void public_set_store_buffer_top(Address* top) {
-    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
-  }
-
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
 
   // Generated code can embed this address to get access to the roots.
-  Object** roots_array_start() { return roots_; }
-
-  Address* store_buffer_top_address() {
-    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
-  }
+  Object** roots_address() { return roots_; }
 
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
@@ -1230,14 +1075,6 @@
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
 
-  // Verify that AccessorPairs are not shared, i.e. make sure that they have
-  // exactly one pointer to them.
-  void VerifyNoAccessorPairSharing();
-
-  void OldPointerSpaceCheckStoreBuffer();
-  void MapSpaceCheckStoreBuffer();
-  void LargeObjectSpaceCheckStoreBuffer();
-
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1333,63 +1170,26 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
-  inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  // True if we have reached the allocation limit in the old generation that
+  // should force the next GC (caused normally) to be a full one.
+  bool OldGenerationPromotionLimitReached() {
+    return (PromotedSpaceSize() + PromotedExternalMemorySize())
+           > old_gen_promotion_limit_;
+  }
+
+  intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ -
+           (PromotedSpaceSize() + PromotedExternalMemorySize());
   }
 
   // True if we have reached the allocation limit in the old generation that
-  // should force the next GC (caused normally) to be a full one.
-  inline bool OldGenerationPromotionLimitReached() {
-    return PromotedTotalSize() > old_gen_promotion_limit_;
+  // should artificially cause a GC right now.
+  bool OldGenerationAllocationLimitReached() {
+    return OldGenerationSpaceAvailable() < 0;
   }
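// Editor's worked example for the two checks above: with
// old_gen_allocation_limit_ = 8 MB and 10 MB promoted so far (including
// promoted external memory), OldGenerationSpaceAvailable() yields
// 8 - 10 = -2 MB, so OldGenerationAllocationLimitReached() returns true
// and a GC is triggered right away.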
 
-  inline intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ - PromotedTotalSize();
-  }
-
-  inline intptr_t OldGenerationCapacityAvailable() {
-    return max_old_generation_size_ - PromotedTotalSize();
-  }
-
-  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
-  static const intptr_t kMinimumAllocationLimit =
-      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly.  As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap.  This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
-
-  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
-    const int divisor = FLAG_stress_compaction ? 10 : 3;
-    intptr_t limit =
-        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
-    limit += new_space_.Capacity();
-    limit *= old_gen_limit_factor_;
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
-
-  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
-    const int divisor = FLAG_stress_compaction ? 8 : 2;
-    intptr_t limit =
-        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
-    limit += new_space_.Capacity();
-    limit *= old_gen_limit_factor_;
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
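// Editor's worked example for the OldGenPromotionLimit() deleted above:
// with old_gen_size = 30 MB, no --stress-compaction (divisor 3), a 2 MB
// new-space capacity and old_gen_limit_factor_ = 1, the limit is
// max(30 + 30/3, kMinimumPromotionLimit) + 2 = 42 MB, then capped at the
// halfway point (30 MB + max_old_generation_size_) / 2.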
-
-  // Implements the corresponding V8 API function.
-  bool IdleNotification(int hint);
+  // Can be called when the embedding application is idle.
+  bool IdleNotification();
 
   // Declare all the root indices.
   enum RootListIndex {
@@ -1413,8 +1213,6 @@
 
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
-  MUST_USE_RESULT MaybeObject* Uint32ToString(
-      uint32_t value, bool check_number_string_cache = true);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
   RootListIndex RootIndexForExternalArrayType(
@@ -1426,48 +1224,31 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
+  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+                                                      Address src,
+                                                      int byte_size);
+
   // Optimized version of memmove for blocks with pointer size aligned sizes and
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
+  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+                                                      Address src,
+                                                      int byte_size);
+
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
   inline void IncrementYoungSurvivorsCounter(int survived) {
-    ASSERT(survived >= 0);
     young_survivors_after_last_gc_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
-  inline bool NextGCIsLikelyToBeFull() {
-    if (FLAG_gc_global) return true;
-
-    intptr_t total_promoted = PromotedTotalSize();
-
-    intptr_t adjusted_promotion_limit =
-        old_gen_promotion_limit_ - new_space_.Capacity();
-
-    if (total_promoted >= adjusted_promotion_limit) return true;
-
-    intptr_t adjusted_allocation_limit =
-        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
-
-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
-
-    return false;
-  }
-
-
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
-  void UpdateReferencesInExternalStringTable(
-      ExternalStringTableUpdaterCallback updater_func);
-
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
 
-  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
-
   // Helper function that governs the promotion policy from new space to
   // old.  If the object's old address lies below the new space's age
   // mark or if we've already filled the bottom 1/16th of the to space,
@@ -1480,19 +1261,8 @@
 
   void ClearNormalizedMapCaches();
 
-  // Clears the cache of ICs related to this map.
-  void ClearCacheOnMap(Map* map) {
-    if (FLAG_cleanup_code_caches_at_gc) {
-      map->ClearCodeCache(this);
-    }
-  }
-
   GCTracer* tracer() { return tracer_; }
 
-  // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-  intptr_t PromotedSpaceSizeOfObjects();
-
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1511,29 +1281,6 @@
     return &mark_compact_collector_;
   }
 
-  StoreBuffer* store_buffer() {
-    return &store_buffer_;
-  }
-
-  Marking* marking() {
-    return &marking_;
-  }
-
-  IncrementalMarking* incremental_marking() {
-    return &incremental_marking_;
-  }
-
-  bool IsSweepingComplete() {
-    return old_data_space()->IsSweepingComplete() &&
-           old_pointer_space()->IsSweepingComplete();
-  }
-
-  bool AdvanceSweepers(int step_size) {
-    bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
-    sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
-    return sweeping_complete;
-  }
-
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1544,64 +1291,22 @@
   }
 
   inline Isolate* isolate();
+  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
-  inline void CallGlobalGCPrologueCallback() {
+  void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
 
-  inline void CallGlobalGCEpilogueCallback() {
+  void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
-  inline bool OldGenerationAllocationLimitReached();
-
-  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-  }
-
-  void QueueMemoryChunkForFree(MemoryChunk* chunk);
-  void FreeQueuedChunks();
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
-
-  // The roots that have an index less than this are always in old space.
-  static const int kOldSpaceRoots = 0x20;
-
-  bool idle_notification_will_schedule_next_gc() {
-    return idle_notification_will_schedule_next_gc_;
-  }
-
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
     return seed;
   }
 
-  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-    ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
-    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetConstructStubDeoptPCOffset(int pc_offset) {
-    ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  // For post mortem debugging.
-  void RememberUnmappedPage(Address page, bool compacted);
-
-  // Global inline caching age: it is incremented on some GCs after context
-  // disposal. We use it to flush inline caches.
-  int global_ic_age() {
-    return global_ic_age_;
-  }
-
-  void AgeInlineCaches() {
-    ++global_ic_age_;
-  }
-
  private:
   Heap();
 
@@ -1609,12 +1314,12 @@
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
-  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
+  intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1629,10 +1334,6 @@
   // For keeping track of context disposals.
   int contexts_disposed_;
 
-  int global_ic_age_;
-
-  int scan_on_scavenge_pages_;
-
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
@@ -1649,25 +1350,21 @@
   HeapState gc_state_;
   int gc_post_processing_depth_;
 
+  // Returns the size of objects residing in non new spaces.
+  intptr_t PromotedSpaceSize();
+
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
 
+  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
 
-  // For post mortem debugging.
-  static const int kRememberedUnmappedPages = 128;
-  int remembered_unmapped_pages_index_;
-  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
-
   // Total length of the strings we failed to flatten since the last GC.
   int unflattened_strings_length_;
 
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline void set_##name(type* value) {                                        \
-    /* The deserializer makes use of the fact that these common roots are */   \
-    /* never in new space and never on a page that is being compacted.    */   \
-    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value));  \
+  inline void set_##name(type* value) {                                 \
     roots_[k##camel_name##RootIndex] = value;                                  \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -1688,10 +1385,6 @@
   HeapDebugUtils* debug_utils_;
 #endif  // DEBUG
 
-  // Indicates that the new space should be kept small due to high promotion
-  // rates caused by the mutator allocating a lot of long-lived objects.
-  bool new_space_high_promotion_mode_active_;
-
   // Limit that triggers a global GC on the next (normally caused) GC.  This
   // is checked when we have already decided to do a GC to help determine
   // which collector to invoke.
@@ -1702,13 +1395,6 @@
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
 
-  // Sometimes the heuristics dictate that those limits are increased.  This
-  // variable records that fact.
-  int old_gen_limit_factor_;
-
-  // Used to adjust the limits that control the timing of the next GC.
-  intptr_t size_of_old_gen_at_last_old_space_gc_;
-
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1728,8 +1414,6 @@
 
   Object* global_contexts_list_;
 
-  StoreBufferRebuilder store_buffer_rebuilder_;
-
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1787,16 +1471,17 @@
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
+  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
 
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    ASSERT(!encoded);
-    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
+    gc_safe_size_of_old_object_ = encoded
+        ? &GcSafeSizeOfOldObjectWithEncodedMap
+        : &GcSafeSizeOfOldObject;
   }
 
   // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space,
-                                          const char** reason);
+  GarbageCollector SelectGarbageCollector(AllocationSpace space);
 
   // Performs garbage collection
   // Returns whether there is a chance another major GC could
@@ -1804,6 +1489,8 @@
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
+  static const intptr_t kMinimumPromotionLimit = 2 * MB;
+  static const intptr_t kMinimumAllocationLimit = 8 * MB;
 
   inline void UpdateOldSpaceLimits();
 
@@ -1835,17 +1522,14 @@
                              Object* to_number,
                              byte kind);
 
-  // Allocate a JSArray with no elements
-  MUST_USE_RESULT MaybeObject* AllocateJSArray(
-      ElementsKind elements_kind,
-      PretenureFlag pretenure = NOT_TENURED);
-
   // Allocate empty fixed array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
 
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
 
+  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
+
   // Performs a minor collection in new generation.
   void Scavenge();
 
@@ -1854,15 +1538,16 @@
       Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
-  static void ScavengeStoreBufferCallback(Heap* heap,
-                                          MemoryChunk* page,
-                                          StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
 
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue();
+  void MarkCompactPrologue(bool is_compacting);
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
 
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
@@ -1872,11 +1557,12 @@
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
   // Initializes a function with a shared part and prototype.
+  // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
   // other parts of the VM could use it. Specifically, a function that creates
   // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
   // Please note this does not perform a garbage collection.
-  inline void InitializeFunction(
+  MUST_USE_RESULT inline MaybeObject* InitializeFunction(
       JSFunction* function,
       SharedFunctionInfo* shared,
       Object* prototype);
@@ -1887,13 +1573,8 @@
   GCTracer* tracer_;
 
 
-  // Allocates a small number to string cache.
-  MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
-  // Creates and installs the full-sized number string cache.
-  void AllocateFullSizeNumberStringCache();
-  // Get the length of the number to string cache based on the max semispace
-  // size.
-  int FullSizeNumberStringCacheLength();
+  // Initializes the number to string cache based on the max semispace size.
+  MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
   // Flush the number to string cache.
   void FlushNumberStringCache();
 
@@ -1901,13 +1582,11 @@
 
   enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
 
-  static const int kYoungSurvivalRateHighThreshold = 90;
-  static const int kYoungSurvivalRateLowThreshold = 10;
+  static const int kYoungSurvivalRateThreshold = 90;
   static const int kYoungSurvivalRateAllowedDeviation = 15;
 
   int young_survivors_after_last_gc_;
   int high_survival_rate_period_length_;
-  int low_survival_rate_period_length_;
   double survival_rate_;
   SurvivalRateTrend previous_survival_rate_trend_;
   SurvivalRateTrend survival_rate_trend_;
@@ -1940,16 +1619,6 @@
     }
   }
 
-  bool IsStableOrDecreasingSurvivalTrend() {
-    switch (survival_rate_trend()) {
-      case STABLE:
-      case DECREASING:
-        return true;
-      default:
-        return false;
-    }
-  }
-
   bool IsIncreasingSurvivalTrend() {
     return survival_rate_trend() == INCREASING;
   }
@@ -1958,54 +1627,8 @@
     return high_survival_rate_period_length_ > 0;
   }
 
-  bool IsLowSurvivalRate() {
-    return low_survival_rate_period_length_ > 0;
-  }
-
-  void SelectScavengingVisitorsTable();
-
-  void StartIdleRound() {
-    mark_sweeps_since_idle_round_started_ = 0;
-    ms_count_at_last_idle_notification_ = ms_count_;
-  }
-
-  void FinishIdleRound() {
-    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
-    scavenges_since_last_idle_round_ = 0;
-  }
-
-  bool EnoughGarbageSinceLastIdleRound() {
-    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
-  }
-
-  bool WorthStartingGCWhenIdle() {
-    if (contexts_disposed_ > 0) {
-      return true;
-    }
-    return incremental_marking()->WorthActivating();
-  }
-
-  // Estimates how many milliseconds a Mark-Sweep would take to complete.
-  // In idle notification handler we assume that this function will return:
-  // - a number less than 10 for small heaps, which are less than 8Mb.
-  // - a number greater than 10 for large heaps, which are greater than 32Mb.
-  int TimeMarkSweepWouldTakeInMs() {
-    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
-    static const int kMbPerMs = 2;
-
-    int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
-    return heap_size_mb / kMbPerMs;
-  }
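// Editor's worked example for the estimate deleted above: with
// kMbPerMs = 2, an 8 MB heap gives 8 / 2 = 4 ms (< 10, "small"), while a
// 64 MB heap gives 64 / 2 = 32 ms (> 10, "large"), matching the thresholds
// the idle-notification comment assumes.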
-
-  // Returns true if no more GC work is left.
-  bool IdleGlobalGC();
-
-  void AdvanceIdleIncrementalMarking(intptr_t step_size);
-
-
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
-  static const int kInitialNumberStringCacheSize = 256;
 
   // Maximum GC pause.
   int max_gc_pause_;
@@ -2023,37 +1646,25 @@
 
   MarkCompactCollector mark_compact_collector_;
 
-  StoreBuffer store_buffer_;
-
-  Marking marking_;
-
-  IncrementalMarking incremental_marking_;
+  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+  // Instead of clearing this flag from all pages we just flip
+  // its meaning at the beginning of a scavenge.
+  intptr_t page_watermark_invalidated_mark_;
 
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;
 
-  bool idle_notification_will_schedule_next_gc_;
-  int mark_sweeps_since_idle_round_started_;
-  int ms_count_at_last_idle_notification_;
-  unsigned int gc_count_at_last_idle_gc_;
-  int scavenges_since_last_idle_round_;
-
-  static const int kMaxMarkSweepsInIdleRound = 7;
-  static const int kIdleScavengeThreshold = 5;
-
   // Shared state read by the scavenge collector and set by ScavengeObject.
   PromotionQueue promotion_queue_;
 
   // Flag is set when the heap has been configured.  The heap can be repeatedly
-  // configured through the API until it is set up.
+  // configured through the API until it is setup.
   bool configured_;
 
   ExternalStringTable external_string_table_;
 
-  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-  MemoryChunk* chunks_queued_for_free_;
+  bool is_safe_to_read_maps_;
 
   friend class Factory;
   friend class GCTracer;
@@ -2105,15 +1716,32 @@
 
 class AlwaysAllocateScope {
  public:
-  inline AlwaysAllocateScope();
-  inline ~AlwaysAllocateScope();
+  AlwaysAllocateScope() {
+    // We shouldn't hit any nested scopes, because that requires
+    // non-handle code to call handle code. The code still works but
+    // performance will degrade, so we want to catch this situation
+    // in debug mode.
+    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+    HEAP->always_allocate_scope_depth_++;
+  }
+
+  ~AlwaysAllocateScope() {
+    HEAP->always_allocate_scope_depth_--;
+    ASSERT(HEAP->always_allocate_scope_depth_ == 0);
+  }
 };
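// Editor's note (hypothetical usage): both scope classes follow the same
// stack-scope idiom documented for AssertNoAllocation further down, e.g.:
//
//   { AlwaysAllocateScope always_allocate;
//     ...   // code that must be able to allocate runs here
//   }       // always_allocate_scope_depth_ drops back to zero on exit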
 
 
 class LinearAllocationScope {
  public:
-  inline LinearAllocationScope();
-  inline ~LinearAllocationScope();
+  LinearAllocationScope() {
+    HEAP->linear_allocation_scope_depth_++;
+  }
+
+  ~LinearAllocationScope() {
+    HEAP->linear_allocation_scope_depth_--;
+    ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
+  }
 };
 
 
@@ -2125,7 +1753,38 @@
 // objects in a heap space but above the allocation pointer.
 class VerifyPointersVisitor: public ObjectVisitor {
  public:
-  inline void VisitPointers(Object** start, Object** end);
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(HEAP->Contains(object));
+        ASSERT(object->map()->IsMap());
+      }
+    }
+  }
+};
+
+
+// Visitor class to verify interior pointers in spaces that use region marks
+// to keep track of intergenerational references.
+// As VerifyPointersVisitor but also checks that dirty marks are set
+// for regions covering intergenerational references.
+class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(HEAP->Contains(object));
+        ASSERT(object->map()->IsMap());
+        if (HEAP->InNewSpace(object)) {
+          ASSERT(HEAP->InToSpace(object));
+          Address addr = reinterpret_cast<Address>(current);
+          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
+        }
+      }
+    }
+  }
 };
 #endif
 
@@ -2201,6 +1860,7 @@
  public:
   enum HeapObjectsFiltering {
     kNoFiltering,
+    kFilterFreeListNodes,
     kFilterUnreachable
   };
 
@@ -2240,17 +1900,11 @@
   // Clear the cache.
   void Clear();
 
-  static const int kLength = 256;
+  static const int kLength = 64;
   static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 5;
-  static const int kHashMask = -4;  // Zero the last two bits.
-  static const int kEntriesPerBucket = 4;
+  static const int kMapHashShift = 2;
   static const int kNotFound = -1;
 
-  // kEntriesPerBucket should be a power of 2.
-  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
-  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
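// Editor's sketch (not from this patch): kCapacityMask works because
// kLength is a power of two, so 'hash & (kLength - 1)' is the cheap
// equivalent of 'hash % kLength'. With the reverted kLength = 64:
#include <cstdint>

const int kSketchLength = 64;                       // mirrors kLength
const int kSketchCapacityMask = kSketchLength - 1;  // 0x3f

inline int Bucket(uint32_t hash) { return hash & kSketchCapacityMask; }
// Bucket(0x12345678) == 56, the same result as 0x12345678 % 64.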
  private:
   KeyedLookupCache() {
     for (int i = 0; i < kLength; ++i) {
@@ -2351,18 +2005,6 @@
 };
 
 
-#ifdef DEBUG
-class DisallowAllocationFailure {
- public:
-  inline DisallowAllocationFailure();
-  inline ~DisallowAllocationFailure();
-
- private:
-  bool old_state_;
-};
-#endif
-
-
 // A helper class to document/test C++ scopes where we do not
 // expect a GC. Usage:
 //
@@ -2370,29 +2012,66 @@
 // { AssertNoAllocation nogc;
 //   ...
 // }
-class AssertNoAllocation {
- public:
-  inline AssertNoAllocation();
-  inline ~AssertNoAllocation();
 
 #ifdef DEBUG
+
+class DisallowAllocationFailure {
+ public:
+  DisallowAllocationFailure() {
+    old_state_ = HEAP->disallow_allocation_failure_;
+    HEAP->disallow_allocation_failure_ = true;
+  }
+  ~DisallowAllocationFailure() {
+    HEAP->disallow_allocation_failure_ = old_state_;
+  }
  private:
   bool old_state_;
-#endif
 };
 
+class AssertNoAllocation {
+ public:
+  AssertNoAllocation() {
+    old_state_ = HEAP->allow_allocation(false);
+  }
+
+  ~AssertNoAllocation() {
+    HEAP->allow_allocation(old_state_);
+  }
+
+ private:
+  bool old_state_;
+};
 
 class DisableAssertNoAllocation {
  public:
-  inline DisableAssertNoAllocation();
-  inline ~DisableAssertNoAllocation();
+  DisableAssertNoAllocation() {
+    old_state_ = HEAP->allow_allocation(true);
+  }
 
-#ifdef DEBUG
+  ~DisableAssertNoAllocation() {
+    HEAP->allow_allocation(old_state_);
+  }
+
  private:
   bool old_state_;
-#endif
 };
 
+#else  // ndef DEBUG
+
+class AssertNoAllocation {
+ public:
+  AssertNoAllocation() { }
+  ~AssertNoAllocation() { }
+};
+
+class DisableAssertNoAllocation {
+ public:
+  DisableAssertNoAllocation() { }
+  ~DisableAssertNoAllocation() { }
+};
+
+#endif
+
 // GCTracer collects and prints ONE line after each garbage collector
 // invocation IFF --trace_gc is used.
 
@@ -2405,13 +2084,7 @@
       MC_MARK,
       MC_SWEEP,
       MC_SWEEP_NEWSPACE,
-      MC_EVACUATE_PAGES,
-      MC_UPDATE_NEW_TO_NEW_POINTERS,
-      MC_UPDATE_ROOT_TO_NEW_POINTERS,
-      MC_UPDATE_OLD_TO_NEW_POINTERS,
-      MC_UPDATE_POINTERS_TO_EVACUATED,
-      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
-      MC_UPDATE_MISC_POINTERS,
+      MC_COMPACT,
       MC_FLUSH_CODE,
       kNumberOfScopes
     };
@@ -2433,9 +2106,7 @@
     double start_time_;
   };
 
-  explicit GCTracer(Heap* heap,
-                    const char* gc_reason,
-                    const char* collector_reason);
+  explicit GCTracer(Heap* heap);
   ~GCTracer();
 
   // Sets the collector.
@@ -2447,6 +2118,16 @@
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
 
+  // Sets the flag that this is a compacting full GC.
+  void set_is_compacting() { is_compacting_ = true; }
+  bool is_compacting() const { return is_compacting_; }
+
+  // Increment and decrement the count of marked objects.
+  void increment_marked_count() { ++marked_count_; }
+  void decrement_marked_count() { --marked_count_; }
+
+  int marked_count() { return marked_count_; }
+
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2456,27 +2137,38 @@
   const char* CollectorString();
 
   // Returns size of object in heap (in MB).
-  inline double SizeOfHeapObjects();
+  double SizeOfHeapObjects() {
+    return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
+  }
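// Editor's worked example: SizeOfObjects() reports bytes, so a heap
// holding 50331648 bytes of objects makes SizeOfHeapObjects() return
// 50331648 / MB = 48.0.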
 
-  // Timestamp set in the constructor.
-  double start_time_;
+  double start_time_;  // Timestamp set in the constructor.
+  intptr_t start_size_;  // Size of objects in heap set in constructor.
+  GarbageCollector collector_;  // Type of collector.
 
-  // Size of objects in heap set in constructor.
-  intptr_t start_object_size_;
-
-  // Size of memory allocated from OS set in constructor.
-  intptr_t start_memory_size_;
-
-  // Type of collector.
-  GarbageCollector collector_;
-
-  // A count (including this one, e.g. the first collection is 1) of the
+  // A count (including this one, e.g. the first collection is 1) of the
   // number of garbage collections.
   unsigned int gc_count_;
 
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
 
+  // True if the current GC is a compacting full collection, false
+  // otherwise.
+  bool is_compacting_;
+
+  // True if the *previous* full GC was a compacting collection (will be
+  // false if there has not been a previous full GC).
+  bool previous_has_compacted_;
+
+  // On a full GC, a count of the number of marked objects.  Incremented
+  // when an object is marked and decremented when an object's mark bit is
+  // cleared.  Will be zero on a scavenge collection.
+  int marked_count_;
+
+  // The count from the end of the previous full GC.  Will be zero if there
+  // was no previous full GC.
+  int previous_marked_count_;
+
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
 
@@ -2495,17 +2187,7 @@
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
-  // Incremental marking steps counters.
-  int steps_count_;
-  double steps_took_;
-  double longest_step_;
-  int steps_count_since_last_gc_;
-  double steps_took_since_last_gc_;
-
   Heap* heap_;
-
-  const char* gc_reason_;
-  const char* collector_reason_;
 };
 
 
@@ -2616,46 +2298,6 @@
 };
 
 
-// Intrusive object marking uses least significant bit of
-// heap object's map word to mark objects.
-// Normally all map words have least significant bit set
-// because they contain tagged map pointer.
-// If the bit is not set object is marked.
-// All objects should be unmarked before resuming
-// JavaScript execution.
-class IntrusiveMarking {
- public:
-  static bool IsMarked(HeapObject* object) {
-    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
-  }
-
-  static void ClearMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
-    ASSERT(!IsMarked(object));
-  }
-
-  static void SetMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
-    ASSERT(IsMarked(object));
-  }
-
-  static Map* MapOfMarkedObject(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
-  }
-
-  static int SizeOfMarkedObject(HeapObject* object) {
-    return object->SizeFromMap(MapOfMarkedObject(object));
-  }
-
- private:
-  static const uintptr_t kNotMarkedBit = 0x1;
-  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
-};
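// Editor's sketch (assumed values, not from this patch): the intrusive
// marking removed above works because tagged map pointers always carry a
// set low bit, so clearing that bit marks an object with no extra storage.
// A self-contained model using a raw word in place of the map word:
#include <cassert>
#include <cstdint>

const uintptr_t kSketchNotMarkedBit = 0x1;

inline bool IsMarked(uintptr_t word) {
  return (word & kSketchNotMarkedBit) == 0;
}
inline uintptr_t SetMark(uintptr_t word) { return word & ~kSketchNotMarkedBit; }
inline uintptr_t ClearMark(uintptr_t word) { return word | kSketchNotMarkedBit; }

int main() {
  uintptr_t map_word = 0x1001;     // tagged pointer: low bit set => unmarked
  assert(!IsMarked(map_word));
  map_word = SetMark(map_word);    // drop the tag bit to mark
  assert(IsMarked(map_word));
  map_word = ClearMark(map_word);  // restore before resuming JavaScript
  assert(!IsMarked(map_word));
  return 0;
}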
-
-
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2710,11 +2352,13 @@
 
   AssertNoAllocation no_alloc;  // i.e. no gc allowed.
 
- private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
 
+
 } }  // namespace v8::internal
 
+#undef HEAP
+
 #endif  // V8_HEAP_H_
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index f698da4..5630ce3 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,14 +67,6 @@
 }
 
 
-int HValue::LoopWeight() const {
-  const int w = FLAG_loop_weight;
-  static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
-  return weights[Min(block()->LoopNestingDepth(),
-                     static_cast<int>(ARRAY_SIZE(weights)-1))];
-}
-
-
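
The LoopWeight() helper removed above biases use counts by loop nesting: a use
at depth d counts FLAG_loop_weight^d times as much, with the exponent capped at
4. A hedged standalone sketch of that policy (not the V8 signature):

    #include <algorithm>
    #include <cassert>

    // Weight grows as w^depth and saturates at depth 4.
    int LoopWeight(int loop_nesting_depth, int w) {
      const int weights[] = { 1, w, w * w, w * w * w, w * w * w * w };
      return weights[std::min(loop_nesting_depth, 4)];
    }

    int main() {
      assert(LoopWeight(0, 10) == 1);
      assert(LoopWeight(2, 10) == 100);
      assert(LoopWeight(9, 10) == 10000);  // capped at depth 4
      return 0;
    }
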
 void HValue::AssumeRepresentation(Representation r) {
   if (CheckFlag(kFlexibleRepresentation)) {
     ChangeRepresentation(r);
@@ -134,9 +126,7 @@
   bool may_overflow = false;  // Overflow is ignored here.
   lower_ = AddWithoutOverflow(lower_, value, &may_overflow);
   upper_ = AddWithoutOverflow(upper_, value, &may_overflow);
-#ifdef DEBUG
   Verify();
-#endif
 }
 
 
@@ -183,9 +173,7 @@
   lower_ = AddWithoutOverflow(lower_, other->lower(), &may_overflow);
   upper_ = AddWithoutOverflow(upper_, other->upper(), &may_overflow);
   KeepOrder();
-#ifdef DEBUG
   Verify();
-#endif
   return may_overflow;
 }
 
@@ -195,9 +183,7 @@
   lower_ = SubWithoutOverflow(lower_, other->upper(), &may_overflow);
   upper_ = SubWithoutOverflow(upper_, other->lower(), &may_overflow);
   KeepOrder();
-#ifdef DEBUG
   Verify();
-#endif
   return may_overflow;
 }
 
@@ -211,11 +197,9 @@
 }
 
 
-#ifdef DEBUG
 void Range::Verify() const {
   ASSERT(lower_ <= upper_);
 }
-#endif
 
 
 bool Range::MulAndCheckOverflow(Range* other) {
@@ -226,9 +210,7 @@
   int v4 = MulWithoutOverflow(upper_, other->upper(), &may_overflow);
   lower_ = Min(Min(v1, v2), Min(v3, v4));
   upper_ = Max(Max(v1, v2), Max(v3, v4));
-#ifdef DEBUG
   Verify();
-#endif
   return may_overflow;
 }
 
@@ -252,6 +234,25 @@
 }
 
 
+const char* HType::ToShortString() {
+  switch (type_) {
+    case kTagged: return "t";
+    case kTaggedPrimitive: return "p";
+    case kTaggedNumber: return "n";
+    case kSmi: return "m";
+    case kHeapNumber: return "h";
+    case kString: return "s";
+    case kBoolean: return "b";
+    case kNonPrimitive: return "r";
+    case kJSArray: return "a";
+    case kJSObject: return "o";
+    case kUninitialized: return "z";
+  }
+  UNREACHABLE();
+  return "Unreachable code";
+}
+
+
 HType HType::TypeFromValue(Handle<Object> value) {
   HType result = HType::Tagged();
   if (value->IsSmi()) {
@@ -276,23 +277,6 @@
 }
 
 
-HUseListNode* HUseListNode::tail() {
-  // Skip and remove dead items in the use list.
-  while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
-    tail_ = tail_->tail_;
-  }
-  return tail_;
-}
-
-
-bool HValue::CheckUsesForFlag(Flag f) {
-  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
-    if (!it.value()->CheckFlag(f)) return false;
-  }
-  return true;
-}
-
-
 HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
   Advance();
 }
@@ -391,7 +375,7 @@
   // We replace all uses first, so Delete can assert that there are none.
   if (other != NULL) ReplaceAllUsesWith(other);
   ASSERT(HasNoUses());
-  Kill();
+  ClearOperands();
   DeleteFromGraph();
 }
 
@@ -409,17 +393,9 @@
 }
 
 
-void HValue::Kill() {
-  // Instead of going through the entire use list of each operand, we only
-  // check the first item in each use list and rely on the tail() method to
-  // skip dead items, removing them lazily next time we traverse the list.
-  SetFlag(kIsDead);
+void HValue::ClearOperands() {
   for (int i = 0; i < OperandCount(); ++i) {
-    HValue* operand = OperandAt(i);
-    HUseListNode* first = operand->use_list_;
-    if (first != NULL && first->value() == this && first->index() == i) {
-      operand->use_list_ = first->tail();
-    }
+    SetOperandAt(i, NULL);
   }
 }
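
The Kill()/tail() machinery removed above implements lazy deletion: a killed
value is only flagged dead, and dead nodes are spliced out of a use list the
next time tail() walks past them, so killing stays O(1). A simplified sketch of
that pattern (a plain dead flag stands in for CheckFlag(kIsDead)):

    #include <cassert>
    #include <cstddef>

    struct Node {
      bool dead;
      Node* next;

      // Splice out dead successors lazily, as HUseListNode::tail() did.
      Node* tail() {
        while (next != NULL && next->dead) {
          next = next->next;   // unlink without a full list walk at kill time
        }
        return next;
      }
    };

    int main() {
      Node c = { false, NULL };
      Node b = { true, &c };     // "killed": flagged dead, not unlinked
      Node a = { false, &b };
      assert(a.tail() == &c);    // the next traversal splices b out
      assert(a.next == &c);
      return 0;
    }
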
 
@@ -449,18 +425,18 @@
 
 
 void HValue::PrintChangesTo(StringStream* stream) {
-  GVNFlagSet changes_flags = ChangesFlags();
-  if (changes_flags.IsEmpty()) return;
+  int changes_flags = ChangesFlags();
+  if (changes_flags == 0) return;
   stream->Add(" changes[");
-  if (changes_flags == AllSideEffectsFlagSet()) {
+  if (changes_flags == AllSideEffects()) {
     stream->Add("*");
   } else {
     bool add_comma = false;
-#define PRINT_DO(type)                            \
-    if (changes_flags.Contains(kChanges##type)) { \
-      if (add_comma) stream->Add(",");            \
-      add_comma = true;                           \
-      stream->Add(#type);                         \
+#define PRINT_DO(type)                         \
+    if (changes_flags & (1 << kChanges##type)) { \
+      if (add_comma) stream->Add(",");           \
+      add_comma = true;                          \
+      stream->Add(#type);                        \
     }
     GVN_FLAG_LIST(PRINT_DO);
 #undef PRINT_DO
@@ -503,9 +479,9 @@
 }
 
 
-void HValue::AddNewRange(Range* r, Zone* zone) {
-  if (!HasRange()) ComputeInitialRange(zone);
-  if (!HasRange()) range_ = new(zone) Range();
+void HValue::AddNewRange(Range* r) {
+  if (!HasRange()) ComputeInitialRange();
+  if (!HasRange()) range_ = new Range();
   ASSERT(HasRange());
   r->StackUpon(range_);
   range_ = r;
@@ -519,9 +495,9 @@
 }
 
 
-void HValue::ComputeInitialRange(Zone* zone) {
+void HValue::ComputeInitialRange() {
   ASSERT(!HasRange());
-  range_ = InferRange(zone);
+  range_ = InferRange();
   ASSERT(HasRange());
 }
 
@@ -588,7 +564,7 @@
   // followed by a simulate instruction, we need to insert after the
   // simulate instruction instead.
   HInstruction* next = previous->next_;
-  if (previous->HasObservableSideEffects() && next != NULL) {
+  if (previous->HasSideEffects() && next != NULL) {
     ASSERT(next->IsSimulate());
     previous = next;
     next = previous->next_;
@@ -611,10 +587,11 @@
     HBasicBlock* other_block = other_operand->block();
     if (cur_block == other_block) {
       if (!other_operand->IsPhi()) {
-        HInstruction* cur = this->previous();
+        HInstruction* cur = cur_block->first();
         while (cur != NULL) {
+          ASSERT(cur != this);  // We should reach other_operand first!
           if (cur == other_operand) break;
-          cur = cur->previous();
+          cur = cur->next();
         }
         // Must reach other operand in the same block!
         ASSERT(cur == other_operand);
@@ -628,7 +605,7 @@
 
   // Verify that instructions that may have side-effects are followed
   // by a simulate instruction.
-  if (HasObservableSideEffects() && !IsOsrEntry()) {
+  if (HasSideEffects() && !IsOsrEntry()) {
     ASSERT(next()->IsSimulate());
   }
 
@@ -730,14 +707,6 @@
 }
 
 
-void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
-  HControlInstruction::PrintDataTo(stream);
-}
-
-
 void HReturn::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
 }
@@ -806,106 +775,17 @@
 
 void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" == %o", *type_literal_);
-  HControlInstruction::PrintDataTo(stream);
-}
-
-
-void HCheckMapValue::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" ");
-  map()->PrintNameTo(stream);
-}
-
-
-void HForInPrepareMap::PrintDataTo(StringStream* stream) {
-  enumerable()->PrintNameTo(stream);
-}
-
-
-void HForInCacheArray::PrintDataTo(StringStream* stream) {
-  enumerable()->PrintNameTo(stream);
-  stream->Add(" ");
-  map()->PrintNameTo(stream);
-  stream->Add("[%d]", idx_);
-}
-
-
-void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add(" ");
-  index()->PrintNameTo(stream);
-}
-
-
-HValue* HConstant::Canonicalize() {
-  return HasNoUses() ? NULL : this;
-}
-
-
-HValue* HTypeof::Canonicalize() {
-  return HasNoUses() ? NULL : this;
-}
-
-
-HValue* HBitwise::Canonicalize() {
-  if (!representation().IsInteger32()) return this;
-  // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
-  int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
-  if (left()->IsConstant() &&
-      HConstant::cast(left())->HasInteger32Value() &&
-      HConstant::cast(left())->Integer32Value() == nop_constant) {
-    return right();
-  }
-  if (right()->IsConstant() &&
-      HConstant::cast(right())->HasInteger32Value() &&
-      HConstant::cast(right())->Integer32Value() == nop_constant) {
-    return left();
-  }
-  return this;
-}
-
-
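
The HBitwise::Canonicalize removed above folds identity operands away: for an
int32 x, AND with -1, OR with 0, and XOR with 0 are all no-ops, so the whole
instruction can be replaced by its other operand. A quick check of the
arithmetic behind nop_constant:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t x = -42;
      assert((x & -1) == x);  // nop_constant for BIT_AND is -1
      assert((x | 0) == x);   // nop_constant for BIT_OR is 0
      assert((x ^ 0) == x);   // nop_constant for BIT_XOR is 0
      return 0;
    }
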
-HValue* HAdd::Canonicalize() {
-  if (!representation().IsInteger32()) return this;
-  if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
-  return this;
-}
-
-
-HValue* HSub::Canonicalize() {
-  if (!representation().IsInteger32()) return this;
-  if (CheckUsesForFlag(kTruncatingToInt32)) ClearFlag(kCanOverflow);
-  return this;
-}
-
-
-HValue* HChange::Canonicalize() {
-  return (from().Equals(to())) ? value() : this;
-}
-
-
-HValue* HWrapReceiver::Canonicalize() {
-  if (HasNoUses()) return NULL;
-  if (receiver()->type().IsJSObject()) {
-    return receiver();
-  }
-  return this;
-}
-
-
-void HTypeof::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
+  stream->Add(" == ");
+  stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
 }
 
 
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
+  stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
 
   if (CanTruncateToInt32()) stream->Add(" truncating-int32");
   if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
-  if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
 }
 
 
@@ -968,13 +848,6 @@
 void HCheckMap::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add(" %p", *map());
-  if (mode() == REQUIRE_EXACT_MAP) {
-    stream->Add(" [EXACT]");
-  } else if (!has_element_transitions_) {
-    stream->Add(" [EXACT*]");
-  } else {
-    stream->Add(" [MATCH ELEMENTS]");
-  }
 }
 
 
@@ -984,23 +857,6 @@
 }
 
 
-const char* HCheckInstanceType::GetCheckName() {
-  switch (check_) {
-    case IS_SPEC_OBJECT: return "object";
-    case IS_JS_ARRAY: return "array";
-    case IS_STRING: return "string";
-    case IS_SYMBOL: return "symbol";
-  }
-  UNREACHABLE();
-  return "";
-}
-
-void HCheckInstanceType::PrintDataTo(StringStream* stream) {
-  stream->Add("%s ", GetCheckName());
-  HUnaryOperation::PrintDataTo(stream);
-}
-
-
 void HCallStub::PrintDataTo(StringStream* stream) {
   stream->Add("%s ",
               CodeStub::MajorName(major_key_, false));
@@ -1017,15 +873,15 @@
 }
 
 
-Range* HValue::InferRange(Zone* zone) {
+Range* HValue::InferRange() {
   // Untagged integer32 cannot be -0, all other representations can.
-  Range* result = new(zone) Range();
+  Range* result = new Range();
   result->set_can_be_minus_zero(!representation().IsInteger32());
   return result;
 }
 
 
-Range* HChange::InferRange(Zone* zone) {
+Range* HChange::InferRange() {
   Range* input_range = value()->range();
   if (from().IsInteger32() &&
       to().IsTagged() &&
@@ -1033,46 +889,46 @@
     set_type(HType::Smi());
   }
   Range* result = (input_range != NULL)
-      ? input_range->Copy(zone)
-      : HValue::InferRange(zone);
+      ? input_range->Copy()
+      : HValue::InferRange();
   if (to().IsInteger32()) result->set_can_be_minus_zero(false);
   return result;
 }
 
 
-Range* HConstant::InferRange(Zone* zone) {
+Range* HConstant::InferRange() {
   if (has_int32_value_) {
-    Range* result = new(zone) Range(int32_value_, int32_value_);
+    Range* result = new Range(int32_value_, int32_value_);
     result->set_can_be_minus_zero(false);
     return result;
   }
-  return HValue::InferRange(zone);
+  return HValue::InferRange();
 }
 
 
-Range* HPhi::InferRange(Zone* zone) {
+Range* HPhi::InferRange() {
   if (representation().IsInteger32()) {
     if (block()->IsLoopHeader()) {
-      Range* range = new(zone) Range(kMinInt, kMaxInt);
+      Range* range = new Range(kMinInt, kMaxInt);
       return range;
     } else {
-      Range* range = OperandAt(0)->range()->Copy(zone);
+      Range* range = OperandAt(0)->range()->Copy();
       for (int i = 1; i < OperandCount(); ++i) {
         range->Union(OperandAt(i)->range());
       }
       return range;
     }
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
 
-Range* HAdd::InferRange(Zone* zone) {
+Range* HAdd::InferRange() {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy(zone);
+    Range* res = a->Copy();
     if (!res->AddAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
@@ -1080,32 +936,32 @@
     res->set_can_be_minus_zero(m0);
     return res;
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
 
-Range* HSub::InferRange(Zone* zone) {
+Range* HSub::InferRange() {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy(zone);
+    Range* res = a->Copy();
     if (!res->SubAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
     res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
     return res;
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
 
-Range* HMul::InferRange(Zone* zone) {
+Range* HMul::InferRange() {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
-    Range* res = a->Copy(zone);
+    Range* res = a->Copy();
     if (!res->MulAndCheckOverflow(b)) {
       ClearFlag(kCanOverflow);
     }
@@ -1114,14 +970,14 @@
     res->set_can_be_minus_zero(m0);
     return res;
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
 
-Range* HDiv::InferRange(Zone* zone) {
+Range* HDiv::InferRange() {
   if (representation().IsInteger32()) {
-    Range* result = new(zone) Range();
+    Range* result = new Range();
     if (left()->range()->CanBeMinusZero()) {
       result->set_can_be_minus_zero(true);
     }
@@ -1139,15 +995,15 @@
     }
     return result;
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
 
-Range* HMod::InferRange(Zone* zone) {
+Range* HMod::InferRange() {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
-    Range* result = new(zone) Range();
+    Range* result = new Range();
     if (a->CanBeMinusZero() || a->CanBeNegative()) {
       result->set_can_be_minus_zero(true);
     }
@@ -1156,7 +1012,7 @@
     }
     return result;
   } else {
-    return HValue::InferRange(zone);
+    return HValue::InferRange();
   }
 }
 
@@ -1229,7 +1085,7 @@
     HValue* value = it.value();
     if (!value->IsPhi()) {
       Representation rep = value->RequiredInputRepresentation(it.index());
-      non_phi_uses_[rep.kind()] += value->LoopWeight();
+      ++non_phi_uses_[rep.kind()];
     }
   }
 }
@@ -1250,16 +1106,15 @@
 
 
 void HSimulate::PrintDataTo(StringStream* stream) {
-  stream->Add("id=%d", ast_id());
-  if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
+  stream->Add("id=%d ", ast_id());
+  if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
   if (values_.length() > 0) {
     if (pop_count_ > 0) stream->Add(" /");
     for (int i = 0; i < values_.length(); ++i) {
-      if (i > 0) stream->Add(",");
-      if (HasAssignedIndexAt(i)) {
-        stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
-      } else {
+      if (!HasAssignedIndexAt(i)) {
         stream->Add(" push ");
+      } else {
+        stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
       }
       values_[i]->PrintNameTo(stream);
     }
@@ -1340,9 +1195,7 @@
 
 
 bool HArrayLiteral::IsCopyOnWrite() const {
-  if (!boilerplate_object_->IsJSObject()) return false;
-  return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() ==
-      HEAP->fixed_cow_array_map();
+  return constant_elements()->map() == HEAP->fixed_cow_array_map();
 }
 
 
@@ -1355,41 +1208,51 @@
 }
 
 
-Range* HBitwise::InferRange(Zone* zone) {
-  if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
-  const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
+Range* HBitAnd::InferRange() {
   int32_t left_mask = (left()->range() != NULL)
       ? left()->range()->Mask()
-      : kDefaultMask;
+      : 0xffffffff;
   int32_t right_mask = (right()->range() != NULL)
       ? right()->range()->Mask()
-      : kDefaultMask;
-  int32_t result_mask = (op() == Token::BIT_AND)
-      ? left_mask & right_mask
-      : left_mask | right_mask;
+      : 0xffffffff;
+  int32_t result_mask = left_mask & right_mask;
   return (result_mask >= 0)
-      ? new(zone) Range(0, result_mask)
-      : HValue::InferRange(zone);
+      ? new Range(0, result_mask)
+      : HValue::InferRange();
 }
 
 
-Range* HSar::InferRange(Zone* zone) {
+Range* HBitOr::InferRange() {
+  int32_t left_mask = (left()->range() != NULL)
+      ? left()->range()->Mask()
+      : 0xffffffff;
+  int32_t right_mask = (right()->range() != NULL)
+      ? right()->range()->Mask()
+      : 0xffffffff;
+  int32_t result_mask = left_mask | right_mask;
+  return (result_mask >= 0)
+      ? new Range(0, result_mask)
+      : HValue::InferRange();
+}
+
+
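
The restored InferRange() bodies above reason about bit masks: Mask()
over-approximates which bits may be set, AND can only keep bits present in both
masks, and OR only bits present in either. When the combined mask is
non-negative, the result is bounded by [0, mask]; a negative mask means the
sign bit may be set, so no bound is claimed. A worked example:

    #include <cassert>

    int main() {
      int left_mask = 0x0f;    // left uses at most the low 4 bits
      int right_mask = 0xff;   // right uses at most the low 8 bits
      assert((left_mask & right_mask) == 0x0f);  // AND result in [0, 15]
      assert((left_mask | right_mask) == 0xff);  // OR result in [0, 255]
      return 0;
    }
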
+Range* HSar::InferRange() {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
       Range* result = (left()->range() != NULL)
-          ? left()->range()->Copy(zone)
-          : new(zone) Range();
+          ? left()->range()->Copy()
+          : new Range();
       result->Sar(c->Integer32Value());
       result->set_can_be_minus_zero(false);
       return result;
     }
   }
-  return HValue::InferRange(zone);
+  return HValue::InferRange();
 }
 
 
-Range* HShr::InferRange(Zone* zone) {
+Range* HShr::InferRange() {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
@@ -1397,57 +1260,39 @@
       if (left()->range()->CanBeNegative()) {
         // Only compute bounds if the result always fits into an int32.
         return (shift_count >= 1)
-            ? new(zone) Range(0,
-                              static_cast<uint32_t>(0xffffffff) >> shift_count)
-            : new(zone) Range();
+            ? new Range(0, static_cast<uint32_t>(0xffffffff) >> shift_count)
+            : new Range();
       } else {
         // For positive inputs we can use the >> operator.
         Range* result = (left()->range() != NULL)
-            ? left()->range()->Copy(zone)
-            : new(zone) Range();
+            ? left()->range()->Copy()
+            : new Range();
         result->Sar(c->Integer32Value());
         result->set_can_be_minus_zero(false);
         return result;
       }
     }
   }
-  return HValue::InferRange(zone);
+  return HValue::InferRange();
 }
 
 
-Range* HShl::InferRange(Zone* zone) {
+Range* HShl::InferRange() {
   if (right()->IsConstant()) {
     HConstant* c = HConstant::cast(right());
     if (c->HasInteger32Value()) {
       Range* result = (left()->range() != NULL)
-          ? left()->range()->Copy(zone)
-          : new(zone) Range();
+          ? left()->range()->Copy()
+          : new Range();
       result->Shl(c->Integer32Value());
       result->set_can_be_minus_zero(false);
       return result;
     }
   }
-  return HValue::InferRange(zone);
+  return HValue::InferRange();
 }
 
 
-Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
-  switch (elements_kind()) {
-    case EXTERNAL_PIXEL_ELEMENTS:
-      return new(zone) Range(0, 255);
-    case EXTERNAL_BYTE_ELEMENTS:
-      return new(zone) Range(-128, 127);
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      return new(zone) Range(0, 255);
-    case EXTERNAL_SHORT_ELEMENTS:
-      return new(zone) Range(-32768, 32767);
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      return new(zone) Range(0, 65535);
-    default:
-      return HValue::InferRange(zone);
-  }
-}
-
 
 void HCompareGeneric::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
@@ -1456,13 +1301,6 @@
 }
 
 
-void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add(Token::Name(token()));
-  stream->Add(" ");
-  HControlInstruction::PrintDataTo(stream);
-}
-
-
 void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
@@ -1473,14 +1311,6 @@
 }
 
 
-void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
-  left()->PrintNameTo(stream);
-  stream->Add(" ");
-  right()->PrintNameTo(stream);
-  HControlInstruction::PrintDataTo(stream);
-}
-
-
 void HGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", SuccessorAt(0)->block_id());
 }
@@ -1489,22 +1319,7 @@
 void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
   input_representation_ = r;
   if (r.IsDouble()) {
-    // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
-    // and !=) have special handling of undefined, e.g. undefined == undefined
-    // is 'true'. Relational comparisons have a different semantic, first
-    // calling ToPrimitive() on their arguments.  The standard Crankshaft
-    // tagged-to-double conversion to ensure the HCompareIDAndBranch's inputs
-    // are doubles caused 'undefined' to be converted to NaN. That's compatible
-    // out-of-the box with ordered relational comparisons (<, >, <=,
-    // >=). However, for equality comparisons (and for 'in' and 'instanceof'),
-    // it is not consistent with the spec. For example, it would cause undefined
-    // == undefined (should be true) to be evaluated as NaN == NaN
-    // (false). Therefore, any comparisons other than ordered relational
-    // comparisons must cause a deopt when one of their arguments is undefined.
-    // See also v8:1434
-    if (!Token::IsOrderedRelationalCompareOp(token_)) {
-      SetFlag(kDeoptimizeOnUndefined);
-    }
+    SetFlag(kDeoptimizeOnUndefined);
   } else {
     ASSERT(r.IsInteger32());
   }
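
The long comment deleted in the hunk above boils down to one IEEE-754 fact:
converting undefined to a double yields NaN, and NaN compares unequal to
everything including itself, so equality on undefined would silently change
meaning. The restored code therefore deopts on undefined for every double
comparison. The fact itself, as a standalone check:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");  // what 'undefined' becomes as a double
      assert(!(nan == nan));      // undefined == undefined would be false
      assert(!(nan < 1.0));       // ordered comparisons are false anyway,
      return 0;                   // which matches undefined's semantics
    }
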
@@ -1532,21 +1347,21 @@
   SetOperandAt(0, context);
   SetOperandAt(1, object);
   set_representation(Representation::Tagged());
-  SetGVNFlag(kDependsOnMaps);
+  SetFlag(kDependsOnMaps);
   for (int i = 0;
        i < types->length() && types_.length() < kMaxLoadPolymorphism;
        ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup(map->GetIsolate());
+    LookupResult lookup;
     map->LookupInDescriptors(NULL, *name, &lookup);
-    if (lookup.IsFound()) {
+    if (lookup.IsProperty()) {
       switch (lookup.type()) {
         case FIELD: {
           int index = lookup.GetLocalFieldIndexFromMap(*map);
           if (index < 0) {
-            SetGVNFlag(kDependsOnInobjectFields);
+            SetFlag(kDependsOnInobjectFields);
           } else {
-            SetGVNFlag(kDependsOnBackingStoreFields);
+            SetFlag(kDependsOnBackingStoreFields);
           }
           types_.Add(types->at(i));
           break;
@@ -1590,14 +1405,14 @@
 
 void HLoadNamedFieldPolymorphic::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(".");
+  stream->Add(" .");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
 
 void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
-  stream->Add(".");
+  stream->Add(" .");
   stream->Add(*String::cast(*name())->ToCString());
 }
 
@@ -1610,16 +1425,11 @@
 }
 
 
-bool HLoadKeyedFastElement::RequiresHoleCheck() {
-  if (hole_check_mode_ == OMIT_HOLE_CHECK) {
-    return false;
-  }
-
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
   }
-
   return false;
 }
 
@@ -1632,6 +1442,11 @@
 }
 
 
+bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
+  return true;
+}
+
+
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1640,39 +1455,6 @@
 }
 
 
-HValue* HLoadKeyedGeneric::Canonicalize() {
-  // Recognize generic keyed loads that use property name generated
-  // by for-in statement as a key and rewrite them into fast property load
-  // by index.
-  if (key()->IsLoadKeyedFastElement()) {
-    HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
-    if (key_load->object()->IsForInCacheArray()) {
-      HForInCacheArray* names_cache =
-          HForInCacheArray::cast(key_load->object());
-
-      if (names_cache->enumerable() == object()) {
-        HForInCacheArray* index_cache =
-            names_cache->index_cache();
-        HCheckMapValue* map_check =
-            new(block()->zone()) HCheckMapValue(object(), names_cache->map());
-        HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
-            index_cache,
-            key_load->key(),
-            HLoadKeyedFastElement::OMIT_HOLE_CHECK);
-        HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
-            object(), index);
-        map_check->InsertBefore(this);
-        index->InsertBefore(this);
-        load->InsertBefore(this);
-        return load;
-      }
-    }
-  }
-
-  return this;
-}
-
-
 void HLoadKeyedSpecializedArrayElement::PrintDataTo(
     StringStream* stream) {
   external_pointer()->PrintNameTo(stream);
@@ -1706,7 +1488,6 @@
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1732,10 +1513,10 @@
 void HStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(".");
+  ASSERT(name()->IsString());
   stream->Add(*String::cast(*name())->ToCString());
   stream->Add(" = ");
   value()->PrintNameTo(stream);
-  stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
   if (!transition().is_null()) {
     stream->Add(" (transition map %p)", *transition());
   }
@@ -1801,7 +1582,6 @@
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -1816,26 +1596,9 @@
 }
 
 
-void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
 void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
-  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
-  if (details_.IsReadOnly()) stream->Add(" (read-only)");
-}
-
-
-bool HLoadGlobalCell::RequiresHoleCheck() {
-  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
-  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
-    HValue* use = it.value();
-    if (!use->IsChange()) return true;
-  }
-  return false;
+  if (check_hole_value()) stream->Add(" (deleteable/read-only)");
 }
 
 
@@ -1847,8 +1610,6 @@
 void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
-  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
-  if (details_.IsReadOnly()) stream->Add(" (read-only)");
 }
 
 
@@ -1935,12 +1696,6 @@
 }
 
 
-HType HChange::CalculateInferredType() {
-  if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
-  return type();
-}
-
-
 HType HBitwiseBinaryOperation::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1956,6 +1711,21 @@
 }
 
 
+HType HBitAnd::CalculateInferredType() {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitXor::CalculateInferredType() {
+  return HType::TaggedNumber();
+}
+
+
+HType HBitOr::CalculateInferredType() {
+  return HType::TaggedNumber();
+}
+
+
 HType HBitNot::CalculateInferredType() {
   return HType::TaggedNumber();
 }
@@ -1966,39 +1736,18 @@
 }
 
 
-HType HStringCharFromCode::CalculateInferredType() {
-  return HType::String();
+HType HShl::CalculateInferredType() {
+  return HType::TaggedNumber();
 }
 
 
-HType HAllocateObject::CalculateInferredType() {
-  return HType::JSObject();
+HType HShr::CalculateInferredType() {
+  return HType::TaggedNumber();
 }
 
 
-HType HFastLiteral::CalculateInferredType() {
-  // TODO(mstarzinger): Be smarter, could also be JSArray here.
-  return HType::JSObject();
-}
-
-
-HType HArrayLiteral::CalculateInferredType() {
-  return HType::JSArray();
-}
-
-
-HType HObjectLiteral::CalculateInferredType() {
-  return HType::JSObject();
-}
-
-
-HType HRegExpLiteral::CalculateInferredType() {
-  return HType::JSObject();
-}
-
-
-HType HFunctionLiteral::CalculateInferredType() {
-  return HType::JSObject();
+HType HSar::CalculateInferredType() {
+  return HType::TaggedNumber();
 }
 
 
@@ -2089,167 +1838,6 @@
 }
 
 
-#define H_CONSTANT_INT32(val)                                                  \
-new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
-                    Representation::Integer32())
-#define H_CONSTANT_DOUBLE(val)                                                 \
-new(zone) HConstant(FACTORY->NewNumber(val, TENURED),                          \
-                    Representation::Double())
-
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                       \
-HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
-                                  HValue* context,                             \
-                                  HValue* left,                                \
-                                  HValue* right) {                             \
-  if (left->IsConstant() && right->IsConstant()) {                             \
-    HConstant* c_left = HConstant::cast(left);                                 \
-    HConstant* c_right = HConstant::cast(right);                               \
-    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
-      double double_res = c_left->DoubleValue() op c_right->DoubleValue();     \
-      if (TypeInfo::IsInt32Double(double_res)) {                               \
-        return H_CONSTANT_INT32(static_cast<int32_t>(double_res));             \
-      }                                                                        \
-      return H_CONSTANT_DOUBLE(double_res);                                    \
-    }                                                                          \
-  }                                                                            \
-  return new(zone) HInstr(context, left, right);                               \
-}
-
-
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
-DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
-
-#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-
-
-HInstruction* HMod::NewHMod(Zone* zone,
-                            HValue* context,
-                            HValue* left,
-                            HValue* right) {
-  if (left->IsConstant() && right->IsConstant()) {
-    HConstant* c_left = HConstant::cast(left);
-    HConstant* c_right = HConstant::cast(right);
-    if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
-      int32_t dividend = c_left->Integer32Value();
-      int32_t divisor = c_right->Integer32Value();
-      if (divisor != 0) {
-        int32_t res = dividend % divisor;
-        if ((res == 0) && (dividend < 0)) {
-          return H_CONSTANT_DOUBLE(-0.0);
-        }
-        return H_CONSTANT_INT32(res);
-      }
-    }
-  }
-  return new(zone) HMod(context, left, right);
-}
-
-
-HInstruction* HDiv::NewHDiv(Zone* zone,
-                            HValue* context,
-                            HValue* left,
-                            HValue* right) {
-  // If left and right are constant values, try to return a constant value.
-  if (left->IsConstant() && right->IsConstant()) {
-    HConstant* c_left = HConstant::cast(left);
-    HConstant* c_right = HConstant::cast(right);
-    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
-      if (c_right->DoubleValue() != 0) {
-        double double_res = c_left->DoubleValue() / c_right->DoubleValue();
-        if (TypeInfo::IsInt32Double(double_res)) {
-          return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
-        }
-        return H_CONSTANT_DOUBLE(double_res);
-      }
-    }
-  }
-  return new(zone) HDiv(context, left, right);
-}
-
-
-HInstruction* HBitwise::NewHBitwise(Zone* zone,
-                                    Token::Value op,
-                                    HValue* context,
-                                    HValue* left,
-                                    HValue* right) {
-  if (left->IsConstant() && right->IsConstant()) {
-    HConstant* c_left = HConstant::cast(left);
-    HConstant* c_right = HConstant::cast(right);
-    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
-      int32_t result;
-      int32_t v_left = c_left->NumberValueAsInteger32();
-      int32_t v_right = c_right->NumberValueAsInteger32();
-      switch (op) {
-        case Token::BIT_XOR:
-          result = v_left ^ v_right;
-          break;
-        case Token::BIT_AND:
-          result = v_left & v_right;
-          break;
-        case Token::BIT_OR:
-          result = v_left | v_right;
-          break;
-        default:
-          result = 0;  // Please the compiler.
-          UNREACHABLE();
-      }
-      return H_CONSTANT_INT32(result);
-    }
-  }
-  return new(zone) HBitwise(op, context, left, right);
-}
-
-
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                             \
-HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
-                                  HValue* context,                             \
-                                  HValue* left,                                \
-                                  HValue* right) {                             \
-  if (left->IsConstant() && right->IsConstant()) {                             \
-    HConstant* c_left = HConstant::cast(left);                                 \
-    HConstant* c_right = HConstant::cast(right);                               \
-    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
-      return H_CONSTANT_INT32(result);                                         \
-    }                                                                          \
-  }                                                                            \
-  return new(zone) HInstr(context, left, right);                               \
-}
-
-
-DEFINE_NEW_H_BITWISE_INSTR(HSar,
-c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
-DEFINE_NEW_H_BITWISE_INSTR(HShl,
-c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
-
-#undef DEFINE_NEW_H_BITWISE_INSTR
-
-
-HInstruction* HShr::NewHShr(Zone* zone,
-                            HValue* context,
-                            HValue* left,
-                            HValue* right) {
-  if (left->IsConstant() && right->IsConstant()) {
-    HConstant* c_left = HConstant::cast(left);
-    HConstant* c_right = HConstant::cast(right);
-    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
-      int32_t left_val = c_left->NumberValueAsInteger32();
-      int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
-      if ((right_val == 0) && (left_val < 0)) {
-        return H_CONSTANT_DOUBLE(
-            static_cast<double>(static_cast<uint32_t>(left_val)));
-      }
-      return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
-    }
-  }
-  return new(zone) HShr(context, left, right);
-}
-
-
-#undef H_CONSTANT_INT32
-#undef H_CONSTANT_DOUBLE
-
-
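
Two corner cases drive the folding helpers removed above: a modulus that is
exactly zero must keep the dividend's sign (JavaScript distinguishes -0 from
+0), and an unsigned shift of a negative int32 can produce a value above
kMaxInt, which only fits in a double. A hedged standalone check of both:

    #include <cassert>
    #include <cstdint>

    int main() {
      // HMod: -4 % 2 is 0 with a negative dividend, so it folds to -0.0.
      int32_t dividend = -4, divisor = 2;
      assert(dividend % divisor == 0 && dividend < 0);

      // HShr: (-1 >>> 0) is 4294967295, too large for an int32 constant,
      // so it is folded as a double instead.
      uint32_t shr = static_cast<uint32_t>(-1) >> 0;
      assert(shr == 4294967295u);
      return 0;
    }
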
 void HIn::PrintDataTo(StringStream* stream) {
   key()->PrintNameTo(stream);
   stream->Add(" ");
@@ -2257,46 +1845,6 @@
 }
 
 
-Representation HPhi::InferredRepresentation() {
-  bool double_occurred = false;
-  bool int32_occurred = false;
-  for (int i = 0; i < OperandCount(); ++i) {
-    HValue* value = OperandAt(i);
-    if (value->IsUnknownOSRValue()) {
-      HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
-      if (hint_value != NULL) {
-        Representation hint = hint_value->representation();
-        if (hint.IsDouble()) double_occurred = true;
-        if (hint.IsInteger32()) int32_occurred = true;
-      }
-      continue;
-    }
-    if (value->representation().IsDouble()) double_occurred = true;
-    if (value->representation().IsInteger32()) int32_occurred = true;
-    if (value->representation().IsTagged()) {
-      if (value->IsConstant()) {
-        HConstant* constant = HConstant::cast(value);
-        if (constant->IsConvertibleToInteger()) {
-          int32_occurred = true;
-        } else if (constant->HasNumberValue()) {
-          double_occurred = true;
-        } else {
-          return Representation::Tagged();
-        }
-      } else {
-        return Representation::Tagged();
-      }
-    }
-  }
-
-  if (double_occurred) return Representation::Double();
-
-  if (int32_occurred) return Representation::Integer32();
-
-  return Representation::None();
-}
-
-
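
The removed HPhi::InferredRepresentation above picks the cheapest
representation covering every input: Double wins over Integer32, and any
non-numeric tagged operand forces Tagged immediately. A simplified sketch of
that lattice (it omits the OSR-hint and tagged-constant cases of the real
code):

    #include <cassert>

    enum Rep { kNone, kInteger32, kDouble, kTagged };

    Rep InferPhiRep(const Rep* operands, int count) {
      bool double_occurred = false, int32_occurred = false;
      for (int i = 0; i < count; ++i) {
        if (operands[i] == kTagged) return kTagged;  // non-numeric input
        if (operands[i] == kDouble) double_occurred = true;
        if (operands[i] == kInteger32) int32_occurred = true;
      }
      if (double_occurred) return kDouble;
      if (int32_occurred) return kInteger32;
      return kNone;
    }

    int main() {
      Rep ops1[] = { kInteger32, kDouble };
      assert(InferPhiRep(ops1, 2) == kDouble);   // double dominates
      Rep ops2[] = { kInteger32, kTagged };
      assert(InferPhiRep(ops2, 2) == kTagged);   // non-numeric forces tagged
      return 0;
    }
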
 // Node-specific verification code is only included in debug mode.
 #ifdef DEBUG
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fb5879f..0af5489 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -62,14 +62,15 @@
   V(AbnormalExit)                              \
   V(AccessArgumentsAt)                         \
   V(Add)                                       \
-  V(AllocateObject)                            \
   V(ApplyArguments)                            \
   V(ArgumentsElements)                         \
   V(ArgumentsLength)                           \
   V(ArgumentsObject)                           \
   V(ArrayLiteral)                              \
-  V(Bitwise)                                   \
+  V(BitAnd)                                    \
   V(BitNot)                                    \
+  V(BitOr)                                     \
+  V(BitXor)                                    \
   V(BlockEntry)                                \
   V(BoundsCheck)                               \
   V(Branch)                                    \
@@ -98,13 +99,11 @@
   V(CompareConstantEqAndBranch)                \
   V(Constant)                                  \
   V(Context)                                   \
-  V(DeclareGlobals)                            \
   V(DeleteProperty)                            \
   V(Deoptimize)                                \
   V(Div)                                       \
   V(ElementsKind)                              \
   V(EnterInlined)                              \
-  V(FastLiteral)                               \
   V(FixedArrayBaseLength)                      \
   V(ForceRepresentation)                       \
   V(FunctionLiteral)                           \
@@ -119,12 +118,10 @@
   V(InstanceOfKnownGlobal)                     \
   V(InvokeFunction)                            \
   V(IsConstructCallAndBranch)                  \
-  V(IsNilAndBranch)                            \
+  V(IsNullAndBranch)                           \
   V(IsObjectAndBranch)                         \
-  V(IsStringAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
-  V(StringCompareAndBranch)                    \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -148,7 +145,6 @@
   V(Parameter)                                 \
   V(Power)                                     \
   V(PushArgument)                              \
-  V(Random)                                    \
   V(RegExpLiteral)                             \
   V(Return)                                    \
   V(Sar)                                       \
@@ -174,26 +170,18 @@
   V(ThisFunction)                              \
   V(Throw)                                     \
   V(ToFastProperties)                          \
-  V(TransitionElementsKind)                    \
+  V(ToInt32)                                   \
   V(Typeof)                                    \
   V(TypeofIsAndBranch)                         \
   V(UnaryMathOperation)                        \
   V(UnknownOSRValue)                           \
   V(UseConst)                                  \
-  V(ValueOf)                                   \
-  V(ForInPrepareMap)                           \
-  V(ForInCacheArray)                           \
-  V(CheckMapValue)                             \
-  V(LoadFieldByIndex)                          \
-  V(DateField)                                 \
-  V(WrapReceiver)
+  V(ValueOf)
 
 #define GVN_FLAG_LIST(V)                       \
   V(Calls)                                     \
   V(InobjectFields)                            \
   V(BackingStoreFields)                        \
-  V(ElementsKind)                              \
-  V(ElementsPointer)                           \
   V(ArrayElements)                             \
   V(DoubleArrayElements)                       \
   V(SpecializedArrayElements)                  \
@@ -237,14 +225,10 @@
   int32_t upper() const { return upper_; }
   int32_t lower() const { return lower_; }
   Range* next() const { return next_; }
-  Range* CopyClearLower(Zone* zone) const {
-    return new(zone) Range(kMinInt, upper_);
-  }
-  Range* CopyClearUpper(Zone* zone) const {
-    return new(zone) Range(lower_, kMaxInt);
-  }
-  Range* Copy(Zone* zone) const {
-    Range* result = new(zone) Range(lower_, upper_);
+  Range* CopyClearLower() const { return new Range(kMinInt, upper_); }
+  Range* CopyClearUpper() const { return new Range(lower_, kMaxInt); }
+  Range* Copy() const {
+    Range* result = new Range(lower_, upper_);
     result->set_can_be_minus_zero(CanBeMinusZero());
     return result;
   }
@@ -261,9 +245,7 @@
     return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
   }
   void KeepOrder();
-#ifdef DEBUG
   void Verify() const;
-#endif
 
   void StackUpon(Range* other) {
     Intersect(other);
@@ -415,14 +397,10 @@
     return type_ == kUninitialized;
   }
 
-  bool IsHeapObject() {
-    ASSERT(type_ != kUninitialized);
-    return IsHeapNumber() || IsString() || IsNonPrimitive();
-  }
-
   static HType TypeFromValue(Handle<Object> value);
 
   const char* ToString();
+  const char* ToShortString();
 
  private:
   enum Type {
@@ -454,7 +432,7 @@
       : tail_(tail), value_(value), index_(index) {
   }
 
-  HUseListNode* tail();
+  HUseListNode* tail() const { return tail_; }
   HValue* value() const { return value_; }
   int index() const { return index_; }
 
@@ -504,26 +482,18 @@
 };
 
 
-// There must be one corresponding kDepends flag for every kChanges flag and
-// the order of the kChanges flags must be exactly the same as of the kDepends
-// flags.
-enum GVNFlag {
-  // Declare global value numbering flags.
-#define DECLARE_FLAG(type) kChanges##type, kDependsOn##type,
-  GVN_FLAG_LIST(DECLARE_FLAG)
-#undef DECLARE_FLAG
-  kAfterLastFlag,
-  kLastFlag = kAfterLastFlag - 1
-};
-
-typedef EnumSet<GVNFlag> GVNFlagSet;
-
-
 class HValue: public ZoneObject {
  public:
   static const int kNoNumber = -1;
 
+  // There must be one corresponding kDepends flag for every kChanges flag and
+  // the order of the kChanges flags must be exactly the same as of the kDepends
+  // flags.
   enum Flag {
+    // Declare global value numbering flags.
+  #define DECLARE_DO(type) kChanges##type, kDependsOn##type,
+    GVN_FLAG_LIST(DECLARE_DO)
+  #undef DECLARE_DO
     kFlexibleRepresentation,
     // Participate in Global Value Numbering, i.e. elimination of
     // unnecessary recomputations. If an instruction sets this flag, it must
@@ -536,16 +506,15 @@
     kDeoptimizeOnUndefined,
     kIsArguments,
     kTruncatingToInt32,
-    kIsDead,
-    kLastFlag = kIsDead
+    kLastFlag = kTruncatingToInt32
   };
 
   STATIC_ASSERT(kLastFlag < kBitsPerInt);
 
   static const int kChangesToDependsFlagsLeftShift = 1;
 
-  static GVNFlagSet ConvertChangesToDependsFlags(GVNFlagSet flags) {
-    return GVNFlagSet(flags.ToIntegral() << kChangesToDependsFlagsLeftShift);
+  static int ConvertChangesToDependsFlags(int flags) {
+    return flags << kChangesToDependsFlagsLeftShift;
   }
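
The flag layout restored above is load-bearing: GVN_FLAG_LIST expands each
entry into an adjacent kChanges/kDependsOn pair, so in the bitmask form a whole
"changes" mask becomes the matching "depends" mask with a single left shift
(hence kChangesToDependsFlagsLeftShift == 1). A toy version of the invariant,
with a hypothetical two-entry flag list:

    #include <cassert>

    // Same interleaving the DECLARE_DO macro produces.
    enum Flag {
      kChangesCalls, kDependsOnCalls,
      kChangesMaps,  kDependsOnMaps,
    };

    int main() {
      int changes = (1 << kChangesCalls) | (1 << kChangesMaps);
      int depends = changes << 1;  // ConvertChangesToDependsFlags
      assert(depends == ((1 << kDependsOnCalls) | (1 << kDependsOnMaps)));
      return 0;
    }
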
 
   static HValue* cast(HValue* value) { return value; }
@@ -582,7 +551,6 @@
 
   HBasicBlock* block() const { return block_; }
   void SetBlock(HBasicBlock* block);
-  int LoopWeight() const;
 
   int id() const { return id_; }
   void set_id(int id) { id_ = id; }
@@ -637,66 +605,27 @@
     return use_list_ != NULL && use_list_->tail() != NULL;
   }
   int UseCount() const;
-
-  // Mark this HValue as dead and to be removed from other HValues' use lists.
-  void Kill();
+  void ClearOperands();
 
   int flags() const { return flags_; }
   void SetFlag(Flag f) { flags_ |= (1 << f); }
   void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
   bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
 
-  // Returns true if the flag specified is set for all uses, false otherwise.
-  bool CheckUsesForFlag(Flag f);
+  void SetAllSideEffects() { flags_ |= AllSideEffects(); }
+  void ClearAllSideEffects() { flags_ &= ~AllSideEffects(); }
+  bool HasSideEffects() const { return (flags_ & AllSideEffects()) != 0; }
 
-  GVNFlagSet gvn_flags() const { return gvn_flags_; }
-  void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
-  void ClearGVNFlag(GVNFlag f) { gvn_flags_.Remove(f); }
-  bool CheckGVNFlag(GVNFlag f) const { return gvn_flags_.Contains(f); }
-  void SetAllSideEffects() { gvn_flags_.Add(AllSideEffectsFlagSet()); }
-  void ClearAllSideEffects() {
-    gvn_flags_.Remove(AllSideEffectsFlagSet());
-  }
-  bool HasSideEffects() const {
-    return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
-  }
-  bool HasObservableSideEffects() const {
-    return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
-  }
-
-  GVNFlagSet DependsOnFlags() const {
-    GVNFlagSet result = gvn_flags_;
-    result.Intersect(AllDependsOnFlagSet());
-    return result;
-  }
-
-  GVNFlagSet SideEffectFlags() const {
-    GVNFlagSet result = gvn_flags_;
-    result.Intersect(AllSideEffectsFlagSet());
-    return result;
-  }
-
-  GVNFlagSet ChangesFlags() const {
-    GVNFlagSet result = gvn_flags_;
-    result.Intersect(AllChangesFlagSet());
-    return result;
-  }
-
-  GVNFlagSet ObservableChangesFlags() const {
-    GVNFlagSet result = gvn_flags_;
-    result.Intersect(AllChangesFlagSet());
-    result.Intersect(AllObservableSideEffectsFlagSet());
-    return result;
-  }
+  int ChangesFlags() const { return flags_ & ChangesFlagsMask(); }
 
   Range* range() const { return range_; }
   bool HasRange() const { return range_ != NULL; }
-  void AddNewRange(Range* r, Zone* zone);
+  void AddNewRange(Range* r);
   void RemoveLastAddedRange();
-  void ComputeInitialRange(Zone* zone);
+  void ComputeInitialRange();
 
   // Representation helpers.
-  virtual Representation RequiredInputRepresentation(int index) = 0;
+  virtual Representation RequiredInputRepresentation(int index) const = 0;
 
   virtual Representation InferredRepresentation() {
     return representation();
@@ -738,7 +667,7 @@
     return false;
   }
   virtual void RepresentationChanged(Representation to) { }
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
   virtual void DeleteFromGraph() = 0;
   virtual void InternalSetOperandAt(int index, HValue* value) = 0;
   void clear_block() {
@@ -752,39 +681,19 @@
     representation_ = r;
   }
 
-  static GVNFlagSet AllDependsOnFlagSet() {
-    GVNFlagSet result;
+ private:
+  static int ChangesFlagsMask() {
+    int result = 0;
     // Create changes mask.
-#define ADD_FLAG(type) result.Add(kDependsOn##type);
-  GVN_FLAG_LIST(ADD_FLAG)
-#undef ADD_FLAG
-    return result;
-  }
-
-  static GVNFlagSet AllChangesFlagSet() {
-    GVNFlagSet result;
-    // Create changes mask.
-#define ADD_FLAG(type) result.Add(kChanges##type);
+#define ADD_FLAG(type) result |= (1 << kChanges##type);
   GVN_FLAG_LIST(ADD_FLAG)
 #undef ADD_FLAG
     return result;
   }
 
   // A flag mask to mark an instruction as having arbitrary side effects.
-  static GVNFlagSet AllSideEffectsFlagSet() {
-    GVNFlagSet result = AllChangesFlagSet();
-    result.Remove(kChangesOsrEntries);
-    return result;
-  }
-
-  // A flag mask of all side effects that can make observable changes in
-  // an executing program (i.e. are not safe to repeat, move or remove);
-  static GVNFlagSet AllObservableSideEffectsFlagSet() {
-    GVNFlagSet result = AllChangesFlagSet();
-    result.Remove(kChangesElementsKind);
-    result.Remove(kChangesElementsPointer);
-    result.Remove(kChangesMaps);
-    return result;
+  static int AllSideEffects() {
+    return ChangesFlagsMask() & ~(1 << kChangesOsrEntries);
   }
 
   // Remove the matching use from the use list if present.  Returns the
@@ -804,9 +713,7 @@
   HUseListNode* use_list_;
   Range* range_;
   int flags_;
-  GVNFlagSet gvn_flags_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(HValue);
 };
 
@@ -828,8 +735,6 @@
   bool has_position() const { return position_ != RelocInfo::kNoPosition; }
   void set_position(int position) { position_ = position; }
 
-  bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
-
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
@@ -845,7 +750,7 @@
       : next_(NULL),
         previous_(NULL),
         position_(RelocInfo::kNoPosition) {
-    SetGVNFlag(kDependsOnOsrEntries);
+    SetFlag(kDependsOnOsrEntries);
   }
 
   virtual void DeleteFromGraph() { Unlink(); }
@@ -936,7 +841,7 @@
 
 class HBlockEntry: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -949,7 +854,7 @@
 // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
 class HSoftDeoptimize: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -961,7 +866,7 @@
  public:
   explicit HDeoptimize(int environment_length) : values_(environment_length) { }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1003,10 +908,10 @@
 class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
-    SetSuccessorAt(0, target);
-  }
+    SetSuccessorAt(0, target);
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1046,7 +951,7 @@
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1078,7 +983,7 @@
 
   Handle<Map> map() const { return map_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1095,7 +1000,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1109,7 +1014,7 @@
 
 class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1127,6 +1032,10 @@
     return reinterpret_cast<HUnaryOperation*>(value);
   }
 
+  virtual bool CanTruncateToInt32() const {
+    return CheckFlag(kTruncatingToInt32);
+  }
+
   HValue* value() { return OperandAt(0); }
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1140,7 +1049,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1155,7 +1064,7 @@
  public:
   explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1174,7 +1083,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return representation();  // Same as the output representation.
   }
 
@@ -1185,43 +1094,46 @@
 class HChange: public HUnaryOperation {
  public:
   HChange(HValue* value,
+          Representation from,
           Representation to,
           bool is_truncating,
           bool deoptimize_on_undefined)
-      : HUnaryOperation(value) {
-    ASSERT(!value->representation().IsNone() && !to.IsNone());
-    ASSERT(!value->representation().Equals(to));
+      : HUnaryOperation(value),
+        from_(from),
+        deoptimize_on_undefined_(deoptimize_on_undefined) {
+    ASSERT(!from.IsNone() && !to.IsNone());
+    ASSERT(!from.Equals(to));
     set_representation(to);
-    set_type(HType::TaggedNumber());
     SetFlag(kUseGVN);
-    if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
     if (is_truncating) SetFlag(kTruncatingToInt32);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
-  virtual HType CalculateInferredType();
-  virtual HValue* Canonicalize();
 
-  Representation from() { return value()->representation(); }
-  Representation to() { return representation(); }
-  bool deoptimize_on_undefined() const {
-    return CheckFlag(kDeoptimizeOnUndefined);
-  }
-  bool deoptimize_on_minus_zero() const {
-    return CheckFlag(kBailoutOnMinusZero);
-  }
-  virtual Representation RequiredInputRepresentation(int index) {
-    return from();
+  Representation from() const { return from_; }
+  Representation to() const { return representation(); }
+  bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return from_;
   }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) { return true; }
+  virtual bool DataEquals(HValue* other) {
+    if (!other->IsChange()) return false;
+    HChange* change = HChange::cast(other);
+    return to().Equals(change->to())
+        && deoptimize_on_undefined() == change->deoptimize_on_undefined();
+  }
+
+ private:
+  Representation from_;
+  bool deoptimize_on_undefined_;
 };
 
 
@@ -1233,7 +1145,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1244,6 +1156,37 @@
 };
 
 
+class HToInt32: public HUnaryOperation {
+ public:
+  explicit HToInt32(HValue* value)
+      : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::None();
+  }
+
+  virtual bool CanTruncateToInt32() const {
+    return true;
+  }
+
+  virtual HValue* Canonicalize() {
+    if (value()->representation().IsInteger32()) {
+      return value();
+    } else {
+      return this;
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToInt32)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
 class HSimulate: public HInstruction {
  public:
   HSimulate(int ast_id, int pop_count)
@@ -1280,7 +1223,7 @@
   virtual int OperandCount() { return values_.length(); }
   virtual HValue* OperandAt(int index) { return values_[index]; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1325,7 +1268,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1350,26 +1293,20 @@
 class HEnterInlined: public HTemplateInstruction<0> {
  public:
   HEnterInlined(Handle<JSFunction> closure,
-                int arguments_count,
                 FunctionLiteral* function,
-                CallKind call_kind,
-                bool is_construct)
+                CallKind call_kind)
       : closure_(closure),
-        arguments_count_(arguments_count),
         function_(function),
-        call_kind_(call_kind),
-        is_construct_(is_construct) {
+        call_kind_(call_kind) {
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   Handle<JSFunction> closure() const { return closure_; }
-  int arguments_count() const { return arguments_count_; }
   FunctionLiteral* function() const { return function_; }
   CallKind call_kind() const { return call_kind_; }
-  bool is_construct() const { return is_construct_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1377,10 +1314,8 @@
 
  private:
   Handle<JSFunction> closure_;
-  int arguments_count_;
   FunctionLiteral* function_;
   CallKind call_kind_;
-  bool is_construct_;
 };
 
 
@@ -1388,7 +1323,7 @@
  public:
   HLeaveInlined() {}
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1402,7 +1337,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1414,27 +1349,19 @@
 
 class HThisFunction: public HTemplateInstruction<0> {
  public:
-  explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
+  HThisFunction() {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  Handle<JSFunction> closure() const { return closure_; }
-
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    HThisFunction* b = HThisFunction::cast(other);
-    return *closure() == *b->closure();
-  }
-
- private:
-  Handle<JSFunction> closure_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -1445,7 +1372,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1465,7 +1392,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(OuterContext)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1474,33 +1401,6 @@
 };
 
 
-class HDeclareGlobals: public HUnaryOperation {
- public:
-  HDeclareGlobals(HValue* context,
-                  Handle<FixedArray> pairs,
-                  int flags)
-      : HUnaryOperation(context),
-        pairs_(pairs),
-        flags_(flags) {
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
-  HValue* context() { return OperandAt(0); }
-  Handle<FixedArray> pairs() const { return pairs_; }
-  int flags() const { return flags_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
- private:
-  Handle<FixedArray> pairs_;
-  int flags_;
-};
-
-
 class HGlobalObject: public HUnaryOperation {
  public:
   explicit HGlobalObject(HValue* context) : HUnaryOperation(context) {
@@ -1510,7 +1410,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1529,7 +1429,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1565,7 +1465,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1585,7 +1485,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1600,7 +1500,7 @@
       : HBinaryCall(context, function, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1625,7 +1525,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1642,7 +1542,7 @@
       : HBinaryCall(context, key, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1666,7 +1566,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1675,16 +1575,15 @@
 };
 
 
-class HCallFunction: public HBinaryCall {
+class HCallFunction: public HUnaryCall {
  public:
-  HCallFunction(HValue* context, HValue* function, int argument_count)
-      : HBinaryCall(context, function, argument_count) {
+  HCallFunction(HValue* context, int argument_count)
+      : HUnaryCall(context, argument_count) {
   }
 
-  HValue* context() { return first(); }
-  HValue* function() { return second(); }
+  HValue* context() { return value(); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1703,7 +1602,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1723,7 +1622,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -1740,7 +1639,7 @@
       : HBinaryCall(context, constructor, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1767,7 +1666,7 @@
   const Runtime::Function* function() const { return c_function_; }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1789,11 +1688,11 @@
     SetOperandAt(1, typecheck);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnArrayLengths);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnArrayLengths);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1814,10 +1713,10 @@
   explicit HFixedArrayBaseLength(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnArrayLengths);
+    SetFlag(kDependsOnArrayLengths);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1833,10 +1732,10 @@
   explicit HElementsKind(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnElementsKind);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1855,7 +1754,7 @@
     SetFlag(kTruncatingToInt32);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Integer32();
   }
   virtual HType CalculateInferredType();
@@ -1888,7 +1787,6 @@
       case kMathLog:
       case kMathSin:
       case kMathCos:
-      case kMathTan:
         set_representation(Representation::Double());
         break;
       default:
@@ -1906,7 +1804,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -1919,7 +1817,6 @@
         case kMathLog:
         case kMathSin:
         case kMathCos:
-        case kMathTan:
           return Representation::Double();
         case kMathAbs:
           return representation();
@@ -1961,10 +1858,10 @@
   explicit HLoadElements(HValue* value) : HUnaryOperation(value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnElementsPointer);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -1987,7 +1884,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2000,30 +1897,18 @@
 
 class HCheckMap: public HTemplateInstruction<2> {
  public:
-  HCheckMap(HValue* value,
-            Handle<Map> map,
-            HValue* typecheck = NULL,
-            CompareMapMode mode = REQUIRE_EXACT_MAP)
-      : map_(map),
-        mode_(mode) {
+  HCheckMap(HValue* value, Handle<Map> map, HValue* typecheck = NULL)
+      : map_(map) {
     SetOperandAt(0, value);
     // If callers don't depend on a typecheck, they can pass in NULL. In that
     // case we use a copy of the |value| argument as a dummy value.
     SetOperandAt(1, typecheck != NULL ? typecheck : value);
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
-    // If the map to check doesn't have the untransitioned elements, it must not
-    // be hoisted above TransitionElements instructions.
-    if (mode == REQUIRE_EXACT_MAP || !map->has_fast_smi_only_elements()) {
-      SetGVNFlag(kDependsOnElementsKind);
-    }
-    has_element_transitions_ =
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL) != NULL ||
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL) != NULL;
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2031,24 +1916,17 @@
 
   HValue* value() { return OperandAt(0); }
   Handle<Map> map() const { return map_; }
-  CompareMapMode mode() const { return mode_; }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckMap)
 
  protected:
   virtual bool DataEquals(HValue* other) {
     HCheckMap* b = HCheckMap::cast(other);
-    // Two CheckMaps instructions are DataEqual if their maps are identical and
-    // they have the same mode. The mode comparison can be ignored if the map
-    // has no elements transitions.
-    return map_.is_identical_to(b->map()) &&
-        (b->mode() == mode() || !has_element_transitions_);
+    return map_.is_identical_to(b->map());
   }
 
  private:
-  bool has_element_transitions_;
   Handle<Map> map_;
-  CompareMapMode mode_;
 };
 
 
@@ -2060,7 +1938,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2100,9 +1978,7 @@
     return new HCheckInstanceType(value, IS_SYMBOL);
   }
 
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2132,8 +2008,6 @@
     LAST_INTERVAL_CHECK = IS_JS_ARRAY
   };
 
-  const char* GetCheckName();
-
   HCheckInstanceType(HValue* value, Check check)
       : HUnaryOperation(value), check_(check) {
     set_representation(Representation::Tagged());
@@ -2151,7 +2025,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2185,7 +2059,7 @@
   HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
       : prototype_(prototype), holder_(holder) {
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnMaps);
   }
 
 #ifdef DEBUG
@@ -2197,7 +2071,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -2228,7 +2102,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual HType CalculateInferredType();
@@ -2261,10 +2135,23 @@
     SetFlag(kFlexibleRepresentation);
   }
 
-  virtual Representation InferredRepresentation();
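+  // Pick the most general representation among the operands: any tagged
+  // operand forces Tagged, otherwise Double wins over Integer32.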
+  virtual Representation InferredRepresentation() {
+    bool double_occurred = false;
+    bool int32_occurred = false;
+    for (int i = 0; i < OperandCount(); ++i) {
+      HValue* value = OperandAt(i);
+      if (value->representation().IsDouble()) double_occurred = true;
+      if (value->representation().IsInteger32()) int32_occurred = true;
+      if (value->representation().IsTagged()) return Representation::Tagged();
+    }
 
-  virtual Range* InferRange(Zone* zone);
-  virtual Representation RequiredInputRepresentation(int index) {
+    if (double_occurred) return Representation::Double();
+    if (int32_occurred) return Representation::Integer32();
+    return Representation::None();
+  }
+
+  virtual Range* InferRange();
+  virtual Representation RequiredInputRepresentation(int index) const {
     return representation();
   }
   virtual HType CalculateInferredType();
@@ -2356,7 +2243,7 @@
     SetFlag(kIsArguments);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -2372,20 +2259,7 @@
 
   bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
 
-  bool ImmortalImmovable() const {
-    Heap* heap = HEAP;
-    if (*handle_ == heap->undefined_value()) return true;
-    if (*handle_ == heap->null_value()) return true;
-    if (*handle_ == heap->true_value()) return true;
-    if (*handle_ == heap->false_value()) return true;
-    if (*handle_ == heap->the_hole_value()) return true;
-    if (*handle_ == heap->minus_zero_value()) return true;
-    if (*handle_ == heap->nan_value()) return true;
-    if (*handle_ == heap->empty_string()) return true;
-    return false;
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -2398,7 +2272,6 @@
   }
 
   virtual bool EmitAtUses() { return !representation().IsDouble(); }
-  virtual HValue* Canonicalize();
   virtual void PrintDataTo(StringStream* stream);
   virtual HType CalculateInferredType();
   bool IsInteger() const { return handle_->IsSmi(); }
@@ -2414,24 +2287,13 @@
     ASSERT(HasDoubleValue());
     return double_value_;
   }
-  bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
-  int32_t NumberValueAsInteger32() const {
-    ASSERT(HasNumberValue());
-    if (has_int32_value_) return int32_value_;
-    return DoubleToInt32(double_value_);
-  }
   bool HasStringValue() const { return handle_->IsString(); }
 
   bool ToBoolean() const;
 
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->allow_allocation(false));
-    intptr_t hash = reinterpret_cast<intptr_t>(*handle());
-    // Prevent smis from having fewer hash values when truncated to
-    // the least significant bits.
-    const int kShiftSize = kSmiShiftSize + kSmiTagSize;
-    STATIC_ASSERT(kShiftSize != 0);
-    return hash ^ (hash >> kShiftSize);
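+    // Allocation is disallowed here (see the ASSERT above), so the object
+    // cannot move and its address serves directly as the hash value.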
+    return reinterpret_cast<intptr_t>(*handle());
   }
 
 #ifdef DEBUG
@@ -2441,7 +2303,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 
   virtual bool DataEquals(HValue* other) {
     HConstant* other_constant = HConstant::cast(other);
@@ -2491,27 +2353,6 @@
 };
 
 
-class HWrapReceiver: public HTemplateInstruction<2> {
- public:
-  HWrapReceiver(HValue* receiver, HValue* function) {
-    set_representation(Representation::Tagged());
-    SetOperandAt(0, receiver);
-    SetOperandAt(1, function);
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  HValue* receiver() { return OperandAt(0); }
-  HValue* function() { return OperandAt(1); }
-
-  virtual HValue* Canonicalize();
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
-};
-
-
 class HApplyArguments: public HTemplateInstruction<4> {
  public:
   HApplyArguments(HValue* function,
@@ -2526,7 +2367,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The length is untagged, all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -2553,7 +2394,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -2569,7 +2410,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2592,7 +2433,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The arguments elements object is considered tagged.
     return index == 0
         ? Representation::Tagged()
@@ -2618,7 +2459,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Integer32();
   }
 
@@ -2643,7 +2484,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2681,7 +2522,7 @@
   }
 
   virtual HType CalculateInferredType();
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2708,7 +2549,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2746,7 +2587,7 @@
     return input_representation_;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return input_representation_;
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2769,9 +2610,7 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2790,7 +2629,7 @@
   HValue* left() { return value(); }
   int right() const { return right_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Integer32();
   }
 
@@ -2802,25 +2641,21 @@
 };
 
 
-class HIsNilAndBranch: public HUnaryControlInstruction {
+class HIsNullAndBranch: public HUnaryControlInstruction {
  public:
-  HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
-      : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
+  HIsNullAndBranch(HValue* value, bool is_strict)
+      : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
 
-  EqualityKind kind() const { return kind_; }
-  NilValue nil() const { return nil_; }
+  bool is_strict() const { return is_strict_; }
 
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
 
  private:
-  EqualityKind kind_;
-  NilValue nil_;
+  bool is_strict_;
 };
 
 
@@ -2829,25 +2664,13 @@
   explicit HIsObjectAndBranch(HValue* value)
     : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 };
 
-class HIsStringAndBranch: public HUnaryControlInstruction {
- public:
-  explicit HIsStringAndBranch(HValue* value)
-    : HUnaryControlInstruction(value, NULL, NULL) { }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
-};
-
 
 class HIsSmiAndBranch: public HUnaryControlInstruction {
  public:
@@ -2856,7 +2679,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2870,7 +2693,7 @@
   explicit HIsUndetectableAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2878,45 +2701,9 @@
 };
 
 
-class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
- public:
-  HStringCompareAndBranch(HValue* context,
-                           HValue* left,
-                           HValue* right,
-                           Token::Value token)
-      : token_(token) {
-    ASSERT(Token::IsCompareOp(token));
-    SetOperandAt(0, context);
-    SetOperandAt(1, left);
-    SetOperandAt(2, right);
-    set_representation(Representation::Tagged());
-  }
-
-  HValue* context() { return OperandAt(0); }
-  HValue* left() { return OperandAt(1); }
-  HValue* right() { return OperandAt(2); }
-  Token::Value token() const { return token_; }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  Representation GetInputRepresentation() const {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
-
- private:
-  Token::Value token_;
-};
-
-
 class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -2938,7 +2725,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2955,7 +2742,7 @@
   explicit HHasCachedArrayIndexAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2970,7 +2757,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -2989,7 +2776,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3013,7 +2800,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3030,7 +2817,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3058,7 +2845,7 @@
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3083,7 +2870,7 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return index == 0
       ? Representation::Double()
       : Representation::None();
@@ -3096,23 +2883,6 @@
 };
 
 
-class HRandom: public HTemplateInstruction<1> {
- public:
-  explicit HRandom(HValue* global_object) {
-    SetOperandAt(0, global_object);
-    set_representation(Representation::Double());
-  }
-
-  HValue* global_object() { return OperandAt(0); }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Random)
-};
-
-
 class HAdd: public HArithmeticBinaryOperation {
  public:
   HAdd(HValue* context, HValue* left, HValue* right)
@@ -3128,21 +2898,14 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  static HInstruction* NewHAdd(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
-
   virtual HType CalculateInferredType();
 
-  virtual HValue* Canonicalize();
-
   DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 };
 
 
@@ -3155,19 +2918,12 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual HValue* Canonicalize();
-
-  static HInstruction* NewHSub(Zone* zone,
-                              HValue* context,
-                              HValue* left,
-                              HValue* right);
-
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 };
 
 
@@ -3185,17 +2941,12 @@
     return !representation().IsTagged();
   }
 
-  static HInstruction* NewHMul(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
-
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 };
 
 
@@ -3218,17 +2969,12 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  static HInstruction* NewHMod(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
-
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 };
 
 
@@ -3242,52 +2988,61 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  static HInstruction* NewHDiv(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
-
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
 };
 
 
-class HBitwise: public HBitwiseBinaryOperation {
+class HBitAnd: public HBitwiseBinaryOperation {
  public:
-  HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
-      : HBitwiseBinaryOperation(context, left, right), op_(op) {
-        ASSERT(op == Token::BIT_AND ||
-               op == Token::BIT_OR ||
-               op == Token::BIT_XOR);
-      }
-
-  Token::Value op() const { return op_; }
+  HBitAnd(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
 
   virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType();
 
-  virtual HValue* Canonicalize();
-
-  static HInstruction* NewHBitwise(Zone* zone,
-                                   Token::Value op,
-                                   HValue* context,
-                                   HValue* left,
-                                   HValue* right);
-
-  DECLARE_CONCRETE_INSTRUCTION(Bitwise)
+  DECLARE_CONCRETE_INSTRUCTION(BitAnd)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    return op() == HBitwise::cast(other)->op();
-  }
+  virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone);
+  virtual Range* InferRange();
+};
 
- private:
-  Token::Value op_;
+
+class HBitXor: public HBitwiseBinaryOperation {
+ public:
+  HBitXor(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(BitXor)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
+class HBitOr: public HBitwiseBinaryOperation {
+ public:
+  HBitOr(HValue* context, HValue* left, HValue* right)
+      : HBitwiseBinaryOperation(context, left, right) { }
+
+  virtual bool IsCommutative() const { return true; }
+  virtual HType CalculateInferredType();
+
+  DECLARE_CONCRETE_INSTRUCTION(BitOr)
+
+ protected:
+  virtual bool DataEquals(HValue* other) { return true; }
+
+  virtual Range* InferRange();
 };
 
 
@@ -3296,12 +3051,8 @@
   HShl(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange(Zone* zone);
-
-  static HInstruction* NewHShl(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
@@ -3315,12 +3066,8 @@
   HShr(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange(Zone* zone);
-
-  static HInstruction* NewHShr(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
@@ -3334,12 +3081,8 @@
   HSar(HValue* context, HValue* left, HValue* right)
       : HBitwiseBinaryOperation(context, left, right) { }
 
-  virtual Range* InferRange(Zone* zone);
-
-  static HInstruction* NewHSar(Zone* zone,
-                               HValue* context,
-                               HValue* left,
-                               HValue* right);
+  virtual Range* InferRange();
+  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
@@ -3351,12 +3094,12 @@
 class HOsrEntry: public HTemplateInstruction<0> {
  public:
   explicit HOsrEntry(int ast_id) : ast_id_(ast_id) {
-    SetGVNFlag(kChangesOsrEntries);
+    SetFlag(kChangesOsrEntries);
   }
 
   int ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -3377,7 +3120,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -3409,7 +3152,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3423,41 +3166,27 @@
 
 class HUnknownOSRValue: public HTemplateInstruction<0> {
  public:
-  HUnknownOSRValue()
-      : incoming_value_(NULL) {
-    set_representation(Representation::Tagged());
-  }
+  HUnknownOSRValue() { set_representation(Representation::Tagged()); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
-  void set_incoming_value(HPhi* value) {
-    incoming_value_ = value;
-  }
-
-  HPhi* incoming_value() {
-    return incoming_value_;
-  }
-
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
-
- private:
-  HPhi* incoming_value_;
 };
 
 
 class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
-      : cell_(cell), details_(details) {
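+  // |check_hole_value| indicates that the cell may contain the hole and
+  // that the load must check for it.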
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+      : cell_(cell), check_hole_value_(check_hole_value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnGlobalVars);
+    SetFlag(kDependsOnGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool RequiresHoleCheck();
+  bool check_hole_value() const { return check_hole_value_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3466,7 +3195,7 @@
     return reinterpret_cast<intptr_t>(*cell_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::None();
   }
 
@@ -3480,7 +3209,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  PropertyDetails details_;
+  bool check_hole_value_;
 };
 
 
@@ -3505,7 +3234,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3517,33 +3246,21 @@
 };
 
 
-inline bool StoringValueNeedsWriteBarrier(HValue* value) {
-  return !value->type().IsBoolean()
-      && !value->type().IsSmi()
-      && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
-}
-
-
 class HStoreGlobalCell: public HUnaryOperation {
  public:
   HStoreGlobalCell(HValue* value,
                    Handle<JSGlobalPropertyCell> cell,
-                   PropertyDetails details)
+                   bool check_hole_value)
       : HUnaryOperation(value),
         cell_(cell),
-        details_(details) {
-    SetGVNFlag(kChangesGlobalVars);
+        check_hole_value_(check_hole_value) {
+    SetFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool RequiresHoleCheck() {
-    return !details_.IsDontDelete() || details_.IsReadOnly();
-  }
-  bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
-  }
+  bool check_hole_value() const { return check_hole_value_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3552,7 +3269,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  PropertyDetails details_;
+  bool check_hole_value_;
 };
 
 
@@ -3562,9 +3279,9 @@
                       HValue* global_object,
                       Handle<Object> name,
                       HValue* value,
-                      StrictModeFlag strict_mode_flag)
+                      bool strict_mode)
       : name_(name),
-        strict_mode_flag_(strict_mode_flag) {
+        strict_mode_(strict_mode) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     SetOperandAt(2, value);
@@ -3576,11 +3293,11 @@
   HValue* global_object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
   HValue* value() { return OperandAt(2); }
-  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+  bool strict_mode() { return strict_mode_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3588,56 +3305,22 @@
 
  private:
   Handle<Object> name_;
-  StrictModeFlag strict_mode_flag_;
+  bool strict_mode_;
 };
 
 
 class HLoadContextSlot: public HUnaryOperation {
  public:
-  enum Mode {
-    // Perform a normal load of the context slot without checking its value.
-    kNoCheck,
-    // Load and check the value of the context slot. Deoptimize if it's the
-    // hole value. This is used for checking for loading of uninitialized
-    // harmony bindings where we deoptimize into full-codegen generated code
-    // which will subsequently throw a reference error.
-    kCheckDeoptimize,
-    // Load and check the value of the context slot. Return undefined if it's
-    // the hole value. This is used for non-harmony const assignments
-    kCheckReturnUndefined
-  };
-
-  HLoadContextSlot(HValue* context, Variable* var)
-      : HUnaryOperation(context), slot_index_(var->index()) {
-    ASSERT(var->IsContextSlot());
-    switch (var->mode()) {
-      case LET:
-      case CONST_HARMONY:
-        mode_ = kCheckDeoptimize;
-        break;
-      case CONST:
-        mode_ = kCheckReturnUndefined;
-        break;
-      default:
-        mode_ = kNoCheck;
-    }
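+  // Loads the value at |slot_index| from the given context; no hole check
+  // is performed.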
+  HLoadContextSlot(HValue* context, int slot_index)
+      : HUnaryOperation(context), slot_index_(slot_index) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnContextSlots);
+    SetFlag(kDependsOnContextSlots);
   }
 
   int slot_index() const { return slot_index_; }
-  Mode mode() const { return mode_; }
 
-  bool DeoptimizesOnHole() {
-    return mode_ == kCheckDeoptimize;
-  }
-
-  bool RequiresHoleCheck() {
-    return mode_ != kNoCheck;
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3653,50 +3336,34 @@
 
  private:
   int slot_index_;
-  Mode mode_;
 };
 
 
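+// A write barrier can be skipped when the stored value is provably not a
+// new-space object: booleans and smis never live in new space, and a
+// constant already in old space cannot create an old-to-new pointer.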
+static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsBoolean()
+      && !value->type().IsSmi()
+      && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+}
+
+
 class HStoreContextSlot: public HTemplateInstruction<2> {
  public:
-  enum Mode {
-    // Perform a normal store to the context slot without checking its previous
-    // value.
-    kNoCheck,
-    // Check the previous value of the context slot and deoptimize if it's the
-    // hole value. This is used for checking for assignments to uninitialized
-    // harmony bindings where we deoptimize into full-codegen generated code
-    // which will subsequently throw a reference error.
-    kCheckDeoptimize,
-    // Check the previous value and ignore assignment if it isn't a hole value
-    kCheckIgnoreAssignment
-  };
-
-  HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
-      : slot_index_(slot_index), mode_(mode) {
+  HStoreContextSlot(HValue* context, int slot_index, HValue* value)
+      : slot_index_(slot_index) {
     SetOperandAt(0, context);
     SetOperandAt(1, value);
-    SetGVNFlag(kChangesContextSlots);
+    SetFlag(kChangesContextSlots);
   }
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
   int slot_index() const { return slot_index_; }
-  Mode mode() const { return mode_; }
 
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  bool DeoptimizesOnHole() {
-    return mode_ == kCheckDeoptimize;
-  }
-
-  bool RequiresHoleCheck() {
-    return mode_ != kNoCheck;
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3706,7 +3373,6 @@
 
  private:
   int slot_index_;
-  Mode mode_;
 };
 
 
@@ -3718,11 +3384,11 @@
         offset_(offset) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnMaps);
     if (is_in_object) {
-      SetGVNFlag(kDependsOnInobjectFields);
+      SetFlag(kDependsOnInobjectFields);
     } else {
-      SetGVNFlag(kDependsOnBackingStoreFields);
+      SetFlag(kDependsOnBackingStoreFields);
     }
   }
 
@@ -3730,7 +3396,7 @@
   bool is_in_object() const { return is_in_object_; }
   int offset() const { return offset_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3762,7 +3428,7 @@
   Handle<String> name() { return name_; }
   bool need_generic() { return need_generic_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3797,7 +3463,7 @@
   HValue* object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3816,12 +3482,12 @@
       : HUnaryOperation(function) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnCalls);
+    SetFlag(kDependsOnCalls);
   }
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -3834,23 +3500,18 @@
 
 class HLoadKeyedFastElement: public HTemplateInstruction<2> {
  public:
-  enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
-
-  HLoadKeyedFastElement(HValue* obj,
-                        HValue* key,
-                        HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
-      : hole_check_mode_(hole_check_mode) {
+  HLoadKeyedFastElement(HValue* obj, HValue* key) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     set_representation(Representation::Tagged());
-    SetGVNFlag(kDependsOnArrayElements);
+    SetFlag(kDependsOnArrayElements);
     SetFlag(kUseGVN);
   }
 
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3859,19 +3520,12 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck();
+  bool RequiresHoleCheck() const;
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    if (!other->IsLoadKeyedFastElement()) return false;
-    HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
-    return hole_check_mode_ == other_load->hole_check_mode_;
-  }
-
- private:
-  HoleCheckMode hole_check_mode_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -3881,14 +3535,14 @@
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     set_representation(Representation::Double());
-    SetGVNFlag(kDependsOnDoubleArrayElements);
+    SetFlag(kDependsOnDoubleArrayElements);
     SetFlag(kUseGVN);
   }
 
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3897,6 +3551,8 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
+  bool RequiresHoleCheck() const;
+
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
 
  protected:
@@ -3918,15 +3574,15 @@
     } else {
       set_representation(Representation::Integer32());
     }
-    SetGVNFlag(kDependsOnSpecializedArrayElements);
+    SetFlag(kDependsOnSpecializedArrayElements);
     // Native code could change the specialized array.
-    SetGVNFlag(kDependsOnCalls);
+    SetFlag(kDependsOnCalls);
     SetFlag(kUseGVN);
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32, but the base pointer
     // for the element load is a naked pointer.
     return index == 0
@@ -3938,8 +3594,6 @@
   HValue* key() { return OperandAt(1); }
   ElementsKind elements_kind() const { return elements_kind_; }
 
-  virtual Range* InferRange(Zone* zone);
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
 
  protected:
@@ -3971,12 +3625,10 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
-  virtual HValue* Canonicalize();
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 };
 
@@ -3994,15 +3646,15 @@
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
     if (is_in_object_) {
-      SetGVNFlag(kChangesInobjectFields);
+      SetFlag(kChangesInobjectFields);
     } else {
-      SetGVNFlag(kChangesBackingStoreFields);
+      SetFlag(kChangesBackingStoreFields);
     }
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -4034,9 +3686,9 @@
                      HValue* object,
                      Handle<String> name,
                      HValue* value,
-                     StrictModeFlag strict_mode_flag)
+                     bool strict_mode)
       : name_(name),
-        strict_mode_flag_(strict_mode_flag) {
+        strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, value);
     SetOperandAt(2, context);
@@ -4047,11 +3699,11 @@
   HValue* value() { return OperandAt(1); }
   HValue* context() { return OperandAt(2); }
   Handle<String> name() { return name_; }
-  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+  bool strict_mode() { return strict_mode_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4059,22 +3711,20 @@
 
  private:
   Handle<String> name_;
-  StrictModeFlag strict_mode_flag_;
+  bool strict_mode_;
 };
 
 
 class HStoreKeyedFastElement: public HTemplateInstruction<3> {
  public:
-  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
-                         ElementsKind elements_kind = FAST_ELEMENTS)
-      : elements_kind_(elements_kind) {
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
-    SetGVNFlag(kChangesArrayElements);
+    SetFlag(kChangesArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The key is supposed to be Integer32.
     return index == 1
         ? Representation::Integer32()
@@ -4084,24 +3734,14 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
-  bool value_is_smi() {
-    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
-  }
 
   bool NeedsWriteBarrier() {
-    if (value_is_smi()) {
-      return false;
-    } else {
-      return StoringValueNeedsWriteBarrier(value());
-    }
+    return StoringValueNeedsWriteBarrier(value());
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
-
- private:
-  ElementsKind elements_kind_;
 };
 
 
@@ -4113,10 +3753,10 @@
     SetOperandAt(0, elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
-    SetGVNFlag(kChangesDoubleArrayElements);
+    SetFlag(kChangesDoubleArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     if (index == 1) {
       return Representation::Integer32();
     } else if (index == 2) {
@@ -4147,7 +3787,7 @@
                                      HValue* val,
                                      ElementsKind elements_kind)
       : elements_kind_(elements_kind) {
-    SetGVNFlag(kChangesSpecializedArrayElements);
+    SetFlag(kChangesSpecializedArrayElements);
     SetOperandAt(0, external_elements);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
@@ -4155,7 +3795,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     if (index == 0) {
       return Representation::External();
     } else {
@@ -4188,8 +3828,8 @@
                      HValue* object,
                      HValue* key,
                      HValue* value,
-                     StrictModeFlag strict_mode_flag)
-      : strict_mode_flag_(strict_mode_flag) {
+                     bool strict_mode)
+      : strict_mode_(strict_mode) {
     SetOperandAt(0, object);
     SetOperandAt(1, key);
     SetOperandAt(2, value);
@@ -4201,9 +3841,9 @@
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
   HValue* context() { return OperandAt(3); }
-  StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
+  bool strict_mode() { return strict_mode_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4212,46 +3852,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
  private:
-  StrictModeFlag strict_mode_flag_;
-};
-
-
-class HTransitionElementsKind: public HTemplateInstruction<1> {
- public:
-  HTransitionElementsKind(HValue* object,
-                          Handle<Map> original_map,
-                          Handle<Map> transitioned_map)
-      : original_map_(original_map),
-        transitioned_map_(transitioned_map) {
-    SetOperandAt(0, object);
-    SetFlag(kUseGVN);
-    SetGVNFlag(kChangesElementsKind);
-    SetGVNFlag(kChangesElementsPointer);
-    set_representation(Representation::Tagged());
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  HValue* object() { return OperandAt(0); }
-  Handle<Map> original_map() { return original_map_; }
-  Handle<Map> transitioned_map() { return transitioned_map_; }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
-
- protected:
-  virtual bool DataEquals(HValue* other) {
-    HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
-    return original_map_.is_identical_to(instr->original_map()) &&
-        transitioned_map_.is_identical_to(instr->transitioned_map());
-  }
-
- private:
-  Handle<Map> original_map_;
-  Handle<Map> transitioned_map_;
+  bool strict_mode_;
 };
 
 
@@ -4261,10 +3862,10 @@
       : HBinaryOperation(context, left, right) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4287,10 +3888,10 @@
     SetOperandAt(2, index);
     set_representation(Representation::Integer32());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     // The index is supposed to be Integer32.
     return index == 2
         ? Representation::Integer32()
@@ -4306,8 +3907,8 @@
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone) {
-    return new(zone) Range(0, String::kMaxUtf16CodeUnit);
+  virtual Range* InferRange() {
+    return new Range(0, String::kMaxUC16CharCode);
   }
 };
 
@@ -4317,16 +3918,15 @@
   HStringCharFromCode(HValue* context, HValue* char_code) {
     SetOperandAt(0, context);
     SetOperandAt(1, char_code);
-    set_representation(Representation::Tagged());
+     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
   }
-  virtual HType CalculateInferredType();
 
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
@@ -4342,10 +3942,10 @@
   explicit HStringLength(HValue* string) : HUnaryOperation(string) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
+    SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4359,35 +3959,12 @@
  protected:
   virtual bool DataEquals(HValue* other) { return true; }
 
-  virtual Range* InferRange(Zone* zone) {
-    return new(zone) Range(0, String::kMaxLength);
+  virtual Range* InferRange() {
+    return new Range(0, String::kMaxLength);
   }
 };
 
 
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
-  HAllocateObject(HValue* context, Handle<JSFunction> constructor)
-      : constructor_(constructor) {
-    SetOperandAt(0, context);
-    set_representation(Representation::Tagged());
-  }
-
-  HValue* context() { return OperandAt(0); }
-  Handle<JSFunction> constructor() { return constructor_; }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
-  Handle<JSFunction> constructor_;
-};
-
-
 template <int V>
 class HMaterializedLiteral: public HTemplateInstruction<V> {
  public:
@@ -4405,76 +3982,34 @@
 };
 
 
-class HFastLiteral: public HMaterializedLiteral<1> {
- public:
-  HFastLiteral(HValue* context,
-               Handle<JSObject> boilerplate,
-               int total_size,
-               int literal_index,
-               int depth)
-      : HMaterializedLiteral<1>(literal_index, depth),
-        boilerplate_(boilerplate),
-        total_size_(total_size) {
-    SetOperandAt(0, context);
-  }
-
-  // Maximum depth and total number of elements and properties for literal
-  // graphs to be considered for fast deep-copying.
-  static const int kMaxLiteralDepth = 3;
-  static const int kMaxLiteralProperties = 8;
-
-  HValue* context() { return OperandAt(0); }
-  Handle<JSObject> boilerplate() const { return boilerplate_; }
-  int total_size() const { return total_size_; }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-  virtual HType CalculateInferredType();
-
-  DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
-
- private:
-  Handle<JSObject> boilerplate_;
-  int total_size_;
-};
-
-
 class HArrayLiteral: public HMaterializedLiteral<1> {
  public:
   HArrayLiteral(HValue* context,
-                Handle<HeapObject> boilerplate_object,
+                Handle<FixedArray> constant_elements,
                 int length,
                 int literal_index,
                 int depth)
       : HMaterializedLiteral<1>(literal_index, depth),
         length_(length),
-        boilerplate_object_(boilerplate_object) {
+        constant_elements_(constant_elements) {
     SetOperandAt(0, context);
   }
 
   HValue* context() { return OperandAt(0); }
-  ElementsKind boilerplate_elements_kind() const {
-    if (!boilerplate_object_->IsJSObject()) {
-      return FAST_ELEMENTS;
-    }
-    return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind();
-  }
-  Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
+  Handle<FixedArray> constant_elements() const { return constant_elements_; }
   int length() const { return length_; }
 
   bool IsCopyOnWrite() const;
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral)
 
  private:
   int length_;
-  Handle<HeapObject> boilerplate_object_;
+  Handle<FixedArray> constant_elements_;
 };
 
 
@@ -4500,10 +4035,9 @@
   bool fast_elements() const { return fast_elements_; }
   bool has_function() const { return has_function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral)
 
@@ -4531,10 +4065,9 @@
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral)
 
@@ -4556,10 +4089,9 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
-  virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
 
@@ -4583,10 +4115,7 @@
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
 
-  virtual HValue* Canonicalize();
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4600,11 +4129,11 @@
     // This instruction is not marked as having side effects, but
     // changes the map of the input operand. Use it only when creating
     // object literals.
-    ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
+    ASSERT(value->IsObjectLiteral());
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4618,7 +4147,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4626,26 +4155,6 @@
 };
 
 
-class HDateField: public HUnaryOperation {
- public:
-  HDateField(HValue* date, Smi* index)
-      : HUnaryOperation(date), index_(index) {
-    set_representation(Representation::Tagged());
-  }
-
-  Smi* index() const { return index_; }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField)
-
- private:
-  Smi* index_;
-};
-
-
 class HDeleteProperty: public HBinaryOperation {
  public:
   HDeleteProperty(HValue* context, HValue* obj, HValue* key)
@@ -4654,7 +4163,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4681,7 +4190,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* object() { return OperandAt(2); }
 
-  virtual Representation RequiredInputRepresentation(int index) {
+  virtual Representation RequiredInputRepresentation(int index) const {
     return Representation::Tagged();
   }
 
@@ -4694,134 +4203,6 @@
   DECLARE_CONCRETE_INSTRUCTION(In)
 };
 
-
-class HCheckMapValue: public HTemplateInstruction<2> {
- public:
-  HCheckMapValue(HValue* value,
-                 HValue* map) {
-    SetOperandAt(0, value);
-    SetOperandAt(1, map);
-    set_representation(Representation::Tagged());
-    SetFlag(kUseGVN);
-    SetGVNFlag(kDependsOnMaps);
-    SetGVNFlag(kDependsOnElementsKind);
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual HType CalculateInferredType() {
-    return HType::Tagged();
-  }
-
-  HValue* value() { return OperandAt(0); }
-  HValue* map() { return OperandAt(1); }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
-
- protected:
-  virtual bool DataEquals(HValue* other) {
-    return true;
-  }
-};
-
-
-class HForInPrepareMap : public HTemplateInstruction<2> {
- public:
-  HForInPrepareMap(HValue* context,
-                   HValue* object) {
-    SetOperandAt(0, context);
-    SetOperandAt(1, object);
-    set_representation(Representation::Tagged());
-    SetAllSideEffects();
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  HValue* context() { return OperandAt(0); }
-  HValue* enumerable() { return OperandAt(1); }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual HType CalculateInferredType() {
-    return HType::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
-};
-
-
-class HForInCacheArray : public HTemplateInstruction<2> {
- public:
-  HForInCacheArray(HValue* enumerable,
-                   HValue* keys,
-                   int idx) : idx_(idx) {
-    SetOperandAt(0, enumerable);
-    SetOperandAt(1, keys);
-    set_representation(Representation::Tagged());
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  HValue* enumerable() { return OperandAt(0); }
-  HValue* map() { return OperandAt(1); }
-  int idx() { return idx_; }
-
-  HForInCacheArray* index_cache() {
-    return index_cache_;
-  }
-
-  void set_index_cache(HForInCacheArray* index_cache) {
-    index_cache_ = index_cache;
-  }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual HType CalculateInferredType() {
-    return HType::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
-
- private:
-  int idx_;
-  HForInCacheArray* index_cache_;
-};
-
-
-class HLoadFieldByIndex : public HTemplateInstruction<2> {
- public:
-  HLoadFieldByIndex(HValue* object,
-                    HValue* index) {
-    SetOperandAt(0, object);
-    SetOperandAt(1, index);
-    set_representation(Representation::Tagged());
-  }
-
-  virtual Representation RequiredInputRepresentation(int index) {
-    return Representation::Tagged();
-  }
-
-  HValue* object() { return OperandAt(0); }
-  HValue* index() { return OperandAt(1); }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  virtual HType CalculateInferredType() {
-    return HType::Tagged();
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
-};
-
-
 #undef DECLARE_INSTRUCTION
 #undef DECLARE_CONCRETE_INSTRUCTION
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 34fd1bc..c625fba 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -70,8 +70,7 @@
       deleted_phis_(4),
       parent_loop_header_(NULL),
       is_inline_return_target_(false),
-      is_deoptimizing_(false),
-      dominates_loop_successors_(false) { }
+      is_deoptimizing_(false) { }
 
 
 void HBasicBlock::AttachLoopInformation() {
@@ -97,7 +96,7 @@
   ASSERT(phi->block() == this);
   ASSERT(phis_.Contains(phi));
   ASSERT(phi->HasNoUses() || !phi->is_live());
-  phi->Kill();
+  phi->ClearOperands();
   phis_.RemoveElement(phi);
   phi->SetBlock(NULL);
 }
@@ -165,10 +164,10 @@
 }
 
 
-void HBasicBlock::Goto(HBasicBlock* block, bool drop_extra) {
+void HBasicBlock::Goto(HBasicBlock* block) {
   if (block->IsInlineReturnTarget()) {
     AddInstruction(new(zone()) HLeaveInlined);
-    last_environment_ = last_environment()->DiscardInlined(drop_extra);
+    last_environment_ = last_environment()->outer();
   }
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(block);
@@ -176,13 +175,11 @@
 }
 
 
-void HBasicBlock::AddLeaveInlined(HValue* return_value,
-                                  HBasicBlock* target,
-                                  bool drop_extra) {
+void HBasicBlock::AddLeaveInlined(HValue* return_value, HBasicBlock* target) {
   ASSERT(target->IsInlineReturnTarget());
   ASSERT(return_value != NULL);
   AddInstruction(new(zone()) HLeaveInlined);
-  last_environment_ = last_environment()->DiscardInlined(drop_extra);
+  last_environment_ = last_environment()->outer();
   last_environment()->Push(return_value);
   AddSimulate(AstNode::kNoNumber);
   HGoto* instr = new(zone()) HGoto(target);
@@ -316,62 +313,6 @@
 }
 
 
-void HBasicBlock::AssignLoopSuccessorDominators() {
-  // Mark blocks that dominate all subsequent reachable blocks inside their
-  // loop. Exploit the fact that blocks are sorted in reverse post order. When
-  // the loop is visited in increasing block id order, if the number of
-  // non-loop-exiting successor edges at the dominator_candidate block doesn't
-  // exceed the number of previously encountered predecessor edges, there is no
-  // path from the loop header to any block with higher id that doesn't go
-  // through the dominator_candidate block. In this case, the
-  // dominator_candidate block is guaranteed to dominate all blocks reachable
-  // from it with higher ids.
-  HBasicBlock* last = loop_information()->GetLastBackEdge();
-  int outstanding_successors = 1;  // one edge from the pre-header
-  // Header always dominates everything.
-  MarkAsLoopSuccessorDominator();
-  for (int j = block_id(); j <= last->block_id(); ++j) {
-    HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
-    for (HPredecessorIterator it(dominator_candidate); !it.Done();
-         it.Advance()) {
-      HBasicBlock* predecessor = it.Current();
-      // Don't count back edges.
-      if (predecessor->block_id() < dominator_candidate->block_id()) {
-        outstanding_successors--;
-      }
-    }
-
-    // If more successors than predecessors have been seen in the loop up to
-    // now, it's not possible to guarantee that the current block dominates
-    // all of the blocks with higher IDs. In this case, assume conservatively
-    // that those paths through the loop that don't go through the current block
-    // contain all of the loop's dependencies. Also be careful to record
-    // dominator information about the current loop that's being processed,
-    // and not nested loops, which will be processed when
-    // AssignLoopSuccessorDominators gets called on their header.
-    ASSERT(outstanding_successors >= 0);
-    HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
-    if (outstanding_successors == 0 &&
-        (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
-      dominator_candidate->MarkAsLoopSuccessorDominator();
-    }
-    HControlInstruction* end = dominator_candidate->end();
-    for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
-      HBasicBlock* successor = it.Current();
-      // Only count successors that remain inside the loop and don't loop back
-      // to a loop header.
-      if (successor->block_id() > dominator_candidate->block_id() &&
-          successor->block_id() <= last->block_id()) {
-        // Backwards edges must land on loop headers.
-        ASSERT(successor->block_id() > dominator_candidate->block_id() ||
-               successor->IsLoopHeader());
-        outstanding_successors++;
-      }
-    }
-  }
-}
-
-
 int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
   for (int i = 0; i < predecessors_.length(); ++i) {
     if (predecessors_[i] == predecessor) return i;
@@ -446,7 +387,7 @@
                        HBasicBlock* dont_visit)
       : visited_count_(0),
         stack_(16),
-        reachable_(block_count, ZONE),
+        reachable_(block_count),
         dont_visit_(dont_visit) {
     PushBlock(entry_block);
     Analyze();
@@ -481,7 +422,7 @@
 };
 
 
-void HGraph::Verify(bool do_full_verify) const {
+void HGraph::Verify() const {
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
 
@@ -532,27 +473,25 @@
   // Check special property of first block to have no predecessors.
   ASSERT(blocks_.at(0)->predecessors()->is_empty());
 
-  if (do_full_verify) {
-    // Check that the graph is fully connected.
-    ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
-    ASSERT(analyzer.visited_count() == blocks_.length());
+  // Check that the graph is fully connected.
+  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+  ASSERT(analyzer.visited_count() == blocks_.length());
 
-    // Check that entry block dominator is NULL.
-    ASSERT(entry_block_->dominator() == NULL);
+  // Check that entry block dominator is NULL.
+  ASSERT(entry_block_->dominator() == NULL);
 
-    // Check dominators.
-    for (int i = 0; i < blocks_.length(); ++i) {
-      HBasicBlock* block = blocks_.at(i);
-      if (block->dominator() == NULL) {
-        // Only start block may have no dominator assigned to.
-        ASSERT(i == 0);
-      } else {
-        // Assert that block is unreachable if dominator must not be visited.
-        ReachabilityAnalyzer dominator_analyzer(entry_block_,
-                                                blocks_.length(),
-                                                block->dominator());
-        ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
-      }
+  // Check dominators.
+  for (int i = 0; i < blocks_.length(); ++i) {
+    HBasicBlock* block = blocks_.at(i);
+    if (block->dominator() == NULL) {
+      // Only start block may have no dominator assigned to.
+      ASSERT(i == 0);
+    } else {
+      // Assert that block is unreachable if dominator must not be visited.
+      ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                              blocks_.length(),
+                                              block->dominator());
+      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
     }
   }
 }
@@ -600,7 +539,7 @@
 HGraphBuilder::HGraphBuilder(CompilationInfo* info,
                              TypeFeedbackOracle* oracle)
     : function_state_(NULL),
-      initial_function_state_(this, info, oracle, NORMAL_RETURN),
+      initial_function_state_(this, info, oracle),
       ast_context_(NULL),
       break_scope_(NULL),
       graph_(NULL),
@@ -682,28 +621,28 @@
 
 Handle<Code> HGraph::Compile(CompilationInfo* info) {
   int values = GetMaximumValueID();
-  if (values > LUnallocated::kMaxVirtualRegisters) {
-    if (FLAG_trace_bailout) {
-      PrintF("Not enough virtual registers for (values).\n");
-    }
+  if (values > LAllocator::max_initial_value_ids()) {
+    if (FLAG_trace_bailout) PrintF("Function is too big\n");
     return Handle<Code>::null();
   }
+
   LAllocator allocator(values, this);
   LChunkBuilder builder(info, this, &allocator);
   LChunk* chunk = builder.Build();
   if (chunk == NULL) return Handle<Code>::null();
 
-  if (!allocator.Allocate(chunk)) {
-    if (FLAG_trace_bailout) {
-      PrintF("Not enough virtual registers (regalloc).\n");
-    }
-    return Handle<Code>::null();
-  }
+  if (!FLAG_alloc_lithium) return Handle<Code>::null();
+
+  allocator.Allocate(chunk);
+
+  if (!FLAG_use_lithium) return Handle<Code>::null();
 
   MacroAssembler assembler(info->isolate(), NULL, 0);
   LCodeGen generator(chunk, &assembler, info);
 
-  chunk->MarkEmptyBlocks();
+  if (FLAG_eliminate_empty_blocks) {
+    chunk->MarkEmptyBlocks();
+  }
 
   if (generator.GenerateCode()) {
     if (FLAG_trace_codegen) {
@@ -730,7 +669,7 @@
 
 void HGraph::Canonicalize() {
   if (!FLAG_use_canonicalizing) return;
-  HPhase phase("H_Canonicalize", this);
+  HPhase phase("Canonicalize", this);
   for (int i = 0; i < blocks()->length(); ++i) {
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
@@ -743,8 +682,8 @@
 
 
 void HGraph::OrderBlocks() {
-  HPhase phase("H_Block ordering");
-  BitVector visited(blocks_.length(), zone());
+  HPhase phase("Block ordering");
+  BitVector visited(blocks_.length());
 
   ZoneList<HBasicBlock*> reverse_result(8);
   HBasicBlock* start = blocks_[0];
@@ -789,7 +728,6 @@
       Postorder(it.Current(), visited, order, block);
     }
   } else {
-    ASSERT(block->IsFinished());
     for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
       Postorder(it.Current(), visited, order, loop_header);
     }
@@ -805,16 +743,14 @@
 
 
 void HGraph::AssignDominators() {
-  HPhase phase("H_Assign dominators", this);
+  HPhase phase("Assign dominators", this);
   for (int i = 0; i < blocks_.length(); ++i) {
-    HBasicBlock* block = blocks_[i];
-    if (block->IsLoopHeader()) {
+    if (blocks_[i]->IsLoopHeader()) {
       // Only the first predecessor of a loop header is from outside the loop.
       // All others are back edges, and thus cannot dominate the loop header.
-      block->AssignCommonDominator(block->predecessors()->first());
-      block->AssignLoopSuccessorDominators();
+      blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->first());
     } else {
-      for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
+      for (int j = 0; j < blocks_[i]->predecessors()->length(); ++j) {
         blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
       }
     }
@@ -824,7 +760,7 @@
 // Mark all blocks that are dominated by an unconditional soft deoptimize to
 // prevent code motion across those blocks.
 void HGraph::PropagateDeoptimizingMark() {
-  HPhase phase("H_Propagate deoptimizing mark", this);
+  HPhase phase("Propagate deoptimizing mark", this);
   MarkAsDeoptimizingRecursively(entry_block());
 }
 
@@ -837,7 +773,7 @@
 }
 
 void HGraph::EliminateRedundantPhis() {
-  HPhase phase("H_Redundant phi elimination", this);
+  HPhase phase("Redundant phi elimination", this);
 
   // Worklist of phis that can potentially be eliminated. Initialized with
   // all phi nodes. When elimination of a phi node modifies another phi node
@@ -871,7 +807,7 @@
 
 
 void HGraph::EliminateUnreachablePhis() {
-  HPhase phase("H_Unreachable phi elimination", this);
+  HPhase phase("Unreachable phi elimination", this);
 
   // Initialize worklist.
   ZoneList<HPhi*> phi_list(blocks_.length());
@@ -914,7 +850,7 @@
 }
 
 
-bool HGraph::CheckArgumentsPhiUses() {
+bool HGraph::CheckPhis() {
   int block_count = blocks_.length();
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -927,11 +863,13 @@
 }
 
 
-bool HGraph::CheckConstPhiUses() {
+bool HGraph::CollectPhis() {
   int block_count = blocks_.length();
+  phi_list_ = new ZoneList<HPhi*>(block_count);
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
+      phi_list_->Add(phi);
       // Check for the hole value (from an uninitialized const).
       for (int k = 0; k < phi->OperandCount(); k++) {
         if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -942,20 +880,8 @@
 }
 
 
-void HGraph::CollectPhis() {
-  int block_count = blocks_.length();
-  phi_list_ = new ZoneList<HPhi*>(block_count);
-  for (int i = 0; i < block_count; ++i) {
-    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
-      HPhi* phi = blocks_[i]->phis()->at(j);
-      phi_list_->Add(phi);
-    }
-  }
-}
-
-
 void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
-  BitVector in_worklist(GetMaximumValueID(), zone());
+  BitVector in_worklist(GetMaximumValueID());
   for (int i = 0; i < worklist->length(); ++i) {
     ASSERT(!in_worklist.Contains(worklist->at(i)->id()));
     in_worklist.Add(worklist->at(i)->id());
@@ -979,8 +905,7 @@
 
 class HRangeAnalysis BASE_EMBEDDED {
  public:
-  explicit HRangeAnalysis(HGraph* graph) :
-      graph_(graph), zone_(graph->isolate()->zone()), changed_ranges_(16) { }
+  explicit HRangeAnalysis(HGraph* graph) : graph_(graph), changed_ranges_(16) {}
 
   void Analyze();
 
@@ -994,7 +919,6 @@
   void AddRange(HValue* value, Range* range);
 
   HGraph* graph_;
-  Zone* zone_;
   ZoneList<HValue*> changed_ranges_;
 };
 
@@ -1010,7 +934,7 @@
 
 
 void HRangeAnalysis::Analyze() {
-  HPhase phase("H_Range analysis", graph_);
+  HPhase phase("Range analysis", graph_);
   Analyze(graph_->entry_block());
 }
 
@@ -1081,14 +1005,14 @@
 
   if (op == Token::EQ || op == Token::EQ_STRICT) {
     // The same range has to apply for value.
-    new_range = range->Copy(zone_);
+    new_range = range->Copy();
   } else if (op == Token::LT || op == Token::LTE) {
-    new_range = range->CopyClearLower(zone_);
+    new_range = range->CopyClearLower();
     if (op == Token::LT) {
       new_range->AddConstant(-1);
     }
   } else if (op == Token::GT || op == Token::GTE) {
-    new_range = range->CopyClearUpper(zone_);
+    new_range = range->CopyClearUpper();
     if (op == Token::GT) {
       new_range->AddConstant(1);
     }
@@ -1103,7 +1027,7 @@
 void HRangeAnalysis::InferRange(HValue* value) {
   ASSERT(!value->HasRange());
   if (!value->representation().IsNone()) {
-    value->ComputeInitialRange(zone_);
+    value->ComputeInitialRange();
     Range* range = value->range();
     TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
                value->id(),
@@ -1124,7 +1048,7 @@
 
 void HRangeAnalysis::AddRange(HValue* value, Range* range) {
   Range* original_range = value->range();
-  value->AddNewRange(range, zone_);
+  value->AddNewRange(range);
   changed_ranges_.Add(value);
   Range* new_range = value->range();
   TraceRange("Updated range of %d set to [%d,%d]\n",
@@ -1165,10 +1089,10 @@
 }
 
 
-void HValueMap::Kill(GVNFlagSet flags) {
-  GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(flags);
-  if (!present_flags_.ContainsAnyOf(depends_flags)) return;
-  present_flags_.RemoveAll();
+void HValueMap::Kill(int flags) {
+  int depends_flags = HValue::ConvertChangesToDependsFlags(flags);
+  if ((present_flags_ & depends_flags) == 0) return;
+  present_flags_ = 0;
   for (int i = 0; i < array_size_; ++i) {
     HValue* value = array_[i].value;
     if (value != NULL) {
@@ -1177,8 +1101,7 @@
       int next;
       for (int current = array_[i].next; current != kNil; current = next) {
         next = lists_[current].next;
-        HValue* value = lists_[current].value;
-        if (value->gvn_flags().ContainsAnyOf(depends_flags)) {
+        if ((lists_[current].value->flags() & depends_flags) != 0) {
           // Drop it.
           count_--;
           lists_[current].next = free_list_head_;
@@ -1187,14 +1110,13 @@
           // Keep it.
           lists_[current].next = kept;
           kept = current;
-          present_flags_.Add(value->gvn_flags());
+          present_flags_ |= lists_[current].value->flags();
         }
       }
       array_[i].next = kept;
 
       // Now possibly drop directly indexed element.
-      value = array_[i].value;
-      if (value->gvn_flags().ContainsAnyOf(depends_flags)) {  // Drop it.
+      if ((array_[i].value->flags() & depends_flags) != 0) {  // Drop it.
         count_--;
         int head = array_[i].next;
         if (head == kNil) {
@@ -1206,7 +1128,7 @@
           free_list_head_ = head;
         }
       } else {
-        present_flags_.Add(value->gvn_flags());  // Keep it.
+        present_flags_ |= array_[i].value->flags();  // Keep it.
       }
     }
   }
@@ -1408,33 +1330,28 @@
   explicit HGlobalValueNumberer(HGraph* graph, CompilationInfo* info)
       : graph_(graph),
         info_(info),
-        removed_side_effects_(false),
         block_side_effects_(graph->blocks()->length()),
         loop_side_effects_(graph->blocks()->length()),
         visited_on_paths_(graph->zone(), graph->blocks()->length()) {
     ASSERT(info->isolate()->heap()->allow_allocation(false));
-    block_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
-    loop_side_effects_.AddBlock(GVNFlagSet(), graph_->blocks()->length());
+    block_side_effects_.AddBlock(0, graph_->blocks()->length());
+    loop_side_effects_.AddBlock(0, graph_->blocks()->length());
   }
   ~HGlobalValueNumberer() {
     ASSERT(!info_->isolate()->heap()->allow_allocation(true));
   }
 
-  // Returns true if values with side effects are removed.
-  bool Analyze();
+  void Analyze();
 
  private:
-  GVNFlagSet CollectSideEffectsOnPathsToDominatedBlock(
-      HBasicBlock* dominator,
-      HBasicBlock* dominated);
+  int CollectSideEffectsOnPathsToDominatedBlock(HBasicBlock* dominator,
+                                                HBasicBlock* dominated);
   void AnalyzeBlock(HBasicBlock* block, HValueMap* map);
   void ComputeBlockSideEffects();
   void LoopInvariantCodeMotion();
   void ProcessLoopBlock(HBasicBlock* block,
                         HBasicBlock* before_loop,
-                        GVNFlagSet loop_kills,
-                        GVNFlagSet* accumulated_first_time_depends,
-                        GVNFlagSet* accumulated_first_time_changes);
+                        int loop_kills);
   bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
 
@@ -1444,13 +1361,12 @@
 
   HGraph* graph_;
   CompilationInfo* info_;
-  bool removed_side_effects_;
 
   // A map of block IDs to their side effects.
-  ZoneList<GVNFlagSet> block_side_effects_;
+  ZoneList<int> block_side_effects_;
 
   // A map of loop header block IDs to their loop's side effects.
-  ZoneList<GVNFlagSet> loop_side_effects_;
+  ZoneList<int> loop_side_effects_;
 
   // Used when collecting side effects on paths from dominator to
   // dominated.
@@ -1458,53 +1374,39 @@
 };
 
 
-bool HGlobalValueNumberer::Analyze() {
-  removed_side_effects_ = false;
+void HGlobalValueNumberer::Analyze() {
   ComputeBlockSideEffects();
   if (FLAG_loop_invariant_code_motion) {
     LoopInvariantCodeMotion();
   }
   HValueMap* map = new(zone()) HValueMap();
   AnalyzeBlock(graph_->entry_block(), map);
-  return removed_side_effects_;
 }
 
 
 void HGlobalValueNumberer::ComputeBlockSideEffects() {
-  // The Analyze phase of GVN can be called multiple times. Clear loop side
-  // effects before computing them to erase the contents from previous Analyze
-  // passes.
-  for (int i = 0; i < loop_side_effects_.length(); ++i) {
-    loop_side_effects_[i].RemoveAll();
-  }
   for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
     // Compute side effects for the block.
     HBasicBlock* block = graph_->blocks()->at(i);
     HInstruction* instr = block->first();
     int id = block->block_id();
-    GVNFlagSet side_effects;
+    int side_effects = 0;
     while (instr != NULL) {
-      side_effects.Add(instr->ChangesFlags());
-      if (instr->IsSoftDeoptimize()) {
-        block_side_effects_[id].RemoveAll();
-        side_effects.RemoveAll();
-        break;
-      }
+      side_effects |= instr->ChangesFlags();
       instr = instr->next();
     }
-    block_side_effects_[id].Add(side_effects);
+    block_side_effects_[id] |= side_effects;
 
     // Loop headers are part of their loop.
     if (block->IsLoopHeader()) {
-      loop_side_effects_[id].Add(side_effects);
+      loop_side_effects_[id] |= side_effects;
     }
 
     // Propagate loop side effects upwards.
     if (block->HasParentLoopHeader()) {
       int header_id = block->parent_loop_header()->block_id();
-      loop_side_effects_[header_id].Add(block->IsLoopHeader()
-                                        ? loop_side_effects_[id]
-                                        : side_effects);
+      loop_side_effects_[header_id] |=
+          block->IsLoopHeader() ? loop_side_effects_[id] : side_effects;
     }
   }
 }
@@ -1514,113 +1416,49 @@
   for (int i = graph_->blocks()->length() - 1; i >= 0; --i) {
     HBasicBlock* block = graph_->blocks()->at(i);
     if (block->IsLoopHeader()) {
-      GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
+      int side_effects = loop_side_effects_[block->block_id()];
       TraceGVN("Try loop invariant motion for block B%d effects=0x%x\n",
                block->block_id(),
-               side_effects.ToIntegral());
+               side_effects);
 
-      GVNFlagSet accumulated_first_time_depends;
-      GVNFlagSet accumulated_first_time_changes;
       HBasicBlock* last = block->loop_information()->GetLastBackEdge();
       for (int j = block->block_id(); j <= last->block_id(); ++j) {
-        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects,
-                         &accumulated_first_time_depends,
-                         &accumulated_first_time_changes);
+        ProcessLoopBlock(graph_->blocks()->at(j), block, side_effects);
       }
     }
   }
 }
 
 
-void HGlobalValueNumberer::ProcessLoopBlock(
-    HBasicBlock* block,
-    HBasicBlock* loop_header,
-    GVNFlagSet loop_kills,
-    GVNFlagSet* first_time_depends,
-    GVNFlagSet* first_time_changes) {
+void HGlobalValueNumberer::ProcessLoopBlock(HBasicBlock* block,
+                                            HBasicBlock* loop_header,
+                                            int loop_kills) {
   HBasicBlock* pre_header = loop_header->predecessors()->at(0);
-  GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
+  int depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
   TraceGVN("Loop invariant motion for B%d depends_flags=0x%x\n",
            block->block_id(),
-           depends_flags.ToIntegral());
+           depends_flags);
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
-    bool hoisted = false;
-    if (instr->CheckFlag(HValue::kUseGVN)) {
-      TraceGVN("Checking instruction %d (%s) instruction GVN flags 0x%X, "
-               "loop kills 0x%X\n",
+    if (instr->CheckFlag(HValue::kUseGVN) &&
+        (instr->flags() & depends_flags) == 0) {
+      TraceGVN("Checking instruction %d (%s)\n",
                instr->id(),
-               instr->Mnemonic(),
-               instr->gvn_flags().ToIntegral(),
-               depends_flags.ToIntegral());
-      bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
-      if (instr->IsTransitionElementsKind()) {
-        // It's possible to hoist transitions out of a loop as long as the
-        // hoisting wouldn't move the transition past a DependsOn of one of its
-        // changes or any instructions that might change an object's map or
-        // elements contents.
-        GVNFlagSet changes = instr->ChangesFlags();
-        GVNFlagSet hoist_depends_blockers =
-            HValue::ConvertChangesToDependsFlags(changes);
-        // In addition to not hoisting a transition above other instructions
-        // that depend on anything the transition changes, it must not be
-        // hoisted above map changes and stores to an elements backing store
-        // that the transition might change.
-        GVNFlagSet hoist_change_blockers = changes;
-        hoist_change_blockers.Add(kChangesMaps);
-        HTransitionElementsKind* trans = HTransitionElementsKind::cast(instr);
-        if (trans->original_map()->has_fast_double_elements()) {
-          hoist_change_blockers.Add(kChangesDoubleArrayElements);
-        }
-        if (trans->transitioned_map()->has_fast_double_elements()) {
-          hoist_change_blockers.Add(kChangesArrayElements);
-        }
-        TraceGVN("Checking dependencies on HTransitionElementsKind %d (%s) "
-                 "hoist depends blockers 0x%X, hoist change blockers 0x%X, "
-                 "accumulated depends 0x%X, accumulated changes 0x%X\n",
-                 instr->id(),
-                 instr->Mnemonic(),
-                 hoist_depends_blockers.ToIntegral(),
-                 hoist_change_blockers.ToIntegral(),
-                 first_time_depends->ToIntegral(),
-                 first_time_changes->ToIntegral());
-        // It's possible to hoist transitions from the current loop only if
-        // they dominate all of the successor blocks in the same loop and there
-        // are not any instructions that have Changes/DependsOn that intervene
-        // between them and the beginning of the loop header.
-        bool in_nested_loop = block != loop_header &&
-            ((block->parent_loop_header() != loop_header) ||
-             block->IsLoopHeader());
-        can_hoist = !in_nested_loop &&
-            block->IsLoopSuccessorDominator() &&
-            !first_time_depends->ContainsAnyOf(hoist_depends_blockers) &&
-            !first_time_changes->ContainsAnyOf(hoist_change_blockers);
-      }
-
-      if (can_hoist) {
-        bool inputs_loop_invariant = true;
-        for (int i = 0; i < instr->OperandCount(); ++i) {
-          if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
-            inputs_loop_invariant = false;
-          }
-        }
-
-        if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
-          TraceGVN("Hoisting loop invariant instruction %d\n", instr->id());
-          // Move the instruction out of the loop.
-          instr->Unlink();
-          instr->InsertBefore(pre_header->end());
-          if (instr->HasSideEffects()) removed_side_effects_ = true;
-          hoisted = true;
+               instr->Mnemonic());
+      bool inputs_loop_invariant = true;
+      for (int i = 0; i < instr->OperandCount(); ++i) {
+        if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+          inputs_loop_invariant = false;
         }
       }
-    }
-    if (!hoisted) {
-      // If an instruction is not hoisted, we have to account for its side
-      // effects when hoisting later HTransitionElementsKind instructions.
-      first_time_depends->Add(instr->DependsOnFlags());
-      first_time_changes->Add(instr->ChangesFlags());
+
+      if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+        TraceGVN("Found loop invariant instruction %d\n", instr->id());
+        // Move the instruction out of the loop.
+        instr->Unlink();
+        instr->InsertBefore(pre_header->end());
+      }
     }
     instr = next;
   }
@@ -1640,20 +1478,20 @@
 }
 
 
-GVNFlagSet HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
+int HGlobalValueNumberer::CollectSideEffectsOnPathsToDominatedBlock(
     HBasicBlock* dominator, HBasicBlock* dominated) {
-  GVNFlagSet side_effects;
+  int side_effects = 0;
   for (int i = 0; i < dominated->predecessors()->length(); ++i) {
     HBasicBlock* block = dominated->predecessors()->at(i);
     if (dominator->block_id() < block->block_id() &&
         block->block_id() < dominated->block_id() &&
         visited_on_paths_.Add(block->block_id())) {
-      side_effects.Add(block_side_effects_[block->block_id()]);
+      side_effects |= block_side_effects_[block->block_id()];
       if (block->IsLoopHeader()) {
-        side_effects.Add(loop_side_effects_[block->block_id()]);
+        side_effects |= loop_side_effects_[block->block_id()];
       }
-      side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
-          dominator, block));
+      side_effects |= CollectSideEffectsOnPathsToDominatedBlock(
+          dominator, block);
     }
   }
   return side_effects;
@@ -1674,14 +1512,13 @@
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
-    GVNFlagSet flags = instr->ChangesFlags();
-    if (!flags.IsEmpty()) {
+    int flags = instr->ChangesFlags();
+    if (flags != 0) {
+      ASSERT(!instr->CheckFlag(HValue::kUseGVN));
       // Clear all instructions in the map that are affected by side effects.
       map->Kill(flags);
       TraceGVN("Instruction %d kills\n", instr->id());
-    }
-    if (instr->CheckFlag(HValue::kUseGVN)) {
-      ASSERT(!instr->HasObservableSideEffects());
+    } else if (instr->CheckFlag(HValue::kUseGVN)) {
       HValue* other = map->Lookup(instr);
       if (other != NULL) {
         ASSERT(instr->Equals(other) && other->Equals(instr));
@@ -1690,7 +1527,6 @@
                  instr->Mnemonic(),
                  other->id(),
                  other->Mnemonic());
-        if (instr->HasSideEffects()) removed_side_effects_ = true;
         instr->DeleteAndReplaceWith(other);
       } else {
         map->Add(instr);
@@ -1726,9 +1562,7 @@
 class HInferRepresentation BASE_EMBEDDED {
  public:
   explicit HInferRepresentation(HGraph* graph)
-      : graph_(graph),
-        worklist_(8),
-        in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
+      : graph_(graph), worklist_(8), in_worklist_(graph->GetMaximumValueID()) {}
 
   void Analyze();
 
@@ -1766,12 +1600,6 @@
   ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
   Representation inferred = current->InferredRepresentation();
   if (inferred.IsSpecialization()) {
-    if (FLAG_trace_representation) {
-      PrintF("Changing #%d representation %s -> %s based on inputs\n",
-             current->id(),
-             r.Mnemonic(),
-             inferred.Mnemonic());
-    }
     current->ChangeRepresentation(inferred);
     AddDependantsToWorklist(current);
   }
@@ -1799,12 +1627,6 @@
   Representation new_rep = TryChange(value);
   if (!new_rep.IsNone()) {
     if (!value->representation().Equals(new_rep)) {
-      if (FLAG_trace_representation) {
-        PrintF("Changing #%d representation %s -> %s based on uses\n",
-               value->id(),
-               r.Mnemonic(),
-               new_rep.Mnemonic());
-      }
       value->ChangeRepresentation(new_rep);
       AddDependantsToWorklist(value);
     }
@@ -1821,7 +1643,7 @@
     Representation rep = use->RequiredInputRepresentation(it.index());
     if (rep.IsNone()) continue;
     if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
-    use_count[rep.kind()] += use->LoopWeight();
+    ++use_count[rep.kind()];
   }
   int tagged_count = use_count[Representation::kTagged];
   int double_count = use_count[Representation::kDouble];
@@ -1834,7 +1656,7 @@
   }
 
   // Prefer unboxing over boxing, the latter is more expensive.
-  if (tagged_count > non_tagged_count) return Representation::None();
+  if (tagged_count > non_tagged_count) return Representation::None();
 
   // Prefer Integer32 over Double, if possible.
   if (int32_count > 0 && value->IsConvertibleToInteger()) {
@@ -1848,7 +1670,7 @@
 
 
 void HInferRepresentation::Analyze() {
-  HPhase phase("H_Infer representations", graph_);
+  HPhase phase("Infer representations", graph_);
 
   // (1) Initialize bit vectors and count real uses. Each phi gets a
   // bit-vector of length <number of phis>.
@@ -1857,7 +1679,7 @@
   ZoneList<BitVector*> connected_phis(phi_count);
   for (int i = 0; i < phi_count; ++i) {
     phi_list->at(i)->InitRealUses(i);
-    BitVector* connected_set = new(zone()) BitVector(phi_count, graph_->zone());
+    BitVector* connected_set = new(zone()) BitVector(phi_count);
     connected_set->Add(i);
     connected_phis.Add(connected_set);
   }
@@ -1927,7 +1749,7 @@
 
 
 void HGraph::InitializeInferredTypes() {
-  HPhase phase("H_Inferring types", this);
+  HPhase phase("Inferring types", this);
   InitializeInferredTypes(0, this->blocks_.length() - 1);
 }
 
@@ -2029,7 +1851,7 @@
   }
 
   if (new_value == NULL) {
-    new_value = new(zone()) HChange(value, to,
+    new_value = new(zone()) HChange(value, value->representation(), to,
                                     is_truncating, deoptimize_on_undefined);
   }
 
@@ -2064,7 +1886,8 @@
 
 
 void HGraph::InsertRepresentationChanges() {
-  HPhase phase("H_Representation changes", this);
+  HPhase phase("Insert representation changes", this);
+
 
   // Compute truncation flag for phis: Initially assume that all
   // int32-phis allow truncation and iteratively remove the ones that
@@ -2083,9 +1906,13 @@
     for (int i = 0; i < phi_list()->length(); i++) {
       HPhi* phi = phi_list()->at(i);
       if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
-      if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
-        phi->ClearFlag(HValue::kTruncatingToInt32);
-        change = true;
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
+        if (!use->CheckFlag(HValue::kTruncatingToInt32)) {
+          phi->ClearFlag(HValue::kTruncatingToInt32);
+          change = true;
+          break;
+        }
       }
     }
   }
@@ -2120,7 +1947,7 @@
 
 
 void HGraph::MarkDeoptimizeOnUndefined() {
-  HPhase phase("H_MarkDeoptimizeOnUndefined", this);
+  HPhase phase("MarkDeoptimizeOnUndefined", this);
   // Compute DeoptimizeOnUndefined flag for phis.
   // Any phi that can reach a use with DeoptimizeOnUndefined set must
   // have DeoptimizeOnUndefined set.  Currently only HCompareIDAndBranch, with
@@ -2142,7 +1969,7 @@
 
 
 void HGraph::ComputeMinusZeroChecks() {
-  BitVector visited(GetMaximumValueID(), zone());
+  BitVector visited(GetMaximumValueID());
   for (int i = 0; i < blocks_.length(); ++i) {
     for (HInstruction* current = blocks_[i]->first();
          current != NULL;
@@ -2169,13 +1996,11 @@
 // a (possibly inlined) function.
 FunctionState::FunctionState(HGraphBuilder* owner,
                              CompilationInfo* info,
-                             TypeFeedbackOracle* oracle,
-                             ReturnHandlingFlag return_handling)
+                             TypeFeedbackOracle* oracle)
     : owner_(owner),
       compilation_info_(info),
       oracle_(oracle),
       call_context_(NULL),
-      return_handling_(return_handling),
       function_return_(NULL),
       test_context_(NULL),
       outer_(owner->function_state()) {
@@ -2218,7 +2043,6 @@
       for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
-  ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
   original_length_ = owner->environment()->length();
 #endif
 }
@@ -2232,16 +2056,14 @@
 EffectContext::~EffectContext() {
   ASSERT(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
-         (owner()->environment()->length() == original_length_ &&
-          owner()->environment()->frame_type() == JS_FUNCTION));
+         owner()->environment()->length() == original_length_);
 }
 
 
 ValueContext::~ValueContext() {
   ASSERT(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
-         (owner()->environment()->length() == original_length_ + 1 &&
-          owner()->environment()->frame_type() == JS_FUNCTION));
+         owner()->environment()->length() == original_length_ + 1);
 }
 
 
@@ -2268,12 +2090,12 @@
 void EffectContext::ReturnInstruction(HInstruction* instr, int ast_id) {
   ASSERT(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
-  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void EffectContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  ASSERT(!instr->HasSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -2291,12 +2113,12 @@
   }
   owner()->AddInstruction(instr);
   owner()->Push(instr);
-  if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+  if (instr->HasSideEffects()) owner()->AddSimulate(ast_id);
 }
 
 
 void ValueContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  ASSERT(!instr->HasSideEffects());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout("bad value context for arguments object value");
   }
@@ -2321,7 +2143,7 @@
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
   // this one isn't actually needed (and wouldn't work if it were targeted).
-  if (instr->HasObservableSideEffects()) {
+  if (instr->HasSideEffects()) {
     builder->Push(instr);
     builder->AddSimulate(ast_id);
     builder->Pop();
@@ -2331,14 +2153,14 @@
 
 
 void TestContext::ReturnControl(HControlInstruction* instr, int ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  ASSERT(!instr->HasSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
   instr->SetSuccessorAt(1, empty_false);
   owner()->current_block()->Finish(instr);
-  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
-  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+  empty_true->Goto(if_true());
+  empty_false->Goto(if_false());
   owner()->set_current_block(NULL);
 }
 
@@ -2359,8 +2181,8 @@
   HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
   builder->current_block()->Finish(test);
 
-  empty_true->Goto(if_true(), owner()->function_state()->drop_extra());
-  empty_false->Goto(if_false(), owner()->function_state()->drop_extra());
+  empty_true->Goto(if_true());
+  empty_false->Goto(if_false());
   builder->set_current_block(NULL);
 }
 
@@ -2446,7 +2268,7 @@
   if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
 
   {
-    HPhase phase("H_Block building");
+    HPhase phase("Block building");
     current_block_ = graph()->entry_block();
 
     Scope* scope = info()->scope();
@@ -2454,11 +2276,7 @@
       Bailout("function with illegal redeclaration");
       return NULL;
     }
-    if (scope->calls_eval()) {
-      Bailout("function calls eval");
-      return NULL;
-    }
-    SetUpScope(scope);
+    SetupScope(scope);
 
     // Add an edge to the body entry.  This is warty: the graph's start
     // environment will be used by the Lithium translation as the initial
@@ -2484,7 +2302,7 @@
     // Handle implicit declaration of the function name in named function
     // expressions before other declarations.
     if (scope->is_function_scope() && scope->function() != NULL) {
-      HandleDeclaration(scope->function(), CONST, NULL, NULL);
+      HandleDeclaration(scope->function(), Variable::CONST, NULL);
     }
     VisitDeclarations(scope->declarations());
     AddSimulate(AstNode::kDeclarationsId);
@@ -2505,31 +2323,16 @@
 
   graph()->OrderBlocks();
   graph()->AssignDominators();
-
-#ifdef DEBUG
-  // Do a full verify after building the graph and computing dominators.
-  graph()->Verify(true);
-#endif
-
   graph()->PropagateDeoptimizingMark();
-  if (!graph()->CheckConstPhiUses()) {
-    Bailout("Unsupported phi use of const variable");
-    return NULL;
-  }
   graph()->EliminateRedundantPhis();
-  if (!graph()->CheckArgumentsPhiUses()) {
-    Bailout("Unsupported phi use of arguments");
+  if (!graph()->CheckPhis()) {
+    Bailout("Unsupported phi use of arguments object");
     return NULL;
   }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
-  graph()->CollectPhis();
-
-  if (graph()->has_osr_loop_entry()) {
-    const ZoneList<HPhi*>* phis = graph()->osr_loop_entry()->phis();
-    for (int j = 0; j < phis->length(); j++) {
-      HPhi* phi = phis->at(j);
-      graph()->osr_values()->at(phi->merged_index())->set_incoming_value(phi);
-    }
+  if (!graph()->CollectPhis()) {
+    Bailout("Unsupported phi use of uninitialized constant");
+    return NULL;
   }
 
   HInferRepresentation rep(graph());
@@ -2543,16 +2346,9 @@
 
   // Perform common subexpression elimination and loop-invariant code motion.
   if (FLAG_use_gvn) {
-    HPhase phase("H_Global value numbering", graph());
+    HPhase phase("Global value numbering", graph());
     HGlobalValueNumberer gvn(graph(), info());
-    bool removed_side_effects = gvn.Analyze();
-    // Trigger a second analysis pass to further eliminate duplicate values that
-    // could only be discovered by removing side-effect-generating instructions
-    // during the first pass.
-    if (FLAG_smi_only_arrays && removed_side_effects) {
-      removed_side_effects = gvn.Analyze();
-      ASSERT(!removed_side_effects);
-    }
+    gvn.Analyze();
   }
 
   if (FLAG_use_range) {
@@ -2576,7 +2372,7 @@
 
 
 void HGraph::ReplaceCheckedValues() {
-  HPhase phase("H_Replace checked values", this);
+  HPhase phase("Replace checked values", this);
   for (int i = 0; i < blocks()->length(); ++i) {
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
@@ -2616,8 +2412,8 @@
 }
 
 
-template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+template <int V>
+HInstruction* HGraphBuilder::PreProcessCall(HCall<V>* call) {
   int count = call->argument_count();
   ZoneList<HValue*> arguments(count);
   for (int i = 0; i < count; ++i) {
@@ -2631,16 +2427,12 @@
 }
 
 
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HGraphBuilder::SetupScope(Scope* scope) {
   HConstant* undefined_constant = new(zone()) HConstant(
       isolate()->factory()->undefined_value(), Representation::Tagged());
   AddInstruction(undefined_constant);
   graph_->set_undefined_constant(undefined_constant);
 
-  HArgumentsObject* object = new(zone()) HArgumentsObject;
-  AddInstruction(object);
-  graph()->SetArgumentsObject(object);
-
   // Set the initial values of parameters including "this".  "This" has
   // parameter index 0.
   ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
@@ -2667,9 +2459,10 @@
     if (!scope->arguments()->IsStackAllocated()) {
       return Bailout("context-allocated arguments");
     }
-
-    environment()->Bind(scope->arguments(),
-                        graph()->GetArgumentsObject());
+    HArgumentsObject* object = new(zone()) HArgumentsObject;
+    AddInstruction(object);
+    graph()->SetArgumentsObject(object);
+    environment()->Bind(scope->arguments(), object);
   }
 }
 
@@ -2773,20 +2566,12 @@
 
 HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
     BreakableStatement* stmt,
-    BreakType type,
-    int* drop_extra) {
-  *drop_extra = 0;
+    BreakType type) {
   BreakAndContinueScope* current = this;
   while (current != NULL && current->info()->target() != stmt) {
-    *drop_extra += current->info()->drop_extra();
     current = current->next();
   }
   ASSERT(current != NULL);  // Always found (unless stack is malformed).
-
-  if (type == BREAK) {
-    *drop_extra += current->info()->drop_extra();
-  }
-
   HBasicBlock* block = NULL;
   switch (type) {
     case BREAK:
@@ -2814,11 +2599,7 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  int drop_extra = 0;
-  HBasicBlock* continue_block = break_scope()->Get(stmt->target(),
-                                                   CONTINUE,
-                                                   &drop_extra);
-  Drop(drop_extra);
+  HBasicBlock* continue_block = break_scope()->Get(stmt->target(), CONTINUE);
   current_block()->Goto(continue_block);
   set_current_block(NULL);
 }
@@ -2828,11 +2609,7 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  int drop_extra = 0;
-  HBasicBlock* break_block = break_scope()->Get(stmt->target(),
-                                                BREAK,
-                                                &drop_extra);
-  Drop(drop_extra);
+  HBasicBlock* break_block = break_scope()->Get(stmt->target(), BREAK);
   current_block()->Goto(break_block);
   set_current_block(NULL);
 }
@@ -2848,38 +2625,7 @@
     CHECK_ALIVE(VisitForValue(stmt->expression()));
     HValue* result = environment()->Pop();
     current_block()->FinishExit(new(zone()) HReturn(result));
-  } else if (function_state()->is_construct()) {
-    // Return from an inlined construct call.  In a test context the return
-    // value will always evaluate to true, in a value context the return value
-    // needs to be a JSObject.
-    if (context->IsTest()) {
-      TestContext* test = TestContext::cast(context);
-      CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(test->if_true(), function_state()->drop_extra());
-    } else if (context->IsEffect()) {
-      CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return(), function_state()->drop_extra());
-    } else {
-      ASSERT(context->IsValue());
-      CHECK_ALIVE(VisitForValue(stmt->expression()));
-      HValue* return_value = Pop();
-      HValue* receiver = environment()->Lookup(0);
-      HHasInstanceTypeAndBranch* typecheck =
-          new(zone()) HHasInstanceTypeAndBranch(return_value,
-                                                FIRST_SPEC_OBJECT_TYPE,
-                                                LAST_SPEC_OBJECT_TYPE);
-      HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
-      HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
-      typecheck->SetSuccessorAt(0, if_spec_object);
-      typecheck->SetSuccessorAt(1, not_spec_object);
-      current_block()->Finish(typecheck);
-      if_spec_object->AddLeaveInlined(return_value,
-                                      function_return(),
-                                      function_state()->drop_extra());
-      not_spec_object->AddLeaveInlined(receiver,
-                                       function_return(),
-                                       function_state()->drop_extra());
-    }
+    set_current_block(NULL);
   } else {
     // Return from an inlined function, visit the subexpression in the
     // expression context of the call.
@@ -2890,17 +2636,15 @@
                       test->if_false());
     } else if (context->IsEffect()) {
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
-      current_block()->Goto(function_return(), function_state()->drop_extra());
+      current_block()->Goto(function_return());
     } else {
       ASSERT(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
-      HValue* return_value = Pop();
-      current_block()->AddLeaveInlined(return_value,
-                                       function_return(),
-                                       function_state()->drop_extra());
+      HValue* return_value = environment()->Pop();
+      current_block()->AddLeaveInlined(return_value, function_return());
     }
+    set_current_block(NULL);
   }
-  set_current_block(NULL);
 }
 
 
@@ -2925,98 +2669,43 @@
     return Bailout("SwitchStatement: too many clauses");
   }
 
-  HValue* context = environment()->LookupContext();
-
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
 
-  SwitchType switch_type = UNKNOWN_SWITCH;
-
-  // 1. Extract clause type
+  // 1. Build all the tests, with dangling true branches.  Unconditionally
+  // deoptimize if we encounter a non-smi comparison.
   for (int i = 0; i < clause_count; ++i) {
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
-
-    if (switch_type == UNKNOWN_SWITCH) {
-      if (clause->label()->IsSmiLiteral()) {
-        switch_type = SMI_SWITCH;
-      } else if (clause->label()->IsStringLiteral()) {
-        switch_type = STRING_SWITCH;
-      } else {
-        return Bailout("SwitchStatement: non-literal switch label");
-      }
-    } else if ((switch_type == STRING_SWITCH &&
-                !clause->label()->IsStringLiteral()) ||
-               (switch_type == SMI_SWITCH &&
-                !clause->label()->IsSmiLiteral())) {
-      return Bailout("SwitchStatemnt: mixed label types are not supported");
-    }
-  }
-
-  HUnaryControlInstruction* string_check = NULL;
-  HBasicBlock* not_string_block = NULL;
-
-  // Test switch's tag value if all clauses are string literals
-  if (switch_type == STRING_SWITCH) {
-    string_check = new(zone()) HIsStringAndBranch(tag_value);
-    first_test_block = graph()->CreateBasicBlock();
-    not_string_block = graph()->CreateBasicBlock();
-
-    string_check->SetSuccessorAt(0, first_test_block);
-    string_check->SetSuccessorAt(1, not_string_block);
-    current_block()->Finish(string_check);
-
-    set_current_block(first_test_block);
-  }
-
-  // 2. Build all the tests, with dangling true branches
-  int default_id = AstNode::kNoNumber;
-  for (int i = 0; i < clause_count; ++i) {
-    CaseClause* clause = clauses->at(i);
-    if (clause->is_default()) {
-      default_id = clause->EntryId();
-      continue;
-    }
-    if (switch_type == SMI_SWITCH) {
-      clause->RecordTypeFeedback(oracle());
+    if (!clause->label()->IsSmiLiteral()) {
+      return Bailout("SwitchStatement: non-literal switch label");
     }
 
-    // Generate a compare and branch.
+    // Unconditionally deoptimize on the first non-smi compare.
+    clause->RecordTypeFeedback(oracle());
+    if (!clause->IsSmiCompare()) {
+      // Finish with deoptimize and add uses of environment values to
+      // account for invisible uses.
+      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+      set_current_block(NULL);
+      break;
+    }
+
+    // Otherwise generate a compare and branch.
     CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-
-    HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+    HCompareIDAndBranch* compare =
+        new(zone()) HCompareIDAndBranch(tag_value,
+                                        label_value,
+                                        Token::EQ_STRICT);
+    compare->SetInputRepresentation(Representation::Integer32());
     HBasicBlock* body_block = graph()->CreateBasicBlock();
-
-    HControlInstruction* compare;
-
-    if (switch_type == SMI_SWITCH) {
-      if (!clause->IsSmiCompare()) {
-        // Finish with deoptimize and add uses of enviroment values to
-        // account for invisible uses.
-        current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-        set_current_block(NULL);
-        break;
-      }
-
-      HCompareIDAndBranch* compare_ =
-          new(zone()) HCompareIDAndBranch(tag_value,
-                                          label_value,
-                                          Token::EQ_STRICT);
-      compare_->SetInputRepresentation(Representation::Integer32());
-      compare = compare_;
-    } else {
-      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
-                                                     label_value,
-                                                     Token::EQ_STRICT);
-    }
-
+    HBasicBlock* next_test_block = graph()->CreateBasicBlock();
     compare->SetSuccessorAt(0, body_block);
     compare->SetSuccessorAt(1, next_test_block);
     current_block()->Finish(compare);
-
     set_current_block(next_test_block);
   }
 
@@ -3024,18 +2713,10 @@
   // exit.  This block is NULL if we deoptimized.
   HBasicBlock* last_block = current_block();
 
-  if (not_string_block != NULL) {
-    int join_id = (default_id != AstNode::kNoNumber)
-        ? default_id
-        : stmt->ExitId();
-    last_block = CreateJoin(last_block, not_string_block, join_id);
-  }
-
-  // 3. Loop over the clauses and the linked list of tests in lockstep,
+  // 2. Loop over the clauses and the linked list of tests in lockstep,
   // translating the clause bodies.
   HBasicBlock* curr_test_block = first_test_block;
   HBasicBlock* fall_through_block = NULL;
-
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
     for (int i = 0; i < clause_count; ++i) {
@@ -3104,8 +2785,8 @@
 }
 
 
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
-  if (!HasOsrEntryAt(statement)) return false;
+void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+  if (!HasOsrEntryAt(statement)) return;
 
   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
@@ -3118,30 +2799,15 @@
 
   set_current_block(osr_entry);
   int osr_entry_id = statement->OsrEntryId();
-  int first_expression_index = environment()->first_expression_index();
-  int length = environment()->length();
-  ZoneList<HUnknownOSRValue*>* osr_values =
-      new(zone()) ZoneList<HUnknownOSRValue*>(length);
-
-  for (int i = 0; i < first_expression_index; ++i) {
+  // We want the correct environment at the OsrEntry instruction.  Build
+  // it explicitly.  The expression stack should be empty.
+  ASSERT(environment()->ExpressionStackIsEmpty());
+  for (int i = 0; i < environment()->length(); ++i) {
     HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
     AddInstruction(osr_value);
     environment()->Bind(i, osr_value);
-    osr_values->Add(osr_value);
   }
 
-  if (first_expression_index != length) {
-    environment()->Drop(length - first_expression_index);
-    for (int i = first_expression_index; i < length; ++i) {
-      HUnknownOSRValue* osr_value = new(zone()) HUnknownOSRValue;
-      AddInstruction(osr_value);
-      environment()->Push(osr_value);
-      osr_values->Add(osr_value);
-    }
-  }
-
-  graph()->set_osr_values(osr_values);
-
   AddSimulate(osr_entry_id);
   AddInstruction(new(zone()) HOsrEntry(osr_entry_id));
   HContext* context = new(zone()) HContext;
@@ -3150,7 +2816,6 @@
   current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
   set_current_block(loop_predecessor);
-  return true;
 }
 
 
@@ -3174,11 +2839,10 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
-  bool osr_entry = PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
-  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
 
   BreakAndContinueInfo break_info(stmt);
   CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
@@ -3217,12 +2881,10 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   ASSERT(current_block() != NULL);
-  bool osr_entry = PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
-  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
 
   // If the condition is constant true, do not generate a branch.
   HBasicBlock* loop_successor = NULL;
@@ -3243,6 +2905,7 @@
 
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -3264,11 +2927,10 @@
     CHECK_ALIVE(Visit(stmt->init()));
   }
   ASSERT(current_block() != NULL);
-  bool osr_entry = PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
   HBasicBlock* loop_entry = CreateLoopHeaderBlock();
   current_block()->Goto(loop_entry);
   set_current_block(loop_entry);
-  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
 
   HBasicBlock* loop_successor = NULL;
   if (stmt->cond() != NULL) {
@@ -3288,6 +2950,7 @@
 
   BreakAndContinueInfo break_info(stmt);
   if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
     CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
   }
   HBasicBlock* body_exit =
@@ -3312,119 +2975,7 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-
-  if (!FLAG_optimize_for_in) {
-    return Bailout("ForInStatement optimization is disabled");
-  }
-
-  if (!oracle()->IsForInFastCase(stmt)) {
-    return Bailout("ForInStatement is not fast case");
-  }
-
-  if (!stmt->each()->IsVariableProxy() ||
-      !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
-    return Bailout("ForInStatement with non-local each variable");
-  }
-
-  Variable* each_var = stmt->each()->AsVariableProxy()->var();
-
-  CHECK_ALIVE(VisitForValue(stmt->enumerable()));
-  HValue* enumerable = Top();  // Leave enumerable at the top.
-
-  HInstruction* map = AddInstruction(new(zone()) HForInPrepareMap(
-      environment()->LookupContext(), enumerable));
-  AddSimulate(stmt->PrepareId());
-
-  HInstruction* array = AddInstruction(
-      new(zone()) HForInCacheArray(
-          enumerable,
-          map,
-          DescriptorArray::kEnumCacheBridgeCacheIndex));
-
-  HInstruction* array_length = AddInstruction(
-      new(zone()) HFixedArrayBaseLength(array));
-
-  HInstruction* start_index = AddInstruction(new(zone()) HConstant(
-      Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
-
-  Push(map);
-  Push(array);
-  Push(array_length);
-  Push(start_index);
-
-  HInstruction* index_cache = AddInstruction(
-      new(zone()) HForInCacheArray(
-          enumerable,
-          map,
-          DescriptorArray::kEnumCacheBridgeIndicesCacheIndex));
-  HForInCacheArray::cast(array)->set_index_cache(
-      HForInCacheArray::cast(index_cache));
-
-  bool osr_entry = PreProcessOsrEntry(stmt);
-  HBasicBlock* loop_entry = CreateLoopHeaderBlock();
-  current_block()->Goto(loop_entry);
-  set_current_block(loop_entry);
-  if (osr_entry) graph()->set_osr_loop_entry(loop_entry);
-
-  HValue* index = environment()->ExpressionStackAt(0);
-  HValue* limit = environment()->ExpressionStackAt(1);
-
-  // Check that we still have more keys.
-  HCompareIDAndBranch* compare_index =
-      new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
-  compare_index->SetInputRepresentation(Representation::Integer32());
-
-  HBasicBlock* loop_body = graph()->CreateBasicBlock();
-  HBasicBlock* loop_successor = graph()->CreateBasicBlock();
-
-  compare_index->SetSuccessorAt(0, loop_body);
-  compare_index->SetSuccessorAt(1, loop_successor);
-  current_block()->Finish(compare_index);
-
-  set_current_block(loop_successor);
-  Drop(5);
-
-  set_current_block(loop_body);
-
-  HValue* key = AddInstruction(
-      new(zone()) HLoadKeyedFastElement(
-          environment()->ExpressionStackAt(2),  // Enum cache.
-          environment()->ExpressionStackAt(0),  // Iteration index.
-          HLoadKeyedFastElement::OMIT_HOLE_CHECK));
-
-  // Check if the expected map still matches that of the enumerable.
-  // If not just deoptimize.
-  AddInstruction(new(zone()) HCheckMapValue(
-      environment()->ExpressionStackAt(4),
-      environment()->ExpressionStackAt(3)));
-
-  Bind(each_var, key);
-
-  BreakAndContinueInfo break_info(stmt, 5);
-  CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info));
-
-  HBasicBlock* body_exit =
-      JoinContinue(stmt, current_block(), break_info.continue_block());
-
-  if (body_exit != NULL) {
-    set_current_block(body_exit);
-
-    HValue* current_index = Pop();
-    HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(),
-                                               current_index,
-                                               graph()->GetConstant1());
-    new_index->AssumeRepresentation(Representation::Integer32());
-    PushAndAdd(new_index);
-    body_exit = current_block();
-  }
-
-  HBasicBlock* loop_exit = CreateLoop(stmt,
-                                      loop_entry,
-                                      body_exit,
-                                      loop_successor,
-                                      break_info.break_block());
-
-  set_current_block(loop_exit);
+  return Bailout("ForInStatement");
 }
 
 
@@ -3545,7 +3096,7 @@
   }
   Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
-  if (!lookup->IsFound() ||
+  if (!lookup->IsProperty() ||
       lookup->type() != NORMAL ||
       (is_store && lookup->IsReadOnly()) ||
       lookup->holder() != *global) {
@@ -3574,22 +3125,12 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   Variable* variable = expr->var();
+  if (variable->mode() == Variable::LET) {
+    return Bailout("reference to let variable");
+  }
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
-      if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
-        return Bailout("reference to global harmony declared variable");
-      }
-      // Handle known global constants like 'undefined' specially to avoid a
-      // load from a global cell for them.
-      Handle<Object> constant_value =
-          isolate()->factory()->GlobalConstantFor(variable->name());
-      if (!constant_value.is_null()) {
-        HConstant* instr =
-            new(zone()) HConstant(constant_value, Representation::Tagged());
-        return ast_context()->ReturnInstruction(instr, expr->id());
-      }
-
-      LookupResult lookup(isolate());
+      LookupResult lookup;
       GlobalPropertyAccess type =
           LookupGlobalProperty(variable, &lookup, false);
 
@@ -3601,8 +3142,8 @@
       if (type == kUseCell) {
         Handle<GlobalObject> global(info()->global_object());
         Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-        HLoadGlobalCell* instr =
-            new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
+        bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+        HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
         return ast_context()->ReturnInstruction(instr, expr->id());
       } else {
         HValue* context = environment()->LookupContext();
@@ -3621,18 +3162,20 @@
     case Variable::PARAMETER:
     case Variable::LOCAL: {
       HValue* value = environment()->Lookup(variable);
-      if (value == graph()->GetConstantHole()) {
-        ASSERT(variable->mode() == CONST ||
-               variable->mode() == CONST_HARMONY ||
-               variable->mode() == LET);
-        return Bailout("reference to uninitialized variable");
+      if (variable->mode() == Variable::CONST &&
+          value == graph()->GetConstantHole()) {
+        return Bailout("reference to uninitialized const variable");
       }
       return ast_context()->ReturnValue(value);
     }
 
     case Variable::CONTEXT: {
+      if (variable->mode() == Variable::CONST) {
+        return Bailout("reference to const context slot");
+      }
       HValue* context = BuildContextChainWalk(variable);
-      HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
+      HLoadContextSlot* instr =
+          new(zone()) HLoadContextSlot(context, variable->index());
       return ast_context()->ReturnInstruction(instr, expr->id());
     }
 
@@ -3666,99 +3209,18 @@
 }
 
 
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-static bool IsFastLiteral(Handle<JSObject> boilerplate,
-                          int max_depth,
-                          int* max_properties,
-                          int* total_size) {
-  ASSERT(max_depth >= 0 && *max_properties >= 0);
-  if (max_depth == 0) return false;
-
-  Handle<FixedArrayBase> elements(boilerplate->elements());
-  if (elements->length() > 0 &&
-      elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
-    if (boilerplate->HasFastDoubleElements()) {
-      *total_size += FixedDoubleArray::SizeFor(elements->length());
-    } else if (boilerplate->HasFastElements()) {
-      int length = elements->length();
-      for (int i = 0; i < length; i++) {
-        if ((*max_properties)-- == 0) return false;
-        Handle<Object> value = JSObject::GetElement(boilerplate, i);
-        if (value->IsJSObject()) {
-          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-          if (!IsFastLiteral(value_object,
-                             max_depth - 1,
-                             max_properties,
-                             total_size)) {
-            return false;
-          }
-        }
-      }
-      *total_size += FixedArray::SizeFor(length);
-    } else {
-      return false;
-    }
-  }
-
-  Handle<FixedArray> properties(boilerplate->properties());
-  if (properties->length() > 0) {
-    return false;
-  } else {
-    int nof = boilerplate->map()->inobject_properties();
-    for (int i = 0; i < nof; i++) {
-      if ((*max_properties)-- == 0) return false;
-      Handle<Object> value(boilerplate->InObjectPropertyAt(i));
-      if (value->IsJSObject()) {
-        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-        if (!IsFastLiteral(value_object,
-                           max_depth - 1,
-                           max_properties,
-                           total_size)) {
-          return false;
-        }
-      }
-    }
-  }
-
-  *total_size += boilerplate->map()->instance_size();
-  return true;
-}
-
-
 void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   HValue* context = environment()->LookupContext();
-  HInstruction* literal;
-
-  // Check whether to use fast or slow deep-copying for boilerplate.
-  int total_size = 0;
-  int max_properties = HFastLiteral::kMaxLiteralProperties;
-  Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
-  if (boilerplate->IsJSObject() &&
-      IsFastLiteral(Handle<JSObject>::cast(boilerplate),
-                    HFastLiteral::kMaxLiteralDepth,
-                    &max_properties,
-                    &total_size)) {
-    Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
-    literal = new(zone()) HFastLiteral(context,
-                                       boilerplate_object,
-                                       total_size,
-                                       expr->literal_index(),
-                                       expr->depth());
-  } else {
-    literal = new(zone()) HObjectLiteral(context,
-                                         expr->constant_properties(),
-                                         expr->fast_elements(),
-                                         expr->literal_index(),
-                                         expr->depth(),
-                                         expr->has_function());
-  }
-
+  HObjectLiteral* literal =
+      new(zone()) HObjectLiteral(context,
+                                 expr->constant_properties(),
+                                 expr->fast_elements(),
+                                 expr->literal_index(),
+                                 expr->depth(),
+                                 expr->has_function());
   // The object is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -3779,12 +3241,18 @@
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           if (property->emit_store()) {
-            property->RecordTypeFeedback(oracle());
             CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
-            HInstruction* store = BuildStoreNamed(literal, value, property);
+            Handle<String> name = Handle<String>::cast(key->handle());
+            HStoreNamedGeneric* store =
+                new(zone()) HStoreNamedGeneric(
+                                context,
+                                literal,
+                                name,
+                                value,
+                                function_strict_mode());
             AddInstruction(store);
-            if (store->HasObservableSideEffects()) AddSimulate(key->id());
+            AddSimulate(key->id());
           } else {
             CHECK_ALIVE(VisitForEffect(value));
           }
@@ -3821,48 +3289,12 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   HValue* context = environment()->LookupContext();
-  HInstruction* literal;
 
-  Handle<FixedArray> literals(environment()->closure()->literals());
-  Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
-
-  if (raw_boilerplate->IsUndefined()) {
-    raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
-        isolate(), literals, expr->constant_elements());
-    if (raw_boilerplate.is_null()) {
-      return Bailout("array boilerplate creation failed");
-    }
-    literals->set(expr->literal_index(), *raw_boilerplate);
-    if (JSObject::cast(*raw_boilerplate)->elements()->map() ==
-        isolate()->heap()->fixed_cow_array_map()) {
-      isolate()->counters()->cow_arrays_created_runtime()->Increment();
-    }
-  }
-
-  Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
-  ElementsKind boilerplate_elements_kind =
-        Handle<JSObject>::cast(boilerplate)->GetElementsKind();
-
-  // Check whether to use fast or slow deep-copying for boilerplate.
-  int total_size = 0;
-  int max_properties = HFastLiteral::kMaxLiteralProperties;
-  if (IsFastLiteral(boilerplate,
-                    HFastLiteral::kMaxLiteralDepth,
-                    &max_properties,
-                    &total_size)) {
-    literal = new(zone()) HFastLiteral(context,
-                                       boilerplate,
-                                       total_size,
-                                       expr->literal_index(),
-                                       expr->depth());
-  } else {
-    literal = new(zone()) HArrayLiteral(context,
-                                        boilerplate,
-                                        length,
-                                        expr->literal_index(),
-                                        expr->depth());
-  }
-
+  HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
+                                                     expr->constant_elements(),
+                                                     length,
+                                                     expr->literal_index(),
+                                                     expr->depth());
   // The array is expected in the bailout environment during computation
   // of the property values and is the value of the entire expression.
   PushAndAdd(literal);
@@ -3879,35 +3311,16 @@
     HValue* value = Pop();
     if (!Smi::IsValid(i)) return Bailout("Non-smi key in array literal");
 
-    elements = new(zone()) HLoadElements(literal);
-    AddInstruction(elements);
+    // Load the elements array before the first store.
+    if (elements == NULL) {
+      elements = new(zone()) HLoadElements(literal);
+      AddInstruction(elements);
+    }
 
     HValue* key = AddInstruction(
         new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
                               Representation::Integer32()));
-
-    switch (boilerplate_elements_kind) {
-      case FAST_SMI_ONLY_ELEMENTS:
-        // Smi-only arrays need a smi check.
-        AddInstruction(new(zone()) HCheckSmi(value));
-        // Fall through.
-      case FAST_ELEMENTS:
-        AddInstruction(new(zone()) HStoreKeyedFastElement(
-            elements,
-            key,
-            value,
-            boilerplate_elements_kind));
-        break;
-      case FAST_DOUBLE_ELEMENTS:
-        AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                                key,
-                                                                value));
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-
+    AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
     AddSimulate(expr->GetIdForElement(i));
   }
   return ast_context()->ReturnValue(Pop());
@@ -3919,7 +3332,7 @@
                                Handle<String> name,
                                LookupResult* lookup) {
   type->LookupInDescriptors(NULL, *name, lookup);
-  if (!lookup->IsFound()) return false;
+  if (!lookup->IsPropertyOrTransition()) return false;
   if (lookup->type() == FIELD) return true;
   return (lookup->type() == MAP_TRANSITION) &&
       (type->unused_property_fields() > 0);
@@ -3947,8 +3360,7 @@
                                                   bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(new(zone()) HCheckMap(object, type));
   }
 
   int index = ComputeStoredFieldIndex(type, name, lookup);
@@ -3968,7 +3380,7 @@
     instr->set_transition(transition);
     // TODO(fschneider): Record the new map type of the object in the IR to
     // enable elimination of redundant checks after the transition store.
-    instr->SetGVNFlag(kChangesMaps);
+    instr->SetFlag(HValue::kChangesMaps);
   }
   return instr;
 }
@@ -3983,26 +3395,7 @@
                          object,
                          name,
                          value,
-                         function_strict_mode_flag());
-}
-
-
-HInstruction* HGraphBuilder::BuildStoreNamed(HValue* object,
-                                             HValue* value,
-                                             ObjectLiteral::Property* prop) {
-  Literal* key = prop->key()->AsLiteral();
-  Handle<String> name = Handle<String>::cast(key->handle());
-  ASSERT(!name.is_null());
-
-  LookupResult lookup(isolate());
-  Handle<Map> type = prop->GetReceiverType();
-  bool is_monomorphic = prop->IsMonomorphic() &&
-      ComputeStoredField(type, name, &lookup);
-
-  return is_monomorphic
-      ? BuildStoreNamedField(object, name, value, type, &lookup,
-                             true)  // Needs smi and map check.
-      : BuildStoreNamedGeneric(object, name, value);
+                         function_strict_mode());
 }
 
 
@@ -4016,7 +3409,7 @@
   Handle<String> name = Handle<String>::cast(key->handle());
   ASSERT(!name.is_null());
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   SmallMapList* types = expr->GetReceiverTypes();
   bool is_monomorphic = expr->IsMonomorphic() &&
       ComputeStoredField(types->first(), name, &lookup);
@@ -4040,7 +3433,7 @@
   HBasicBlock* join = NULL;
   for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) {
     Handle<Map> map = types->at(i);
-    LookupResult lookup(isolate());
+    LookupResult lookup;
     if (ComputeStoredField(map, name, &lookup)) {
       if (count == 0) {
         AddInstruction(new(zone()) HCheckNonSmi(object));  // Only needed once.
@@ -4083,7 +3476,7 @@
       // The HSimulate for the store should not see the stored value in
       // effect contexts (it is not materialized at expr->id() in the
       // unoptimized code).
-      if (instr->HasObservableSideEffects()) {
+      if (instr->HasSideEffects()) {
         if (ast_context()->IsEffect()) {
           AddSimulate(expr->id());
         } else {
@@ -4123,7 +3516,7 @@
     ASSERT(!name.is_null());
 
     SmallMapList* types = expr->GetReceiverTypes();
-    LookupResult lookup(isolate());
+    LookupResult lookup;
 
     if (expr->IsMonomorphic()) {
       instr = BuildStoreNamed(object, value, expr);
@@ -4156,7 +3549,7 @@
   Push(value);
   instr->set_position(expr->position());
   AddInstruction(instr);
-  if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+  if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
   return ast_context()->ReturnValue(Pop());
 }
 
@@ -4168,16 +3561,16 @@
                                                    HValue* value,
                                                    int position,
                                                    int ast_id) {
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
+    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
     Handle<GlobalObject> global(info()->global_object());
     Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    HInstruction* instr =
-        new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
+    HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
     instr->set_position(position);
     AddInstruction(instr);
-    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
   } else {
     HValue* context =  environment()->LookupContext();
     HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -4187,11 +3580,11 @@
                                         global_object,
                                         var->name(),
                                         value,
-                                        function_strict_mode_flag());
+                                        function_strict_mode());
     instr->set_position(position);
     AddInstruction(instr);
-    ASSERT(instr->HasObservableSideEffects());
-    if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+    ASSERT(instr->HasSideEffects());
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
   }
 }
 
@@ -4208,8 +3601,8 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == LET)  {
-      return Bailout("unsupported let compound assignment");
+    if (var->mode() == Variable::CONST || var->mode() == Variable::LET) {
+      return Bailout("unsupported let or const compound assignment");
     }
 
     CHECK_ALIVE(VisitForValue(operation));
@@ -4224,9 +3617,6 @@
 
       case Variable::PARAMETER:
       case Variable::LOCAL:
-        if (var->mode() == CONST)  {
-          return Bailout("unsupported const compound assignment");
-        }
         Bind(var, Top());
         break;
 
@@ -4247,29 +3637,11 @@
           }
         }
 
-        HStoreContextSlot::Mode mode;
-
-        switch (var->mode()) {
-          case LET:
-            mode = HStoreContextSlot::kCheckDeoptimize;
-            break;
-          case CONST:
-            return ast_context()->ReturnValue(Pop());
-          case CONST_HARMONY:
-            // This case is checked statically so no need to
-            // perform checks here
-            UNREACHABLE();
-          default:
-            mode = HStoreContextSlot::kNoCheck;
-        }
-
         HValue* context = BuildContextChainWalk(var);
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
+            new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId());
-        }
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
         break;
       }
 
@@ -4295,7 +3667,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasObservableSideEffects()) AddSimulate(expr->CompoundLoadId());
+      if (load->HasSideEffects()) AddSimulate(expr->CompoundLoadId());
 
       CHECK_ALIVE(VisitForValue(expr->value()));
       HValue* right = Pop();
@@ -4303,14 +3675,14 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
 
       HInstruction* store = BuildStoreNamed(obj, instr, prop);
       AddInstruction(store);
       // Drop the simulated receiver and value.  Return the value.
       Drop(2);
       Push(instr);
-      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
       return ast_context()->ReturnValue(Pop());
 
     } else {
@@ -4335,7 +3707,7 @@
 
       HInstruction* instr = BuildBinaryOperation(operation, left, right);
       PushAndAdd(instr);
-      if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+      if (instr->HasSideEffects()) AddSimulate(operation->id());
 
       expr->RecordTypeFeedback(oracle());
       HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -4374,23 +3746,19 @@
     HandlePropertyAssignment(expr);
   } else if (proxy != NULL) {
     Variable* var = proxy->var();
-
-    if (var->mode() == CONST) {
+    if (var->mode() == Variable::CONST) {
       if (expr->op() != Token::INIT_CONST) {
-        CHECK_ALIVE(VisitForValue(expr->value()));
-        return ast_context()->ReturnValue(Pop());
-      }
-
-      if (var->IsStackAllocated()) {
-        // We insert a use of the old value to detect unsupported uses of const
-        // variables (e.g. initialization inside a loop).
-        HValue* old_value = environment()->Lookup(var);
-        AddInstruction(new HUseConst(old_value));
-      }
-    } else if (var->mode() == CONST_HARMONY) {
-      if (expr->op() != Token::INIT_CONST_HARMONY) {
         return Bailout("non-initializer assignment to const");
       }
+      if (!var->IsStackAllocated()) {
+        return Bailout("assignment to const context slot");
+      }
+      // We insert a use of the old value to detect unsupported uses of const
+      // variables (e.g. initialization inside a loop).
+      HValue* old_value = environment()->Lookup(var);
+      AddInstruction(new HUseConst(old_value));
+    } else if (var->mode() == Variable::LET) {
+      return Bailout("unsupported assignment to let");
     }
 
     if (proxy->IsArguments()) return Bailout("assignment to arguments");
@@ -4407,14 +3775,6 @@
 
       case Variable::PARAMETER:
       case Variable::LOCAL: {
-        // Perform an initialization check for let declared variables
-        // or parameters.
-        if (var->mode() == LET && expr->op() == Token::ASSIGN) {
-          HValue* env_value = environment()->Lookup(var);
-          if (env_value == graph()->GetConstantHole()) {
-            return Bailout("assignment to let variable before initialization");
-          }
-        }
         // We do not allow the arguments object to occur in a context where it
         // may escape, but assignments to stack-allocated locals are
         // permitted.
@@ -4425,6 +3785,7 @@
       }
 
       case Variable::CONTEXT: {
+        ASSERT(var->mode() != Variable::CONST);
         // Bail out if we try to mutate a parameter value in a function using
         // the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
@@ -4440,38 +3801,11 @@
         }
 
         CHECK_ALIVE(VisitForValue(expr->value()));
-        HStoreContextSlot::Mode mode;
-        if (expr->op() == Token::ASSIGN) {
-          switch (var->mode()) {
-            case LET:
-              mode = HStoreContextSlot::kCheckDeoptimize;
-              break;
-            case CONST:
-              return ast_context()->ReturnValue(Pop());
-            case CONST_HARMONY:
-              // This case is checked statically so no need to
-              // perform checks here
-              UNREACHABLE();
-            default:
-              mode = HStoreContextSlot::kNoCheck;
-          }
-        } else if (expr->op() == Token::INIT_VAR ||
-                   expr->op() == Token::INIT_LET ||
-                   expr->op() == Token::INIT_CONST_HARMONY) {
-          mode = HStoreContextSlot::kNoCheck;
-        } else {
-          ASSERT(expr->op() == Token::INIT_CONST);
-
-          mode = HStoreContextSlot::kCheckIgnoreAssignment;
-        }
-
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
-            context, var->index(), mode, Top());
+        HStoreContextSlot* instr =
+            new(zone()) HStoreContextSlot(context, var->index(), Top());
         AddInstruction(instr);
-        if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId());
-        }
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
         return ast_context()->ReturnValue(Pop());
       }
 
@@ -4512,8 +3846,7 @@
                                                     bool smi_and_map_check) {
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(object));
-    AddInstruction(new(zone()) HCheckMap(object, type, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(new(zone()) HCheckMap(object, type));
   }
 
   int index = lookup->GetLocalFieldIndexFromMap(*type);
@@ -4532,10 +3865,6 @@
 
 HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* obj,
                                                    Property* expr) {
-  if (expr->IsUninitialized() && !FLAG_always_opt) {
-    AddInstruction(new(zone()) HSoftDeoptimize);
-    current_block()->MarkAsDeoptimizing();
-  }
   ASSERT(expr->key()->IsPropertyName());
   Handle<Object> name = expr->key()->AsLiteral()->handle();
   HValue* context = environment()->LookupContext();
@@ -4547,18 +3876,17 @@
                                             Property* expr,
                                             Handle<Map> map,
                                             Handle<String> name) {
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   map->LookupInDescriptors(NULL, *name, &lookup);
-  if (lookup.IsFound() && lookup.type() == FIELD) {
+  if (lookup.IsProperty() && lookup.type() == FIELD) {
     return BuildLoadNamedField(obj,
                                expr,
                                map,
                                &lookup,
                                true);
-  } else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
+  } else if (lookup.IsProperty() && lookup.type() == CONSTANT_FUNCTION) {
     AddInstruction(new(zone()) HCheckNonSmi(obj));
-    AddInstruction(new(zone()) HCheckMap(obj, map, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(new(zone()) HCheckMap(obj, map));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
     return new(zone()) HConstant(function, Representation::Tagged());
   } else {
@@ -4584,7 +3912,9 @@
     ASSERT(val != NULL);
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS: {
-        val = AddInstruction(new(zone()) HClampToUint8(val));
+        HClampToUint8* clamp = new(zone()) HClampToUint8(val);
+        AddInstruction(clamp);
+        val = clamp;
         break;
       }
       case EXTERNAL_BYTE_ELEMENTS:
@@ -4593,19 +3923,14 @@
       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
       case EXTERNAL_INT_ELEMENTS:
       case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
-        if (!val->representation().IsInteger32()) {
-          val = AddInstruction(new(zone()) HChange(
-              val,
-              Representation::Integer32(),
-              true,  // Truncate to int32.
-              false));  // Don't deoptimize undefined (irrelevant here).
-        }
+        HToInt32* floor_val = new(zone()) HToInt32(val);
+        AddInstruction(floor_val);
+        val = floor_val;
         break;
       }
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
         break;
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -4616,55 +3941,30 @@
     return new(zone()) HStoreKeyedSpecializedArrayElement(
         external_elements, checked_key, val, elements_kind);
   } else {
-    ASSERT(val == NULL);
     return new(zone()) HLoadKeyedSpecializedArrayElement(
         external_elements, checked_key, elements_kind);
   }
 }
 
 
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
-                                                    HValue* checked_key,
-                                                    HValue* val,
-                                                    ElementsKind elements_kind,
-                                                    bool is_store) {
-  if (is_store) {
-    ASSERT(val != NULL);
-    switch (elements_kind) {
-      case FAST_DOUBLE_ELEMENTS:
-        return new(zone()) HStoreKeyedFastDoubleElement(
-            elements, checked_key, val);
-      case FAST_SMI_ONLY_ELEMENTS:
-        // Smi-only arrays need a smi check.
-        AddInstruction(new(zone()) HCheckSmi(val));
-        // Fall through.
-      case FAST_ELEMENTS:
-        return new(zone()) HStoreKeyedFastElement(
-            elements, checked_key, val, elements_kind);
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-  // It's an element load (!is_store).
-  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
-  } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
-    return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-  }
-}
-
-
 HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
                                                            HValue* key,
                                                            HValue* val,
-                                                           Handle<Map> map,
+                                                           Expression* expr,
                                                            bool is_store) {
+  ASSERT(expr->IsMonomorphic());
+  Handle<Map> map = expr->GetMonomorphicReceiverType();
+  if (!map->has_fast_elements() &&
+      !map->has_fast_double_elements() &&
+      !map->has_external_array_elements()) {
+    return is_store ? BuildStoreKeyedGeneric(object, key, val)
+                    : BuildLoadKeyedGeneric(object, key);
+  }
+  AddInstruction(new(zone()) HCheckNonSmi(object));
   HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
-  bool fast_smi_only_elements = map->has_fast_smi_only_elements();
-  bool fast_elements = map->has_fast_elements();
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
-  if (is_store && (fast_elements || fast_smi_only_elements)) {
+  bool fast_double_elements = map->has_fast_double_elements();
+  if (is_store && map->has_fast_elements()) {
     AddInstruction(new(zone()) HCheckMap(
         elements, isolate()->factory()->fixed_array_map()));
   }
@@ -4679,17 +3979,28 @@
     return BuildExternalArrayElementAccess(external_elements, checked_key,
                                            val, map->elements_kind(), is_store);
   }
-  ASSERT(fast_smi_only_elements ||
-         fast_elements ||
-         map->has_fast_double_elements());
+  ASSERT(map->has_fast_elements() || fast_double_elements);
   if (map->instance_type() == JS_ARRAY_TYPE) {
     length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
   } else {
     length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
   }
   checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-  return BuildFastElementAccess(elements, checked_key, val,
-                                map->elements_kind(), is_store);
+  if (is_store) {
+    if (fast_double_elements) {
+      return new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                      checked_key,
+                                                      val);
+    } else {
+      return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
+    }
+  } else {
+    if (fast_double_elements) {
+      return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+    } else {
+      return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+    }
+  }
 }
 
 
@@ -4703,6 +4014,7 @@
                                                       bool* has_side_effects) {
   *has_side_effects = false;
   AddInstruction(new(zone()) HCheckNonSmi(object));
+  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   SmallMapList* maps = prop->GetReceiverTypes();
   bool todo_external_array = false;
 
@@ -4712,61 +4024,15 @@
     type_todo[i] = false;
   }
 
-  // Elements_kind transition support.
-  MapHandleList transition_target(maps->length());
-  // Collect possible transition targets.
-  MapHandleList possible_transitioned_maps(maps->length());
   for (int i = 0; i < maps->length(); ++i) {
-    Handle<Map> map = maps->at(i);
-    ElementsKind elements_kind = map->elements_kind();
-    if (elements_kind == FAST_DOUBLE_ELEMENTS ||
-        elements_kind == FAST_ELEMENTS) {
-      possible_transitioned_maps.Add(map);
-    }
-  }
-  // Get transition target for each map (NULL == no transition).
-  for (int i = 0; i < maps->length(); ++i) {
-    Handle<Map> map = maps->at(i);
-    Handle<Map> transitioned_map =
-        map->FindTransitionedMap(&possible_transitioned_maps);
-    transition_target.Add(transitioned_map);
-  }
-
-  int num_untransitionable_maps = 0;
-  Handle<Map> untransitionable_map;
-  for (int i = 0; i < maps->length(); ++i) {
-    Handle<Map> map = maps->at(i);
-    ASSERT(map->IsMap());
-    if (!transition_target.at(i).is_null()) {
-      AddInstruction(new(zone()) HTransitionElementsKind(
-          object, map, transition_target.at(i)));
-    } else {
-      type_todo[map->elements_kind()] = true;
-      if (map->elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
-        todo_external_array = true;
-      }
-      num_untransitionable_maps++;
-      untransitionable_map = map;
+    ASSERT(maps->at(i)->IsMap());
+    type_todo[maps->at(i)->elements_kind()] = true;
+    if (maps->at(i)->elements_kind()
+        >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND) {
+      todo_external_array = true;
     }
   }
 
-  // If only one map is left after transitioning, handle this case
-  // monomorphically.
-  if (num_untransitionable_maps == 1) {
-    HInstruction* instr = NULL;
-    if (untransitionable_map->has_slow_elements_kind()) {
-      instr = AddInstruction(is_store ? BuildStoreKeyedGeneric(object, key, val)
-                                      : BuildLoadKeyedGeneric(object, key));
-    } else {
-      instr = AddInstruction(BuildMonomorphicElementAccess(
-          object, key, val, untransitionable_map, is_store));
-    }
-    *has_side_effects |= instr->HasObservableSideEffects();
-    instr->set_position(position);
-    return is_store ? NULL : instr;
-  }
-
-  AddInstruction(HCheckInstanceType::NewIsSpecObject(object));
   HBasicBlock* join = graph()->CreateBasicBlock();
 
   HInstruction* elements_kind_instr =
@@ -4776,20 +4042,14 @@
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
 
-  // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
-  // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
-  // arrays.
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-  STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-  STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  // FAST_ELEMENTS is assumed to be the first case.
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
 
-  for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
+  for (ElementsKind elements_kind = FAST_ELEMENTS;
        elements_kind <= LAST_ELEMENTS_KIND;
        elements_kind = ElementsKind(elements_kind + 1)) {
-    // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
-    // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
-    // that's executed for all external array cases.
+    // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
+    // need to add some code that's executed for all external array cases.
     STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4811,22 +4071,15 @@
 
       set_current_block(if_true);
       HInstruction* access;
-      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-          elements_kind == FAST_ELEMENTS ||
+      if (elements_kind == FAST_ELEMENTS ||
           elements_kind == FAST_DOUBLE_ELEMENTS) {
-        if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
+        bool fast_double_elements =
+            elements_kind == FAST_DOUBLE_ELEMENTS;
+        if (is_store && elements_kind == FAST_ELEMENTS) {
           AddInstruction(new(zone()) HCheckMap(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
         }
-        // TODO(jkummerow): The need for these two blocks could be avoided
-        // in one of two ways:
-        // (1) Introduce ElementsKinds for JSArrays that are distinct from
-        //     those for fast objects.
-        // (2) Put the common instructions into a third "join" block. This
-        //     requires additional AST IDs that we can deopt to from inside
-        //     that join block. They must be added to the Property class (when
-        //     it's a keyed property) and registered in the full codegen.
         HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
         HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
         HHasInstanceTypeAndBranch* typecheck =
@@ -4836,16 +4089,30 @@
         current_block()->Finish(typecheck);
 
         set_current_block(if_jsarray);
-        HInstruction* length;
-        length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
+        HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
+        AddInstruction(length);
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        access = AddInstruction(BuildFastElementAccess(
-            elements, checked_key, val, elements_kind, is_store));
-        if (!is_store) {
+        if (is_store) {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                         checked_key,
+                                                         val));
+          } else {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+          }
+        } else {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+          } else {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastElement(elements, checked_key));
+          }
           Push(access);
         }
-
-        *has_side_effects |= access->HasObservableSideEffects();
+        *has_side_effects |= access->HasSideEffects();
         if (position != -1) {
           access->set_position(position);
         }
@@ -4854,8 +4121,25 @@
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        access = AddInstruction(BuildFastElementAccess(
-            elements, checked_key, val, elements_kind, is_store));
+        if (is_store) {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastDoubleElement(elements,
+                                                         checked_key,
+                                                         val));
+          } else {
+            access = AddInstruction(
+                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
+          }
+        } else {
+          if (fast_double_elements) {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
+          } else {
+            access = AddInstruction(
+                new(zone()) HLoadKeyedFastElement(elements, checked_key));
+          }
+        }
       } else if (elements_kind == DICTIONARY_ELEMENTS) {
         if (is_store) {
           access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
@@ -4866,7 +4150,7 @@
         access = AddInstruction(BuildExternalArrayElementAccess(
             external_elements, checked_key, val, elements_kind, is_store));
       }
-      *has_side_effects |= access->HasObservableSideEffects();
+      *has_side_effects |= access->HasSideEffects();
       access->set_position(position);
       if (!is_store) {
         Push(access);
@@ -4895,14 +4179,7 @@
   ASSERT(!expr->IsPropertyName());
   HInstruction* instr = NULL;
   if (expr->IsMonomorphic()) {
-    Handle<Map> map = expr->GetMonomorphicReceiverType();
-    if (map->has_slow_elements_kind()) {
-      instr = is_store ? BuildStoreKeyedGeneric(obj, key, val)
-                       : BuildLoadKeyedGeneric(obj, key);
-    } else {
-      AddInstruction(new(zone()) HCheckNonSmi(obj));
-      instr = BuildMonomorphicElementAccess(obj, key, val, map, is_store);
-    }
+    instr = BuildMonomorphicElementAccess(obj, key, val, expr, is_store);
   } else if (expr->GetReceiverTypes() != NULL &&
              !expr->GetReceiverTypes()->is_empty()) {
     return HandlePolymorphicElementAccess(
@@ -4916,7 +4193,7 @@
   }
   instr->set_position(position);
   AddInstruction(instr);
-  *has_side_effects = instr->HasObservableSideEffects();
+  *has_side_effects = instr->HasSideEffects();
   return instr;
 }
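
Throughout this file the revert also renames HasObservableSideEffects() back to the older HasSideEffects() predicate. The surrounding idiom is unchanged: after any side-effecting instruction the builder records a simulate, an environment snapshot that deoptimization can resume from. A toy model of that bookkeeping, under the assumption that each instruction carries a side-effect bit and an AST id:

    #include <cstdio>
    #include <vector>

    struct Instr { bool has_side_effects; int ast_id; };

    // A simulate is a deopt resume point keyed by an AST id.
    static void AddSimulate(int ast_id) { printf("simulate @%d\n", ast_id); }

    int main() {
      std::vector<Instr> block = {{false, 1}, {true, 2}, {false, 3}};
      for (const Instr& instr : block) {
        if (instr.has_side_effects) AddSimulate(instr.ast_id);  // the idiom
      }
      return 0;
    }
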
 
@@ -4930,7 +4207,7 @@
                          object,
                          key,
                          value,
-                         function_strict_mode_flag());
+                         function_strict_mode());
 }
 
 bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
@@ -4983,7 +4260,7 @@
   CHECK_ALIVE(VisitForValue(expr->obj()));
 
   HInstruction* instr = NULL;
-  if (expr->AsProperty()->IsArrayLength()) {
+  if (expr->IsArrayLength()) {
     HValue* array = Pop();
     AddInstruction(new(zone()) HCheckNonSmi(array));
     HInstruction* mapcheck =
@@ -5061,8 +4338,7 @@
   // its prototypes.
   if (smi_and_map_check) {
     AddInstruction(new(zone()) HCheckNonSmi(receiver));
-    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map, NULL,
-                                         ALLOW_ELEMENT_TRANSITION_MAPS));
+    AddInstruction(new(zone()) HCheckMap(receiver, receiver_map));
   }
   if (!expr->holder().is_null()) {
     AddInstruction(new(zone()) HCheckPrototypeMaps(
@@ -5103,7 +4379,7 @@
         PrintF("Trying to inline the polymorphic call to %s\n",
                *name->ToCString());
       }
-      if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
+      if (FLAG_polymorphic_inlining && TryInline(expr)) {
         // Trying to inline will signal that we should bailout from the
         // entire compilation by setting stack overflow on the visitor.
         if (HasStackOverflow()) return;
@@ -5173,24 +4449,24 @@
 }
 
 
-bool HGraphBuilder::TryInline(CallKind call_kind,
-                              Handle<JSFunction> target,
-                              ZoneList<Expression*>* arguments,
-                              HValue* receiver,
-                              int ast_id,
-                              int return_id,
-                              ReturnHandlingFlag return_handling) {
+bool HGraphBuilder::TryInline(Call* expr) {
   if (!FLAG_use_inlining) return false;
 
+  // The function call we are inlining is a method call if the call
+  // is a property call.
+  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
   Handle<JSFunction> caller = info()->closure();
+  Handle<JSFunction> target = expr->target();
   Handle<SharedFunctionInfo> target_shared(target->shared());
 
   // Do a quick check on source code length to avoid parsing large
   // inlining candidates.
-  if ((FLAG_limit_inlining && target_shared->SourceSize() > kMaxSourceSize)
-      || target_shared->SourceSize() > kUnlimitedMaxSourceSize) {
+  if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
     TraceInline(target, caller, "target text too big");
     return false;
   }
@@ -5200,20 +4476,8 @@
     TraceInline(target, caller, "target not inlineable");
     return false;
   }
-  if (target_shared->dont_inline() || target_shared->dont_optimize()) {
-    TraceInline(target, caller, "target contains unsupported syntax [early]");
-    return false;
-  }
 
-  int nodes_added = target_shared->ast_node_count();
-  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
-      nodes_added > kUnlimitedMaxInlinedSize) {
-    TraceInline(target, caller, "target AST is too large [early]");
-    return false;
-  }
-
-#if !defined(V8_TARGET_ARCH_IA32)
-  // Target must be able to use caller's context.
+  // No context change required.
   CompilationInfo* outer_info = info();
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
@@ -5221,8 +4485,6 @@
     TraceInline(target, caller, "target requires context change");
     return false;
   }
-#endif
-
 
   // Don't inline deeper than kMaxInliningLevels calls.
   HEnvironment* env = environment();
@@ -5232,37 +4494,32 @@
       TraceInline(target, caller, "inline depth limit reached");
       return false;
     }
-    if (env->outer()->frame_type() == JS_FUNCTION) {
-      current_level++;
-    }
+    current_level++;
     env = env->outer();
   }
 
   // Don't inline recursive functions.
-  for (FunctionState* state = function_state();
-       state != NULL;
-       state = state->outer()) {
-    if (state->compilation_info()->closure()->shared() == *target_shared) {
-      TraceInline(target, caller, "target is recursive");
-      return false;
-    }
+  if (*target_shared == outer_info->closure()->shared()) {
+    TraceInline(target, caller, "target is recursive");
+    return false;
   }
 
   // We don't want to add more than a certain number of nodes from inlining.
-  if ((FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) ||
-      inlined_count_ > kUnlimitedMaxInlinedNodes) {
+  if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
     TraceInline(target, caller, "cumulative AST node limit reached");
     return false;
   }
 
+  int count_before = AstNode::Count();
+
   // Parse and allocate variables.
   CompilationInfo target_info(target);
-  if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
+  if (!ParserApi::Parse(&target_info) ||
       !Scope::Analyze(&target_info)) {
     if (target_info.isolate()->has_pending_exception()) {
       // Parse or scope error, never optimize this function.
       SetStackOverflow();
-      target_shared->DisableOptimization();
+      target_shared->DisableOptimization(*target);
     }
     TraceInline(target, caller, "parse failure");
     return false;
@@ -5274,35 +4531,20 @@
   }
   FunctionLiteral* function = target_info.function();
 
-  // The following conditions must be checked again after re-parsing, because
-  // earlier the information might not have been complete due to lazy parsing.
-  nodes_added = function->ast_node_count();
-  if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
-      nodes_added > kUnlimitedMaxInlinedSize) {
-    TraceInline(target, caller, "target AST is too large [late]");
-    return false;
-  }
-  AstProperties::Flags* flags(function->flags());
-  if (flags->Contains(kDontInline) || flags->Contains(kDontOptimize)) {
-    TraceInline(target, caller, "target contains unsupported syntax [late]");
+  // Count the number of AST nodes added by inlining this call.
+  int nodes_added = AstNode::Count() - count_before;
+  if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
+    TraceInline(target, caller, "target AST is too large");
     return false;
   }
 
-  // If the function uses the arguments object check that inlining of functions
-  // with arguments object is enabled and the arguments-variable is
-  // stack allocated.
-  if (function->scope()->arguments() != NULL) {
-    if (!FLAG_inline_arguments) {
-      TraceInline(target, caller, "target uses arguments object");
-      return false;
-    }
-
-    if (!function->scope()->arguments()->IsStackAllocated()) {
-      TraceInline(target,
-                  caller,
-                  "target uses non-stackallocated arguments object");
-      return false;
-    }
+  // Don't inline functions that use the arguments object or that
+  // have a mismatching number of parameters.
+  int arity = expr->arguments()->length();
+  if (function->scope()->arguments() != NULL ||
+      arity != target_shared->formal_parameter_count()) {
+    TraceInline(target, caller, "target requires special argument handling");
+    return false;
   }
 
   // All declarations must be inlineable.
@@ -5314,6 +4556,13 @@
       return false;
     }
   }
+  // All statements in the body must be inlineable.
+  for (int i = 0, count = function->body()->length(); i < count; ++i) {
+    if (!function->body()->at(i)->IsInlineable()) {
+      TraceInline(target, caller, "target contains unsupported syntax");
+      return false;
+    }
+  }
 
   // Generate the deoptimization data for the unoptimized version of
   // the target function if we don't already have it.
@@ -5325,11 +4574,11 @@
       TraceInline(target, caller, "could not generate deoptimization info");
       return false;
     }
-    if (target_shared->scope_info() == ScopeInfo::Empty()) {
+    if (target_shared->scope_info() == SerializedScopeInfo::Empty()) {
       // The scope info might not have been set if a lazily compiled
       // function is inlined before being called for the first time.
-      Handle<ScopeInfo> target_scope_info =
-          ScopeInfo::Create(target_info.scope());
+      Handle<SerializedScopeInfo> target_scope_info =
+          SerializedScopeInfo::Create(target_info.scope());
       target_shared->set_scope_info(*target_scope_info);
     }
     target_shared->EnableDeoptimizationSupport(*target_info.code());
@@ -5347,54 +4596,30 @@
   ASSERT(target_shared->has_deoptimization_support());
   TypeFeedbackOracle target_oracle(
       Handle<Code>(target_shared->code()),
-      Handle<Context>(target->context()->global_context()),
-      isolate());
-  // The function state is new-allocated because we need to delete it
-  // in two different places.
-  FunctionState* target_state = new FunctionState(
-      this, &target_info, &target_oracle, return_handling);
+      Handle<Context>(target->context()->global_context()));
+  FunctionState target_state(this, &target_info, &target_oracle);
 
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner_env =
       environment()->CopyForInlining(target,
-                                     arguments->length(),
                                      function,
                                      undefined,
-                                     call_kind,
-                                     function_state()->is_construct());
-#ifdef V8_TARGET_ARCH_IA32
-  // IA32 only, overwrite the caller's context in the deoptimization
-  // environment with the correct one.
-  //
-  // TODO(kmillikin): implement the same inlining on other platforms so we
-  // can remove the unsightly ifdefs in this function.
-  HConstant* context = new HConstant(Handle<Context>(target->context()),
-                                     Representation::Tagged());
-  AddInstruction(context);
-  inner_env->BindContext(context);
-#endif
-  AddSimulate(return_id);
-  current_block()->UpdateEnvironment(inner_env);
+                                     call_kind);
+  HBasicBlock* body_entry = CreateBasicBlock(inner_env);
+  current_block()->Goto(body_entry);
+  body_entry->SetJoinId(expr->ReturnId());
+  set_current_block(body_entry);
   AddInstruction(new(zone()) HEnterInlined(target,
-                                           arguments->length(),
                                            function,
-                                           call_kind,
-                                           function_state()->is_construct()));
-  // If the function uses arguments object create and bind one.
-  if (function->scope()->arguments() != NULL) {
-    ASSERT(function->scope()->arguments()->IsStackAllocated());
-    environment()->Bind(function->scope()->arguments(),
-                        graph()->GetArgumentsObject());
-  }
+                                           call_kind));
   VisitDeclarations(target_info.scope()->declarations());
   VisitStatements(function->body());
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
     // instead.
     TraceInline(target, caller, "inline graph construction failed");
-    target_shared->DisableOptimization();
+    target_shared->DisableOptimization(*target);
     inline_bailout_ = true;
-    delete target_state;
     return true;
   }
 
@@ -5404,27 +4629,29 @@
   TraceInline(target, caller, NULL);
 
   if (current_block() != NULL) {
-    // Add default return value (i.e. undefined for normals calls or the newly
-    // allocated receiver for construct calls) if control can fall off the
-    // body.  In a test context, undefined is false and any JSObject is true.
-    if (call_context()->IsValue()) {
+    // Add a return of undefined if control can fall off the body.  In a
+    // test context, undefined is false.
+    if (inlined_test_context() == NULL) {
       ASSERT(function_return() != NULL);
-      HValue* return_value = function_state()->is_construct()
-          ? receiver
-          : undefined;
-      current_block()->AddLeaveInlined(return_value,
-                                       function_return(),
-                                       function_state()->drop_extra());
-    } else if (call_context()->IsEffect()) {
-      ASSERT(function_return() != NULL);
-      current_block()->Goto(function_return(), function_state()->drop_extra());
+      ASSERT(call_context()->IsEffect() || call_context()->IsValue());
+      if (call_context()->IsEffect()) {
+        current_block()->Goto(function_return());
+      } else {
+        current_block()->AddLeaveInlined(undefined, function_return());
+      }
     } else {
-      ASSERT(call_context()->IsTest());
-      ASSERT(inlined_test_context() != NULL);
-      HBasicBlock* target = function_state()->is_construct()
-          ? inlined_test_context()->if_true()
-          : inlined_test_context()->if_false();
-      current_block()->Goto(target, function_state()->drop_extra());
+      // The graph builder assumes control can reach both branches of a
+      // test, so we materialize the undefined value and test it rather than
+      // simply jumping to the false target.
+      //
+      // TODO(3168478): refactor to avoid this.
+      HBasicBlock* empty_true = graph()->CreateBasicBlock();
+      HBasicBlock* empty_false = graph()->CreateBasicBlock();
+      HBranch* test = new(zone()) HBranch(undefined, empty_true, empty_false);
+      current_block()->Finish(test);
+
+      empty_true->Goto(inlined_test_context()->if_true());
+      empty_false->Goto(inlined_test_context()->if_false());
     }
   }
 
@@ -5436,96 +4663,35 @@
     // Pop the return test context from the expression context stack.
     ASSERT(ast_context() == inlined_test_context());
     ClearInlinedTestContext();
-    delete target_state;
 
     // Forward to the real test context.
     if (if_true->HasPredecessor()) {
-      if_true->SetJoinId(ast_id);
+      if_true->SetJoinId(expr->id());
       HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
-      if_true->Goto(true_target, function_state()->drop_extra());
+      if_true->Goto(true_target);
     }
     if (if_false->HasPredecessor()) {
-      if_false->SetJoinId(ast_id);
+      if_false->SetJoinId(expr->id());
       HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
-      if_false->Goto(false_target, function_state()->drop_extra());
+      if_false->Goto(false_target);
     }
     set_current_block(NULL);
-    return true;
 
   } else if (function_return()->HasPredecessor()) {
-    function_return()->SetJoinId(ast_id);
+    function_return()->SetJoinId(expr->id());
     set_current_block(function_return());
   } else {
     set_current_block(NULL);
   }
-  delete target_state;
+
   return true;
 }
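
The restored TryInline() gates inlining on a short series of cheap rejections: source size, inlining depth, direct recursion, a cumulative node budget, the node count measured by re-parsing, arguments-object use, and parameter arity. A standalone toy of that ordering; the limits are stand-ins for the real kMax* constants:

    #include <cstdio>

    static const int kMaxSourceSize = 600;      // stand-in value
    static const int kMaxInlinedSize = 196;     // stand-in value
    static const int kMaxInliningLevels = 5;    // stand-in value

    static bool CanInline(int source_size, int ast_nodes_added, int depth,
                          bool recursive, bool uses_arguments, bool arity_match,
                          bool limit_inlining) {
      if (limit_inlining && source_size > kMaxSourceSize) return false;
      if (depth > kMaxInliningLevels) return false;
      if (recursive) return false;
      if (limit_inlining && ast_nodes_added > kMaxInlinedSize) return false;
      // After the revert, arguments-object users and arity mismatches are
      // rejected outright rather than handled specially.
      if (uses_arguments || !arity_match) return false;
      return true;
    }

    int main() {
      printf("%d\n", CanInline(100, 50, 1, false, false, true, true));  // 1
      printf("%d\n", CanInline(100, 50, 1, false, true, true, true));   // 0
      return 0;
    }
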
 
 
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
-  // The function call we are inlining is a method call if the call
-  // is a property call.
-  CallKind call_kind = (expr->expression()->AsProperty() == NULL)
-      ? CALL_AS_FUNCTION
-      : CALL_AS_METHOD;
-
-  return TryInline(call_kind,
-                   expr->target(),
-                   expr->arguments(),
-                   NULL,
-                   expr->id(),
-                   expr->ReturnId(),
-                   drop_extra ? DROP_EXTRA_ON_RETURN : NORMAL_RETURN);
-}
-
-
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr, HValue* receiver) {
-  return TryInline(CALL_AS_FUNCTION,
-                   expr->target(),
-                   expr->arguments(),
-                   receiver,
-                   expr->id(),
-                   expr->ReturnId(),
-                   CONSTRUCT_CALL_RETURN);
-}
-
-
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
-  if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
-  BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
-  switch (id) {
-    case kMathRound:
-    case kMathAbs:
-    case kMathSqrt:
-    case kMathLog:
-    case kMathSin:
-    case kMathCos:
-    case kMathTan:
-      if (expr->arguments()->length() == 1) {
-        HValue* argument = Pop();
-        HValue* context = environment()->LookupContext();
-        Drop(1);  // Receiver.
-        HUnaryMathOperation* op =
-            new(zone()) HUnaryMathOperation(context, argument, id);
-        op->set_position(expr->position());
-        if (drop_extra) Drop(1);  // Optionally drop the function.
-        ast_context()->ReturnInstruction(op, expr->id());
-        return true;
-      }
-      break;
-    default:
-      // Not supported for inlining yet.
-      break;
-  }
-  return false;
-}
-
-
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
-                                               HValue* receiver,
-                                               Handle<Map> receiver_map,
-                                               CheckType check_type) {
+bool HGraphBuilder::TryInlineBuiltinFunction(Call* expr,
+                                             HValue* receiver,
+                                             Handle<Map> receiver_map,
+                                             CheckType check_type) {
   ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
   // Try to inline calls like Math.* as operations in the calling function.
   if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -5562,7 +4728,6 @@
     case kMathLog:
     case kMathSin:
     case kMathCos:
-    case kMathTan:
       if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
         HValue* argument = Pop();
@@ -5599,7 +4764,7 @@
             AddInstruction(square_root);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
-            ASSERT(!square_root->HasObservableSideEffects());
+            ASSERT(!square_root->HasSideEffects());
             result = new(zone()) HDiv(context, double_one, square_root);
           } else if (exponent == 2.0) {
             result = new(zone()) HMul(context, left, left);
@@ -5617,80 +4782,6 @@
         return true;
       }
       break;
-    case kMathRandom:
-      if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
-        AddCheckConstantFunction(expr, receiver, receiver_map, true);
-        Drop(1);  // Receiver.
-        HValue* context = environment()->LookupContext();
-        HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-        AddInstruction(global_object);
-        HRandom* result = new(zone()) HRandom(global_object);
-        ast_context()->ReturnInstruction(result, expr->id());
-        return true;
-      }
-      break;
-    case kMathMax:
-    case kMathMin:
-      if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
-        AddCheckConstantFunction(expr, receiver, receiver_map, true);
-        HValue* right = Pop();
-        HValue* left = Pop();
-        Pop();  // Pop receiver.
-
-        HValue* left_operand = left;
-        HValue* right_operand = right;
-
-        // If we do not have two integers, we convert to double for comparison.
-        if (!left->representation().IsInteger32() ||
-            !right->representation().IsInteger32()) {
-          if (!left->representation().IsDouble()) {
-            HChange* left_convert = new(zone()) HChange(
-                left,
-                Representation::Double(),
-                false,  // Do not truncate when converting to double.
-                true);  // Deoptimize for undefined.
-            left_convert->SetFlag(HValue::kBailoutOnMinusZero);
-            left_operand = AddInstruction(left_convert);
-          }
-          if (!right->representation().IsDouble()) {
-            HChange* right_convert = new(zone()) HChange(
-                right,
-                Representation::Double(),
-                false,  // Do not truncate when converting to double.
-                true);  // Deoptimize for undefined.
-            right_convert->SetFlag(HValue::kBailoutOnMinusZero);
-            right_operand = AddInstruction(right_convert);
-          }
-        }
-
-        ASSERT(left_operand->representation().Equals(
-               right_operand->representation()));
-        ASSERT(!left_operand->representation().IsTagged());
-
-        Token::Value op = (id == kMathMin) ? Token::LT : Token::GT;
-
-        HCompareIDAndBranch* compare =
-            new(zone()) HCompareIDAndBranch(left_operand, right_operand, op);
-        compare->SetInputRepresentation(left_operand->representation());
-
-        HBasicBlock* return_left = graph()->CreateBasicBlock();
-        HBasicBlock* return_right = graph()->CreateBasicBlock();
-
-        compare->SetSuccessorAt(0, return_left);
-        compare->SetSuccessorAt(1, return_right);
-        current_block()->Finish(compare);
-
-        set_current_block(return_left);
-        Push(left);
-        set_current_block(return_right);
-        Push(right);
-
-        HBasicBlock* join = CreateJoin(return_left, return_right, expr->id());
-        set_current_block(join);
-        ast_context()->ReturnValue(Pop());
-        return true;
-      }
-      break;
     default:
       // Not yet supported for inlining.
       break;
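
For reference, the deleted kMathMax/kMathMin lowering above converted both operands to a common non-tagged representation, compared them with Token::LT (min) or Token::GT (max), and returned the original operand rather than the converted copy. Its observable result reduces to this toy, ignoring the NaN and -0 edge cases the real code covers by deoptimizing:

    #include <cstdio>

    static double MinMax(double left, double right, bool is_min) {
      bool take_left = is_min ? (left < right) : (left > right);
      return take_left ? left : right;  // the original operand is returned
    }

    int main() {
      printf("%g\n", MinMax(1.5, 2.0, true));   // 1.5
      printf("%g\n", MinMax(1.5, 2.0, false));  // 2
      return 0;
    }
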
@@ -5724,6 +4815,13 @@
   HValue* arg_two_value = environment()->Lookup(arg_two->var());
   if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
 
+  // Our implementation of arguments (based on this stack frame or an
+  // adapter below it) does not work for inlined functions.
+  if (function_state()->outer() != NULL) {
+    Bailout("Function.prototype.apply optimization in inlined function");
+    return true;
+  }
+
   // Found pattern f.apply(receiver, arguments).
   VisitForValue(prop->obj());
   if (HasStackOverflow() || current_block() == NULL) return true;
@@ -5734,46 +4832,13 @@
   VisitForValue(args->at(0));
   if (HasStackOverflow() || current_block() == NULL) return true;
   HValue* receiver = Pop();
-
-  if (function_state()->outer() == NULL) {
-    HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
-    HInstruction* length =
-        AddInstruction(new(zone()) HArgumentsLength(elements));
-    HValue* wrapped_receiver =
-        AddInstruction(new(zone()) HWrapReceiver(receiver, function));
-    HInstruction* result =
-        new(zone()) HApplyArguments(function,
-                                    wrapped_receiver,
-                                    length,
-                                    elements);
-    result->set_position(expr->position());
-    ast_context()->ReturnInstruction(result, expr->id());
-    return true;
-  } else {
-    // We are inside inlined function and we know exactly what is inside
-    // arguments object.
-    HValue* context = environment()->LookupContext();
-
-    HValue* wrapped_receiver =
-        AddInstruction(new(zone()) HWrapReceiver(receiver, function));
-    PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
-
-    HEnvironment* arguments_env = environment()->arguments_environment();
-
-    int parameter_count = arguments_env->parameter_count();
-    for (int i = 1; i < arguments_env->parameter_count(); i++) {
-      PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
-    }
-
-    HInvokeFunction* call = new(zone()) HInvokeFunction(
-        context,
-        function,
-        parameter_count);
-    Drop(parameter_count);
-    call->set_position(expr->position());
-    ast_context()->ReturnInstruction(call, expr->id());
-    return true;
-  }
+  HInstruction* elements = AddInstruction(new(zone()) HArgumentsElements);
+  HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
+  HInstruction* result =
+      new(zone()) HApplyArguments(function, receiver, length, elements);
+  result->set_position(expr->position());
+  ast_context()->ReturnInstruction(result, expr->id());
+  return true;
 }
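
After this hunk, the f.apply(receiver, arguments) fast path exists only in the outermost frame; inside an inlined function the builder bails out of optimizing the whole function rather than expanding the known arguments inline. A toy of that guard:

    #include <cstdio>

    static const char* ApplyPath(bool inside_inlined_function) {
      if (inside_inlined_function) return "bailout";  // Bailout(...) above
      return "HApplyArguments";                       // the fast path
    }

    int main() {
      printf("%s\n", ApplyPath(false));  // HApplyArguments
      printf("%s\n", ApplyPath(true));   // bailout
      return 0;
    }
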
 
 
@@ -5825,19 +4890,14 @@
       Handle<Map> receiver_map = (types == NULL || types->is_empty())
           ? Handle<Map>::null()
           : types->first();
-      if (TryInlineBuiltinMethodCall(expr,
-                                     receiver,
-                                     receiver_map,
-                                     expr->check_type())) {
-        if (FLAG_trace_inlining) {
-          PrintF("Inlining builtin ");
-          expr->target()->ShortPrint();
-          PrintF("\n");
-        }
+      if (TryInlineBuiltinFunction(expr,
+                                   receiver,
+                                   receiver_map,
+                                   expr->check_type())) {
         return;
       }
 
-      if (CallStubCompiler::HasCustomCallGenerator(expr->target()) ||
+      if (CallStubCompiler::HasCustomCallGenerator(*expr->target()) ||
           expr->check_type() != RECEIVER_MAP_CHECK) {
         // When the target has a custom call IC generator, use the IC,
         // because it is likely to generate better code.  Also use the IC
@@ -5848,7 +4908,7 @@
       } else {
         AddCheckConstantFunction(expr, receiver, receiver_map, true);
 
-        if (TryInlineCall(expr)) return;
+        if (TryInline(expr)) return;
         call = PreProcessCall(
             new(zone()) HCallConstantFunction(expr->target(),
                                               argument_count));
@@ -5865,21 +4925,17 @@
     }
 
   } else {
-    expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    // FIXME.
     bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
-    if (proxy != NULL && proxy->var()->is_possibly_eval()) {
-      return Bailout("possible direct call to eval");
-    }
-
     if (global_call) {
       Variable* var = proxy->var();
       bool known_global_function = false;
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      LookupResult lookup(isolate());
+      LookupResult lookup;
       GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
       if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
@@ -5908,15 +4964,7 @@
                IsGlobalObject());
         environment()->SetExpressionStackAt(receiver_index, global_receiver);
 
-        if (TryInlineBuiltinFunctionCall(expr, false)) {  // Nothing to drop.
-          if (FLAG_trace_inlining) {
-            PrintF("Inlining builtin ");
-            expr->target()->ShortPrint();
-            PrintF("\n");
-          }
-          return;
-        }
-        if (TryInlineCall(expr)) return;
+        if (TryInline(expr)) return;
         call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(),
                                                            argument_count));
       } else {
@@ -5930,40 +4978,8 @@
         Drop(argument_count);
       }
 
-    } else if (expr->IsMonomorphic()) {
-      // The function is on the stack in the unoptimized code during
-      // evaluation of the arguments.
-      CHECK_ALIVE(VisitForValue(expr->expression()));
-      HValue* function = Top();
-      HValue* context = environment()->LookupContext();
-      HGlobalObject* global = new(zone()) HGlobalObject(context);
-      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
-      AddInstruction(global);
-      PushAndAdd(receiver);
-      CHECK_ALIVE(VisitExpressions(expr->arguments()));
-      AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
-
-      if (TryInlineBuiltinFunctionCall(expr, true)) {  // Drop the function.
-        if (FLAG_trace_inlining) {
-          PrintF("Inlining builtin ");
-          expr->target()->ShortPrint();
-          PrintF("\n");
-        }
-        return;
-      }
-
-      if (TryInlineCall(expr, true)) {   // Drop function from environment.
-        return;
-      } else {
-        call = PreProcessCall(new(zone()) HInvokeFunction(context,
-                                                          function,
-                                                          argument_count));
-        Drop(1);  // The function.
-      }
-
     } else {
-      CHECK_ALIVE(VisitForValue(expr->expression()));
-      HValue* function = Top();
+      CHECK_ALIVE(VisitArgument(expr->expression()));
       HValue* context = environment()->LookupContext();
       HGlobalObject* global_object = new(zone()) HGlobalObject(context);
       HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global_object);
@@ -5972,7 +4988,9 @@
       PushAndAdd(new(zone()) HPushArgument(receiver));
       CHECK_ALIVE(VisitArgumentList(expr->arguments()));
 
-      call = new(zone()) HCallFunction(context, function, argument_count);
+      // The function to call is treated as an argument to the call function
+      // stub.
+      call = new(zone()) HCallFunction(context, argument_count + 1);
       Drop(argument_count + 1);
     }
   }
@@ -5982,72 +5000,25 @@
 }
 
 
-// Checks whether allocation using the given constructor can be inlined.
-static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
-  return constructor->has_initial_map() &&
-      constructor->initial_map()->instance_type() == JS_OBJECT_TYPE;
-}
-
-
 void HGraphBuilder::VisitCallNew(CallNew* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  expr->RecordTypeFeedback(oracle());
-  int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
+  // The constructor function is also used as the receiver argument to the
+  // JS construct call builtin.
+  HValue* constructor = NULL;
+  CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
+  CHECK_ALIVE(VisitArgumentList(expr->arguments()));
+
   HValue* context = environment()->LookupContext();
 
-  if (FLAG_inline_construct &&
-      expr->IsMonomorphic() &&
-      IsAllocationInlineable(expr->target())) {
-    // The constructor function is on the stack in the unoptimized code
-    // during evaluation of the arguments.
-    CHECK_ALIVE(VisitForValue(expr->expression()));
-    HValue* function = Top();
-    CHECK_ALIVE(VisitExpressions(expr->arguments()));
-    Handle<JSFunction> constructor = expr->target();
-    HValue* check = AddInstruction(
-        new(zone()) HCheckFunction(function, constructor));
-
-    // Force completion of inobject slack tracking before generating
-    // allocation code to finalize instance size.
-    if (constructor->shared()->IsInobjectSlackTrackingInProgress()) {
-      constructor->shared()->CompleteInobjectSlackTracking();
-    }
-
-    // Replace the constructor function with a newly allocated receiver.
-    HInstruction* receiver = new(zone()) HAllocateObject(context, constructor);
-    // Index of the receiver from the top of the expression stack.
-    const int receiver_index = argument_count - 1;
-    AddInstruction(receiver);
-    ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
-    environment()->SetExpressionStackAt(receiver_index, receiver);
-
-    if (TryInlineConstruct(expr, receiver)) return;
-
-    // TODO(mstarzinger): For now we remove the previous HAllocateObject and
-    // add HPushArgument for the arguments in case inlining failed.  What we
-    // actually should do is emit HInvokeFunction on the constructor instead
-    // of using HCallNew as a fallback.
-    receiver->DeleteAndReplaceWith(NULL);
-    check->DeleteAndReplaceWith(NULL);
-    environment()->SetExpressionStackAt(receiver_index, function);
-    HInstruction* call = PreProcessCall(
-        new(zone()) HCallNew(context, function, argument_count));
-    call->set_position(expr->position());
-    return ast_context()->ReturnInstruction(call, expr->id());
-  } else {
-    // The constructor function is both an operand to the instruction and an
-    // argument to the construct call.
-    HValue* constructor = NULL;
-    CHECK_ALIVE(constructor = VisitArgument(expr->expression()));
-    CHECK_ALIVE(VisitArgumentList(expr->arguments()));
-    HInstruction* call =
-        new(zone()) HCallNew(context, constructor, argument_count);
-    Drop(argument_count);
-    call->set_position(expr->position());
-    return ast_context()->ReturnInstruction(call, expr->id());
-  }
+  // The constructor is both an operand to the instruction and an argument
+  // to the construct call.
+  int arg_count = expr->arguments()->length() + 1;  // Plus constructor.
+  HCallNew* call = new(zone()) HCallNew(context, constructor, arg_count);
+  call->set_position(expr->position());
+  Drop(arg_count);
+  return ast_context()->ReturnInstruction(call, expr->id());
 }
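
The deleted branch above inlined object allocation for monomorphic 'new' calls whose constructor already had an initial map of plain JS_OBJECT_TYPE; after the revert every 'new' goes through the generic HCallNew builtin. A toy version of the deleted IsAllocationInlineable() predicate, with illustrative types:

    #include <cstdio>

    enum InstanceType { JS_OBJECT_TYPE, JS_ARRAY_TYPE };

    struct Ctor { bool has_initial_map; InstanceType instance_type; };

    static bool IsAllocationInlineable(const Ctor& c) {
      return c.has_initial_map && c.instance_type == JS_OBJECT_TYPE;
    }

    int main() {
      Ctor plain = {true, JS_OBJECT_TYPE};
      Ctor lazy = {false, JS_OBJECT_TYPE};  // no initial map allocated yet
      printf("%d %d\n", IsAllocationInlineable(plain),
             IsAllocationInlineable(lazy));  // 1 0
      return 0;
    }
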
 
 
@@ -6214,6 +5185,7 @@
 
 
 void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+  // TODO(svenpanne) Perhaps a switch/virtual function is nicer here.
   if (ast_context()->IsTest()) {
     TestContext* context = TestContext::cast(ast_context());
     VisitForControl(expr->expression(),
@@ -6235,7 +5207,7 @@
                                 materialize_true));
 
   if (materialize_false->HasPredecessor()) {
-    materialize_false->SetJoinId(expr->MaterializeFalseId());
+    materialize_false->SetJoinId(expr->expression()->id());
     set_current_block(materialize_false);
     Push(graph()->GetConstantFalse());
   } else {
@@ -6243,7 +5215,7 @@
   }
 
   if (materialize_true->HasPredecessor()) {
-    materialize_true->SetJoinId(expr->MaterializeTrueId());
+    materialize_true->SetJoinId(expr->expression()->id());
     set_current_block(materialize_true);
     Push(graph()->GetConstantTrue());
   } else {
@@ -6312,7 +5284,7 @@
 
   if (proxy != NULL) {
     Variable* var = proxy->var();
-    if (var->mode() == CONST)  {
+    if (var->mode() == Variable::CONST)  {
       return Bailout("unsupported count operation with const");
     }
     // Argument of the count operation is a variable, not a property.
@@ -6353,15 +5325,10 @@
         }
 
         HValue* context = BuildContextChainWalk(var);
-        HStoreContextSlot::Mode mode =
-            (var->mode() == LET || var->mode() == CONST_HARMONY)
-            ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
         HStoreContextSlot* instr =
-            new(zone()) HStoreContextSlot(context, var->index(), mode, after);
+            new(zone()) HStoreContextSlot(context, var->index(), after);
         AddInstruction(instr);
-        if (instr->HasObservableSideEffects()) {
-          AddSimulate(expr->AssignmentId());
-        }
+        if (instr->HasSideEffects()) AddSimulate(expr->AssignmentId());
         break;
       }
 
@@ -6390,7 +5357,7 @@
         load = BuildLoadNamedGeneric(obj, prop);
       }
       PushAndAdd(load);
-      if (load->HasObservableSideEffects()) AddSimulate(expr->CountId());
+      if (load->HasSideEffects()) AddSimulate(expr->CountId());
 
       after = BuildIncrement(returns_original_input, expr);
       input = Pop();
@@ -6403,7 +5370,7 @@
       // necessary.
       environment()->SetExpressionStackAt(0, after);
       if (returns_original_input) environment()->SetExpressionStackAt(1, input);
-      if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+      if (store->HasSideEffects()) AddSimulate(expr->AssignmentId());
 
     } else {
       // Keyed property.
@@ -6480,34 +5447,38 @@
         AddInstruction(HCheckInstanceType::NewIsString(right));
         instr = new(zone()) HStringAdd(context, left, right);
       } else {
-        instr = HAdd::NewHAdd(zone(), context, left, right);
+        instr = new(zone()) HAdd(context, left, right);
       }
       break;
     case Token::SUB:
-      instr = HSub::NewHSub(zone(), context, left, right);
+      instr = new(zone()) HSub(context, left, right);
       break;
     case Token::MUL:
-      instr = HMul::NewHMul(zone(), context, left, right);
+      instr = new(zone()) HMul(context, left, right);
       break;
     case Token::MOD:
-      instr = HMod::NewHMod(zone(), context, left, right);
+      instr = new(zone()) HMod(context, left, right);
       break;
     case Token::DIV:
-      instr = HDiv::NewHDiv(zone(), context, left, right);
+      instr = new(zone()) HDiv(context, left, right);
       break;
     case Token::BIT_XOR:
+      instr = new(zone()) HBitXor(context, left, right);
+      break;
     case Token::BIT_AND:
+      instr = new(zone()) HBitAnd(context, left, right);
+      break;
     case Token::BIT_OR:
-      instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
+      instr = new(zone()) HBitOr(context, left, right);
       break;
     case Token::SAR:
-      instr = HSar::NewHSar(zone(), context, left, right);
+      instr = new(zone()) HSar(context, left, right);
       break;
     case Token::SHR:
-      instr = HShr::NewHShr(zone(), context, left, right);
+      instr = new(zone()) HShr(context, left, right);
       break;
     case Token::SHL:
-      instr = HShl::NewHShl(zone(), context, left, right);
+      instr = new(zone()) HShl(context, left, right);
       break;
     default:
       UNREACHABLE();
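
The arithmetic and bitwise cases above revert from static NewH* factory helpers back to direct constructor calls. One plausible motivation for such factories is that they can specialize or fold at construction time; the toy below illustrates that idea only and is not the actual NewHAdd logic:

    #include <cstdio>

    struct Node { bool is_constant; int value; };

    // A factory can inspect operands and fold; a plain constructor cannot.
    static Node NewAdd(Node left, Node right) {
      if (left.is_constant && right.is_constant)
        return Node{true, left.value + right.value};  // folded constant
      return Node{false, 0};                          // generic add node
    }

    int main() {
      Node n = NewAdd(Node{true, 2}, Node{true, 3});
      printf("%d %d\n", n.is_constant, n.value);  // 1 5
      return 0;
    }
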
@@ -6700,75 +5671,26 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
-                                               HTypeof* typeof_expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+                                               Expression* expr,
                                                Handle<String> check) {
-  // Note: The HTypeof itself is removed during canonicalization, if possible.
-  HValue* value = typeof_expr->value();
-  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
-  instr->set_position(expr->position());
-  return ast_context()->ReturnControl(instr, expr->id());
+  CHECK_ALIVE(VisitForTypeOf(expr));
+  HValue* expr_value = Pop();
+  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
+  instr->set_position(compare_expr->position());
+  return ast_context()->ReturnControl(instr, compare_expr->id());
 }
 
 
-static bool MatchLiteralCompareNil(HValue* left,
-                                   Token::Value op,
-                                   HValue* right,
-                                   Handle<Object> nil,
-                                   HValue** expr) {
-  if (left->IsConstant() &&
-      HConstant::cast(left)->handle().is_identical_to(nil) &&
-      Token::IsEqualityOp(op)) {
-    *expr = right;
-    return true;
-  }
-  return false;
-}
-
-
-static bool MatchLiteralCompareTypeof(HValue* left,
-                                      Token::Value op,
-                                      HValue* right,
-                                      HTypeof** typeof_expr,
-                                      Handle<String>* check) {
-  if (left->IsTypeof() &&
-      Token::IsEqualityOp(op) &&
-      right->IsConstant() &&
-      HConstant::cast(right)->HasStringValue()) {
-    *typeof_expr = HTypeof::cast(left);
-    *check = Handle<String>::cast(HConstant::cast(right)->handle());
-    return true;
-  }
-  return false;
-}
-
-
-static bool IsLiteralCompareTypeof(HValue* left,
-                                   Token::Value op,
-                                   HValue* right,
-                                   HTypeof** typeof_expr,
-                                   Handle<String>* check) {
-  return MatchLiteralCompareTypeof(left, op, right, typeof_expr, check) ||
-      MatchLiteralCompareTypeof(right, op, left, typeof_expr, check);
-}
-
-
-static bool IsLiteralCompareNil(HValue* left,
-                                Token::Value op,
-                                HValue* right,
-                                Handle<Object> nil,
-                                HValue** expr) {
-  return MatchLiteralCompareNil(left, op, right, nil, expr) ||
-      MatchLiteralCompareNil(right, op, left, nil, expr);
-}
-
-
-static bool IsLiteralCompareBool(HValue* left,
-                                 Token::Value op,
-                                 HValue* right) {
-  return op == Token::EQ_STRICT &&
-      ((left->IsConstant() && HConstant::cast(left)->handle()->IsBoolean()) ||
-       (right->IsConstant() && HConstant::cast(right)->handle()->IsBoolean()));
+void HGraphBuilder::HandleLiteralCompareUndefined(
+    CompareOperation* compare_expr, Expression* expr) {
+  CHECK_ALIVE(VisitForValue(expr));
+  HValue* lhs = Pop();
+  HValue* rhs = graph()->GetConstantUndefined();
+  HCompareObjectEqAndBranch* instr =
+      new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
+  instr->set_position(compare_expr->position());
+  return ast_context()->ReturnControl(instr, compare_expr->id());
 }
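
The deleted static matchers above recognized "typeof x == 'str'" and "x == undefined" with the literal on either side of the operator; the restored code asks the AST instead (IsLiteralCompareTypeof / IsLiteralCompareUndefined below). A standalone sketch of the commutative matching being removed:

    #include <cstdio>
    #include <string>

    struct Operand { bool is_typeof; bool is_string_constant; std::string str; };

    static bool MatchTypeofCompare(const Operand& a, const Operand& b,
                                   std::string* check) {
      if (a.is_typeof && b.is_string_constant) { *check = b.str; return true; }
      return false;
    }

    static bool IsTypeofCompare(const Operand& a, const Operand& b,
                                std::string* check) {
      // Try both operand orders, as the deleted IsLiteralCompareTypeof() did.
      return MatchTypeofCompare(a, b, check) || MatchTypeofCompare(b, a, check);
    }

    int main() {
      Operand t = {true, false, ""};        // typeof x
      Operand c = {false, true, "number"};  // the string literal
      std::string check;
      printf("%d %s\n", IsTypeofCompare(c, t, &check), check.c_str());  // 1 number
      return 0;
    }
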
 
 
@@ -6789,9 +5711,21 @@
     return ast_context()->ReturnControl(instr, expr->id());
   }
 
+  // Check for special cases that compare against literals.
+  Expression* sub_expr;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    HandleLiteralCompareTypeof(expr, sub_expr, check);
+    return;
+  }
+
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    HandleLiteralCompareUndefined(expr, sub_expr);
+    return;
+  }
+
   TypeInfo type_info = oracle()->CompareType(expr);
   // Check if this expression was ever executed according to type feedback.
-  // Note that for the special typeof/null/undefined cases we get unknown here.
   if (type_info.IsUninitialized()) {
     AddInstruction(new(zone()) HSoftDeoptimize);
     current_block()->MarkAsDeoptimizing();
@@ -6806,26 +5740,6 @@
   HValue* left = Pop();
   Token::Value op = expr->op();
 
-  HTypeof* typeof_expr = NULL;
-  Handle<String> check;
-  if (IsLiteralCompareTypeof(left, op, right, &typeof_expr, &check)) {
-    return HandleLiteralCompareTypeof(expr, typeof_expr, check);
-  }
-  HValue* sub_expr = NULL;
-  Factory* f = graph()->isolate()->factory();
-  if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) {
-    return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
-  }
-  if (IsLiteralCompareNil(left, op, right, f->null_value(), &sub_expr)) {
-    return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
-  }
-  if (IsLiteralCompareBool(left, op, right)) {
-    HCompareObjectEqAndBranch* result =
-        new(zone()) HCompareObjectEqAndBranch(left, right);
-    result->set_position(expr->position());
-    return ast_context()->ReturnControl(result, expr->id());
-  }
-
   if (op == Token::INSTANCEOF) {
     // Check to see if the rhs of the instanceof is a global function not
     // residing in new space. If it is we assume that the function will stay the
@@ -6838,9 +5752,9 @@
         !info()->global_object()->IsAccessCheckNeeded()) {
       Handle<String> name = proxy->name();
       Handle<GlobalObject> global(info()->global_object());
-      LookupResult lookup(isolate());
+      LookupResult lookup;
       global->Lookup(*name, &lookup);
-      if (lookup.IsFound() &&
+      if (lookup.IsProperty() &&
           lookup.type() == NORMAL &&
           lookup.GetValue()->IsJSFunction()) {
         Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
@@ -6873,29 +5787,14 @@
     switch (op) {
       case Token::EQ:
       case Token::EQ_STRICT: {
-        // Can we get away with map check and not instance type check?
-        Handle<Map> map = oracle()->GetCompareMap(expr);
-        if (!map.is_null()) {
-          AddInstruction(new(zone()) HCheckNonSmi(left));
-          AddInstruction(new(zone()) HCheckMap(left, map, NULL,
-                                               ALLOW_ELEMENT_TRANSITION_MAPS));
-          AddInstruction(new(zone()) HCheckNonSmi(right));
-          AddInstruction(new(zone()) HCheckMap(right, map, NULL,
-                                               ALLOW_ELEMENT_TRANSITION_MAPS));
-          HCompareObjectEqAndBranch* result =
-              new(zone()) HCompareObjectEqAndBranch(left, right);
-          result->set_position(expr->position());
-          return ast_context()->ReturnControl(result, expr->id());
-        } else {
-          AddInstruction(new(zone()) HCheckNonSmi(left));
-          AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
-          AddInstruction(new(zone()) HCheckNonSmi(right));
-          AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
-          HCompareObjectEqAndBranch* result =
-              new(zone()) HCompareObjectEqAndBranch(left, right);
-          result->set_position(expr->position());
-          return ast_context()->ReturnControl(result, expr->id());
-        }
+        AddInstruction(new(zone()) HCheckNonSmi(left));
+        AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+        AddInstruction(new(zone()) HCheckNonSmi(right));
+        AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+        HCompareObjectEqAndBranch* result =
+            new(zone()) HCompareObjectEqAndBranch(left, right);
+        result->set_position(expr->position());
+        return ast_context()->ReturnControl(result, expr->id());
       }
       default:
         return Bailout("Unsupported non-primitive compare");
@@ -6928,16 +5827,14 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
-                                            HValue* value,
-                                            NilValue nil) {
+void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  EqualityKind kind =
-      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
-  HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
-  instr->set_position(expr->position());
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* value = Pop();
+  HIsNullAndBranch* instr =
+      new(zone()) HIsNullAndBranch(value, expr->is_strict());
   return ast_context()->ReturnControl(instr, expr->id());
 }
 
@@ -6946,94 +5843,41 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HThisFunction* self = new(zone()) HThisFunction(
-      function_state()->compilation_info()->closure());
+  HThisFunction* self = new(zone()) HThisFunction;
   return ast_context()->ReturnInstruction(self, expr->id());
 }
 
 
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int global_count = 0;
-  for (int i = 0; i < declarations->length(); i++) {
-    Declaration* decl = declarations->at(i);
-    FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-    HandleDeclaration(decl->proxy(),
-                      decl->mode(),
-                      fun_decl != NULL ? fun_decl->fun() : NULL,
-                      &global_count);
-  }
-
-  // Batch declare global functions and variables.
-  if (global_count > 0) {
-    Handle<FixedArray> array =
-        isolate()->factory()->NewFixedArray(2 * global_count, TENURED);
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-
-      if (var->IsUnallocated()) {
-        array->set(j++, *(var->name()));
-        FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration();
-        if (fun_decl == NULL) {
-          if (var->binding_needs_init()) {
-            // In case this binding needs initialization use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<SharedFunctionInfo> function =
-              Compiler::BuildFunctionInfo(fun_decl->fun(), info()->script());
-          // Check for stack-overflow exception.
-          if (function.is_null()) {
-            SetStackOverflow();
-            return;
-          }
-          array->set(j++, *function);
-        }
-      }
-    }
-    int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
-                DeclareGlobalsNativeFlag::encode(info()->is_native()) |
-                DeclareGlobalsLanguageMode::encode(info()->language_mode());
-    HInstruction* result =
-        new(zone()) HDeclareGlobals(environment()->LookupContext(),
-                                    array,
-                                    flags);
-    AddInstruction(result);
-  }
+void HGraphBuilder::VisitDeclaration(Declaration* decl) {
+  HandleDeclaration(decl->proxy(), decl->mode(), decl->fun());
 }
 
 
 void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
-                                      VariableMode mode,
-                                      FunctionLiteral* function,
-                                      int* global_count) {
+                                      Variable::Mode mode,
+                                      FunctionLiteral* function) {
+  if (mode == Variable::LET) return Bailout("unsupported let declaration");
   Variable* var = proxy->var();
-  bool binding_needs_init =
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (var->location()) {
     case Variable::UNALLOCATED:
-      ++(*global_count);
-      return;
+      return Bailout("unsupported global declaration");
     case Variable::PARAMETER:
     case Variable::LOCAL:
     case Variable::CONTEXT:
-      if (binding_needs_init || function != NULL) {
+      if (mode == Variable::CONST || function != NULL) {
         HValue* value = NULL;
-        if (function != NULL) {
-          CHECK_ALIVE(VisitForValue(function));
-          value = Pop();
-        } else {
+        if (mode == Variable::CONST) {
           value = graph()->GetConstantHole();
+        } else {
+          VisitForValue(function);
+          value = Pop();
         }
         if (var->IsContextSlot()) {
           HValue* context = environment()->LookupContext();
-          HStoreContextSlot* store = new HStoreContextSlot(
-              context, var->index(), HStoreContextSlot::kNoCheck, value);
+          HStoreContextSlot* store =
+              new HStoreContextSlot(context, var->index(), value);
           AddInstruction(store);
-          if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+          if (store->HasSideEffects()) AddSimulate(proxy->id());
         } else {
           environment()->Bind(var, value);
         }
@@ -7045,51 +5889,6 @@
 }
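
The declaration handling reverts in two steps here: the deleted VisitDeclarations() batched every global declaration into one flat FixedArray of [name, initial value] pairs (a SharedFunctionInfo for functions, the hole for bindings needing initialization, undefined otherwise) and emitted a single HDeclareGlobals, whereas the restored code simply bails out on global and let declarations. A toy of the deleted pairing scheme:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // Two globals: a function declaration and a plain var. Slot 2*i holds
      // the name, slot 2*i+1 the initial value.
      std::vector<std::string> pairs;
      pairs.push_back("f");  pairs.push_back("<function info>");
      pairs.push_back("x");  pairs.push_back("undefined");
      for (size_t i = 0; i < pairs.size(); i += 2) {
        printf("%s -> %s\n", pairs[i].c_str(), pairs[i + 1].c_str());
      }
      return 0;
    }
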
 
 
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
-  UNREACHABLE();
-}
-
-
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
-  // TODO(rossberg)
-}
-
-
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
-  // TODO(rossberg)
-}
-
-
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
-  // TODO(rossberg)
-}
-
-
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
-  // TODO(rossberg)
-}
-
-
 // Generators for inline runtime functions.
 // Support for types.
 void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
@@ -7118,7 +5917,9 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
-      new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
+      new(zone()) HHasInstanceTypeAndBranch(value,
+                                            JS_FUNCTION_TYPE,
+                                            JS_FUNCTION_PROXY_TYPE);
   return ast_context()->ReturnControl(result, call->id());
 }
 
@@ -7188,11 +5989,10 @@
 void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
   ASSERT(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
-    // We are generating graph for inlined function.
-    HValue* value = function_state()->is_construct()
-        ? graph()->GetConstantTrue()
-        : graph()->GetConstantFalse();
-    return ast_context()->ReturnValue(value);
+    // We are generating the graph for an inlined function. Constructor
+    // inlining is currently not supported, so we can simply return
+    // false from %_IsConstructCall().
+    return ast_context()->ReturnValue(graph()->GetConstantFalse());
   } else {
     return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch,
                                         call->id());
@@ -7246,56 +6046,8 @@
 }
 
 
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
-  ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* date = Pop();
-  HDateField* result = new(zone()) HDateField(date, index);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
-  HValue* value = Pop();
-  HValue* object = Pop();
-  // Check if object is a not a smi.
-  HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object);
-  HBasicBlock* if_smi = graph()->CreateBasicBlock();
-  HBasicBlock* if_heap_object = graph()->CreateBasicBlock();
-  HBasicBlock* join = graph()->CreateBasicBlock();
-  smicheck->SetSuccessorAt(0, if_smi);
-  smicheck->SetSuccessorAt(1, if_heap_object);
-  current_block()->Finish(smicheck);
-  if_smi->Goto(join);
-
-  // Check if object is a JSValue.
-  set_current_block(if_heap_object);
-  HHasInstanceTypeAndBranch* typecheck =
-      new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE);
-  HBasicBlock* if_js_value = graph()->CreateBasicBlock();
-  HBasicBlock* not_js_value = graph()->CreateBasicBlock();
-  typecheck->SetSuccessorAt(0, if_js_value);
-  typecheck->SetSuccessorAt(1, not_js_value);
-  current_block()->Finish(typecheck);
-  not_js_value->Goto(join);
-
-  // Create in-object property store to kValueOffset.
-  set_current_block(if_js_value);
-  Handle<String> name = isolate()->factory()->undefined_symbol();
-  AddInstruction(new HStoreNamedField(object,
-                                      name,
-                                      value,
-                                      true,  // in-object store.
-                                      JSValue::kValueOffset));
-  if_js_value->Goto(join);
-  join->SetJoinId(call->id());
-  set_current_block(join);
-  return ast_context()->ReturnValue(value);
+  return Bailout("inlined runtime function: SetValueOf");
 }
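
The diamond that the revert deletes here is easier to follow as straight-line pseudocode. This is a hedged sketch of the semantics only (toy structs, not HIR values): smi receivers and non-JSValue objects skip the store, and every path yields the incoming value:

    #include <cassert>

    // Toy stand-ins; the real code builds HIR basic blocks, not C++.
    struct Object {
      bool is_smi;
      bool is_js_value;
      int value_slot;  // Models the in-object field at JSValue::kValueOffset.
    };

    // %_SetValueOf(object, value): store into a JSValue wrapper when the
    // receiver really is one; every path produces `value` as the result.
    int SetValueOf(Object* obj, int value) {
      if (!obj->is_smi && obj->is_js_value) {
        obj->value_slot = value;  // The "if_js_value" arm of the removed diamond.
      }
      return value;  // Smi and non-JSValue arms go straight to the join.
    }

    int main() {
      Object wrapper{false, true, 0};
      assert(SetValueOf(&wrapper, 42) == 42 && wrapper.value_slot == 42);
    }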
 
 
@@ -7361,11 +6113,7 @@
 
 // Fast support for Math.random().
 void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
-  HValue* context = environment()->LookupContext();
-  HGlobalObject* global_object = new(zone()) HGlobalObject(context);
-  AddInstruction(global_object);
-  HRandom* result = new(zone()) HRandom(global_object);
-  return ast_context()->ReturnInstruction(result, call->id());
+  return Bailout("inlined runtime function: RandomHeapNumber");
 }
 
 
@@ -7462,37 +6210,12 @@
     CHECK_ALIVE(VisitArgument(call->arguments()->at(i)));
   }
   CHECK_ALIVE(VisitForValue(call->arguments()->last()));
-
   HValue* function = Pop();
   HValue* context = environment()->LookupContext();
-
-  // Branch for function proxies, or other non-functions.
-  HHasInstanceTypeAndBranch* typecheck =
-      new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE);
-  HBasicBlock* if_jsfunction = graph()->CreateBasicBlock();
-  HBasicBlock* if_nonfunction = graph()->CreateBasicBlock();
-  HBasicBlock* join = graph()->CreateBasicBlock();
-  typecheck->SetSuccessorAt(0, if_jsfunction);
-  typecheck->SetSuccessorAt(1, if_nonfunction);
-  current_block()->Finish(typecheck);
-
-  set_current_block(if_jsfunction);
-  HInstruction* invoke_result = AddInstruction(
-      new(zone()) HInvokeFunction(context, function, arg_count));
+  HInvokeFunction* result =
+      new(zone()) HInvokeFunction(context, function, arg_count);
   Drop(arg_count);
-  Push(invoke_result);
-  if_jsfunction->Goto(join);
-
-  set_current_block(if_nonfunction);
-  HInstruction* call_result = AddInstruction(
-      new(zone()) HCallFunction(context, function, arg_count));
-  Drop(arg_count);
-  Push(call_result);
-  if_nonfunction->Goto(join);
-
-  set_current_block(join);
-  join->SetJoinId(call->id());
-  return ast_context()->ReturnValue(Pop());
+  return ast_context()->ReturnInstruction(result, call->id());
 }
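
The removed graph split calls on the callee's type: a direct HInvokeFunction when it is known to be a JSFunction, a generic HCallFunction (which copes with proxies) otherwise, with both arms meeting at a join. A rough stand-in sketch, with toy functions in place of the two HIR instructions:

    #include <cassert>

    enum CalleeKind { JS_FUNCTION, FUNCTION_PROXY };

    // Toy stand-ins for the two HIR instructions.
    int InvokeFunction(int arg) { return arg + 1; }            // direct path
    int CallFunction(int arg) { return InvokeFunction(arg); }  // generic path

    int CallAny(CalleeKind kind, int arg) {
      // In the HIR version each arm pushes its result and both fall
      // through to a join block; here a conditional expression suffices.
      return kind == JS_FUNCTION ? InvokeFunction(arg) : CallFunction(arg);
    }

    int main() {
      assert(CallAny(JS_FUNCTION, 1) == CallAny(FUNCTION_PROXY, 1));
    }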
 
 
@@ -7532,18 +6255,6 @@
 }
 
 
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
-  ASSERT_EQ(1, call->arguments()->length());
-  CHECK_ALIVE(VisitArgumentList(call->arguments()));
-  HValue* context = environment()->LookupContext();
-  HCallStub* result =
-      new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1);
-  result->set_transcendental_type(TranscendentalCache::TAN);
-  Drop(1);
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
 void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
   ASSERT_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitArgumentList(call->arguments()));
@@ -7591,7 +6302,6 @@
     : closure_(closure),
       values_(0),
       assigned_variables_(4),
-      frame_type_(JS_FUNCTION),
       parameter_count_(0),
       specials_count_(1),
       local_count_(0),
@@ -7606,7 +6316,6 @@
 HEnvironment::HEnvironment(const HEnvironment* other)
     : values_(0),
       assigned_variables_(0),
-      frame_type_(JS_FUNCTION),
       parameter_count_(0),
       specials_count_(1),
       local_count_(0),
@@ -7618,23 +6327,6 @@
 }
 
 
-HEnvironment::HEnvironment(HEnvironment* outer,
-                           Handle<JSFunction> closure,
-                           FrameType frame_type,
-                           int arguments)
-    : closure_(closure),
-      values_(arguments),
-      assigned_variables_(0),
-      frame_type_(frame_type),
-      parameter_count_(arguments),
-      local_count_(0),
-      outer_(outer),
-      pop_count_(0),
-      push_count_(0),
-      ast_id_(AstNode::kNoNumber) {
-}
-
-
 void HEnvironment::Initialize(int parameter_count,
                               int local_count,
                               int stack_height) {
@@ -7652,7 +6344,6 @@
   closure_ = other->closure();
   values_.AddAll(other->values_);
   assigned_variables_.AddAll(other->assigned_variables_);
-  frame_type_ = other->frame_type_;
   parameter_count_ = other->parameter_count_;
   local_count_ = other->local_count_;
   if (other->outer_ != NULL) outer_ = other->outer_->Copy();  // Deep copy.
@@ -7707,8 +6398,9 @@
 
 
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  ASSERT(length() >= first_expression_index());
-  return length() == first_expression_index();
+  int first_expression = parameter_count() + specials_count() + local_count();
+  ASSERT(length() >= first_expression);
+  return length() == first_expression;
 }
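
Both versions of ExpressionStackIsEmpty encode the same value-array layout, [parameters] [specials] [locals] [expression stack]; the stack is empty exactly when the environment's length equals the index where expressions begin. A small self-contained illustration:

    #include <cassert>

    // [parameters] [specials] [locals] [expression stack]
    struct EnvLayout {
      int parameter_count, specials_count, local_count, length;
      int first_expression_index() const {
        return parameter_count + specials_count + local_count;
      }
      bool expression_stack_is_empty() const {
        assert(length >= first_expression_index());
        return length == first_expression_index();
      }
    };

    int main() {
      EnvLayout env{2, 1, 3, 6};  // 2 params, 1 special, 3 locals, empty stack
      assert(env.expression_stack_is_empty());
      env.length = 7;             // one value pushed on the expression stack
      assert(!env.expression_stack_is_empty());
    }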
 
 
@@ -7759,66 +6451,32 @@
 }
 
 
-HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
-                                                  Handle<JSFunction> target,
-                                                  FrameType frame_type,
-                                                  int arguments) const {
-  HEnvironment* new_env = new(closure()->GetIsolate()->zone())
-      HEnvironment(outer, target, frame_type, arguments + 1);
-  for (int i = 0; i <= arguments; ++i) {  // Include receiver.
-    new_env->Push(ExpressionStackAt(arguments - i));
-  }
-  new_env->ClearHistory();
-  return new_env;
-}
-
-
 HEnvironment* HEnvironment::CopyForInlining(
     Handle<JSFunction> target,
-    int arguments,
     FunctionLiteral* function,
     HConstant* undefined,
-    CallKind call_kind,
-    bool is_construct) const {
-  ASSERT(frame_type() == JS_FUNCTION);
-
-  Zone* zone = closure()->GetIsolate()->zone();
-
+    CallKind call_kind) const {
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
-
   HEnvironment* outer = Copy();
-  outer->Drop(arguments + 1);  // Including receiver.
+  outer->Drop(arity + 1);  // Including receiver.
   outer->ClearHistory();
-
-  if (is_construct) {
-    // Create artificial constructor stub environment.  The receiver should
-    // actually be the constructor function, but we pass the newly allocated
-    // object instead; DoComputeConstructStubFrame() relies on that.
-    outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
-  }
-
-  if (arity != arguments) {
-    // Create artificial arguments adaptation environment.
-    outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
-  }
-
+  Zone* zone = closure()->GetIsolate()->zone();
   HEnvironment* inner =
       new(zone) HEnvironment(outer, function->scope(), target);
   // Get the argument values from the original environment.
   for (int i = 0; i <= arity; ++i) {  // Include receiver.
-    HValue* push = (i <= arguments) ?
-        ExpressionStackAt(arguments - i) : undefined;
+    HValue* push = ExpressionStackAt(arity - i);
     inner->SetValueAt(i, push);
   }
   // If the function we are inlining is a strict mode function or a
   // builtin function, pass undefined as the receiver for function
   // calls (instead of the global receiver).
-  if ((target->shared()->native() || !function->is_classic_mode()) &&
-      call_kind == CALL_AS_FUNCTION && !is_construct) {
+  if ((target->shared()->native() || function->strict_mode()) &&
+      call_kind == CALL_AS_FUNCTION) {
     inner->SetValueAt(0, undefined);
   }
-  inner->SetValueAt(arity + 1, LookupContext());
+  inner->SetValueAt(arity + 1, outer->LookupContext());
   for (int i = arity + 2; i < inner->length(); ++i) {
     inner->SetValueAt(i, undefined);
   }
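
In the restored 3.6 CopyForInlining, the caller's environment simply drops arity + 1 slots (arguments plus receiver) and the inlined frame receives them back as receiver and parameters; the deleted 3.9 code above additionally built adaptor and constructor stub frames for arity mismatches. A minimal sketch with std::vector standing in for ZoneList and ints for HValue*:

    #include <cassert>
    #include <vector>

    // `outer` models the caller's expression stack; its last arity + 1
    // entries are the receiver followed by the arguments.
    std::vector<int> CopyForInlining(std::vector<int>* outer, int arity,
                                     bool undefined_receiver, int undefined) {
      std::vector<int> inner(outer->end() - (arity + 1), outer->end());
      outer->resize(outer->size() - (arity + 1));  // Drop args incl. receiver.
      if (undefined_receiver) inner[0] = undefined;  // strict/native callees
      return inner;
    }

    int main() {
      std::vector<int> stack = {99, /*receiver*/ 7, /*args*/ 1, 2};
      std::vector<int> inner = CopyForInlining(&stack, 2, false, -1);
      assert(stack.size() == 1 && inner.size() == 3 && inner[0] == 7);
    }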
@@ -7834,7 +6492,7 @@
     if (i == parameter_count()) stream->Add("specials\n");
     if (i == parameter_count() + specials_count()) stream->Add("locals\n");
     if (i == parameter_count() + specials_count() + local_count()) {
-      stream->Add("expressions\n");
+      stream->Add("expressions");
     }
     HValue* val = values_.at(i);
     stream->Add("%d: ", i);
@@ -7845,7 +6503,6 @@
     }
     stream->Add("\n");
   }
-  PrintF("\n");
 }
 
 
@@ -7910,10 +6567,7 @@
     }
 
     PrintEmptyProperty("xhandlers");
-    const char* flags = current->IsLoopSuccessorDominator()
-        ? "dom-loop-succ"
-        : "";
-    PrintStringProperty("flags", flags);
+    PrintEmptyProperty("flags");
 
     if (current->dominator() != NULL) {
       PrintBlockProperty("dominator", current->dominator()->block_id());
@@ -8014,7 +6668,7 @@
     PrintIndent();
     trace_.Add("%d %s", range->id(), type);
     if (range->HasRegisterAssigned()) {
-      LOperand* op = range->CreateAssignedOperand(ZONE);
+      LOperand* op = range->CreateAssignedOperand();
       int assigned_reg = op->index();
       if (op->IsDoubleRegister()) {
         trace_.Add(" \"%s\"",
@@ -8040,9 +6694,7 @@
     }
     LOperand* op = range->FirstHint();
     int hint_index = -1;
-    if (op != NULL && op->IsUnallocated()) {
-      hint_index = LUnallocated::cast(op)->virtual_register();
-    }
+    if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
     trace_.Add(" %d %d", parent_index, hint_index);
     UseInterval* cur_interval = range->first_interval();
     while (cur_interval != NULL && range->Covers(cur_interval->start())) {
@@ -8158,10 +6810,7 @@
     HStatistics::Instance()->SaveTiming(name_, end - start_, size);
   }
 
-  // Produce trace output if flag is set so that the first letter of the
-  // phase name matches the command line parameter FLAG_trace_phase.
-  if (FLAG_trace_hydrogen &&
-      OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) {
+  if (FLAG_trace_hydrogen) {
     if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_);
     if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_);
     if (allocator_ != NULL) {
@@ -8170,7 +6819,7 @@
   }
 
 #ifdef DEBUG
-  if (graph_ != NULL) graph_->Verify(false);  // No full verify.
+  if (graph_ != NULL) graph_->Verify();
   if (allocator_ != NULL) allocator_->Verify();
 #endif
 }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index e2779bb..03fbc73 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -121,12 +121,11 @@
 
   void Finish(HControlInstruction* last);
   void FinishExit(HControlInstruction* instruction);
-  void Goto(HBasicBlock* block, bool drop_extra = false);
+  void Goto(HBasicBlock* block);
 
   int PredecessorIndexOf(HBasicBlock* predecessor) const;
   void AddSimulate(int ast_id) { AddInstruction(CreateSimulate(ast_id)); }
   void AssignCommonDominator(HBasicBlock* other);
-  void AssignLoopSuccessorDominators();
 
   void FinishExitWithDeoptimization(HDeoptimize::UseEnvironment has_uses) {
     FinishExit(CreateDeoptimize(has_uses));
@@ -134,9 +133,7 @@
 
   // Add the inlined function exit sequence, adding an HLeaveInlined
   // instruction and updating the bailout environment.
-  void AddLeaveInlined(HValue* return_value,
-                       HBasicBlock* target,
-                       bool drop_extra = false);
+  void AddLeaveInlined(HValue* return_value, HBasicBlock* target);
 
   // If a target block is tagged as an inline function return, all
   // predecessors should contain the inlined exit sequence:
@@ -150,13 +147,6 @@
   bool IsDeoptimizing() const { return is_deoptimizing_; }
   void MarkAsDeoptimizing() { is_deoptimizing_ = true; }
 
-  bool IsLoopSuccessorDominator() const {
-    return dominates_loop_successors_;
-  }
-  void MarkAsLoopSuccessorDominator() {
-    dominates_loop_successors_ = true;
-  }
-
   inline Zone* zone();
 
 #ifdef DEBUG
@@ -190,22 +180,6 @@
   HBasicBlock* parent_loop_header_;
   bool is_inline_return_target_;
   bool is_deoptimizing_;
-  bool dominates_loop_successors_;
-};
-
-
-class HPredecessorIterator BASE_EMBEDDED {
- public:
-  explicit HPredecessorIterator(HBasicBlock* block)
-      : predecessor_list_(block->predecessors()), current_(0) { }
-
-  bool Done() { return current_ >= predecessor_list_->length(); }
-  HBasicBlock* Current() { return predecessor_list_->at(current_); }
-  void Advance() { current_++; }
-
- private:
-  const ZoneList<HBasicBlock*>* predecessor_list_;
-  int current_;
 };
 
 
@@ -269,13 +243,11 @@
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
-  bool CheckArgumentsPhiUses();
+  bool CheckPhis();
 
-  // Returns false if there are phi-uses of an uninitialized const
-  // which are not supported by the optimizing compiler.
-  bool CheckConstPhiUses();
-
-  void CollectPhis();
+  // Returns false if there are phi-uses of hole values coming
+  // from uninitialized consts.
+  bool CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
 
@@ -293,6 +265,7 @@
   HArgumentsObject* GetArgumentsObject() const {
     return arguments_object_.get();
   }
+  bool HasArgumentsObject() const { return arguments_object_.is_set(); }
 
   void SetArgumentsObject(HArgumentsObject* object) {
     arguments_object_.set(object);
@@ -310,29 +283,9 @@
   }
 
 #ifdef DEBUG
-  void Verify(bool do_full_verify) const;
+  void Verify() const;
 #endif
 
-  bool has_osr_loop_entry() {
-    return osr_loop_entry_.is_set();
-  }
-
-  HBasicBlock* osr_loop_entry() {
-    return osr_loop_entry_.get();
-  }
-
-  void set_osr_loop_entry(HBasicBlock* entry) {
-    osr_loop_entry_.set(entry);
-  }
-
-  ZoneList<HUnknownOSRValue*>* osr_values() {
-    return osr_values_.get();
-  }
-
-  void set_osr_values(ZoneList<HUnknownOSRValue*>* values) {
-    osr_values_.set(values);
-  }
-
  private:
   void Postorder(HBasicBlock* block,
                  BitVector* visited,
@@ -373,9 +326,6 @@
   SetOncePointer<HConstant> constant_hole_;
   SetOncePointer<HArgumentsObject> arguments_object_;
 
-  SetOncePointer<HBasicBlock> osr_loop_entry_;
-  SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
-
   DISALLOW_COPY_AND_ASSIGN(HGraph);
 };
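
The osr_loop_entry_ and osr_values_ members removed above follow the same SetOncePointer pattern as the surviving constant_hole_ and arguments_object_ fields: unset until first use, then assigned exactly once. A simplified re-sketch of that idiom (illustrative, not V8's actual helper):

    #include <cassert>

    template <typename T>
    class SetOncePointer {
     public:
      SetOncePointer() : pointer_(nullptr) {}
      bool is_set() const { return pointer_ != nullptr; }
      T* get() const { assert(is_set()); return pointer_; }
      void set(T* value) {
        assert(!is_set() && value != nullptr);  // At most one transition.
        pointer_ = value;
      }
     private:
      T* pointer_;
    };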
 
@@ -383,34 +333,18 @@
 Zone* HBasicBlock::zone() { return graph_->zone(); }
 
 
-// Type of stack frame an environment might refer to.
-enum FrameType { JS_FUNCTION, JS_CONSTRUCT, ARGUMENTS_ADAPTOR };
-
-
 class HEnvironment: public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
                Scope* scope,
                Handle<JSFunction> closure);
 
-  HEnvironment* DiscardInlined(bool drop_extra) {
-    HEnvironment* outer = outer_;
-    while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
-    if (drop_extra) outer->Drop(1);
-    return outer;
-  }
-
-  HEnvironment* arguments_environment() {
-    return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
-  }
-
   // Simple accessors.
   Handle<JSFunction> closure() const { return closure_; }
   const ZoneList<HValue*>* values() const { return &values_; }
   const ZoneList<int>* assigned_variables() const {
     return &assigned_variables_;
   }
-  FrameType frame_type() const { return frame_type_; }
   int parameter_count() const { return parameter_count_; }
   int specials_count() const { return specials_count_; }
   int local_count() const { return local_count_; }
@@ -426,10 +360,6 @@
     return i >= parameter_count() && i < parameter_count() + specials_count();
   }
 
-  int first_expression_index() const {
-    return parameter_count() + specials_count() + local_count();
-  }
-
   void Bind(Variable* variable, HValue* value) {
     Bind(IndexFor(variable), value);
   }
@@ -493,11 +423,9 @@
   // environment is the outer environment but the top expression stack
   // elements are moved to an inner environment as parameters.
   HEnvironment* CopyForInlining(Handle<JSFunction> target,
-                                int arguments,
                                 FunctionLiteral* function,
                                 HConstant* undefined,
-                                CallKind call_kind,
-                                bool is_construct) const;
+                                CallKind call_kind) const;
 
   void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
 
@@ -518,18 +446,6 @@
  private:
   explicit HEnvironment(const HEnvironment* other);
 
-  HEnvironment(HEnvironment* outer,
-               Handle<JSFunction> closure,
-               FrameType frame_type,
-               int arguments);
-
-  // Create an artificial stub environment (e.g. for argument adaptor or
-  // constructor stub).
-  HEnvironment* CreateStubEnvironment(HEnvironment* outer,
-                                      Handle<JSFunction> target,
-                                      FrameType frame_type,
-                                      int arguments) const;
-
   // True if index is included in the expression stack part of the environment.
   bool HasExpressionAt(int index) const;
 
@@ -551,7 +467,6 @@
   // Value array [parameters] [specials] [locals] [temporaries].
   ZoneList<HValue*> values_;
   ZoneList<int> assigned_variables_;
-  FrameType frame_type_;
   int parameter_count_;
   int specials_count_;
   int local_count_;
@@ -686,26 +601,16 @@
 };
 
 
-enum ReturnHandlingFlag {
-  NORMAL_RETURN,
-  DROP_EXTRA_ON_RETURN,
-  CONSTRUCT_CALL_RETURN
-};
-
-
-class FunctionState {
+class FunctionState BASE_EMBEDDED {
  public:
   FunctionState(HGraphBuilder* owner,
                 CompilationInfo* info,
-                TypeFeedbackOracle* oracle,
-                ReturnHandlingFlag return_handling);
+                TypeFeedbackOracle* oracle);
   ~FunctionState();
 
   CompilationInfo* compilation_info() { return compilation_info_; }
   TypeFeedbackOracle* oracle() { return oracle_; }
   AstContext* call_context() { return call_context_; }
-  bool drop_extra() { return return_handling_ == DROP_EXTRA_ON_RETURN; }
-  bool is_construct() { return return_handling_ == CONSTRUCT_CALL_RETURN; }
   HBasicBlock* function_return() { return function_return_; }
   TestContext* test_context() { return test_context_; }
   void ClearInlinedTestContext() {
@@ -725,13 +630,7 @@
   // inlined. NULL when not inlining.
   AstContext* call_context_;
 
-  // Indicate whether we have to perform special handling on return from
-  // inlined functions.
-  // - DROP_EXTRA_ON_RETURN: Drop an extra value from the environment.
-  // - CONSTRUCT_CALL_RETURN: Either use allocated receiver or return value.
-  ReturnHandlingFlag return_handling_;
-
-  // When inlining in an effect or value context, this is the return block.
+  // When inlining in an effect or value context, this is the return block.
   // It is NULL otherwise.  When inlining in a test context, there is a
   // pair of return blocks in the context.  When not inlining, there is no
   // local return point.
@@ -748,19 +647,14 @@
 class HGraphBuilder: public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
-  enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
 
   // A class encapsulating (lazily-allocated) break and continue blocks for
   // a breakable statement.  Separated from BreakAndContinueScope so that it
   // can have a separate lifetime.
   class BreakAndContinueInfo BASE_EMBEDDED {
    public:
-    explicit BreakAndContinueInfo(BreakableStatement* target,
-                                  int drop_extra = 0)
-        : target_(target),
-          break_block_(NULL),
-          continue_block_(NULL),
-          drop_extra_(drop_extra) {
+    explicit BreakAndContinueInfo(BreakableStatement* target)
+      : target_(target), break_block_(NULL), continue_block_(NULL) {
     }
 
     BreakableStatement* target() { return target_; }
@@ -768,13 +662,11 @@
     void set_break_block(HBasicBlock* block) { break_block_ = block; }
     HBasicBlock* continue_block() { return continue_block_; }
     void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
-    int drop_extra() { return drop_extra_; }
 
    private:
     BreakableStatement* target_;
     HBasicBlock* break_block_;
     HBasicBlock* continue_block_;
-    int drop_extra_;
   };
 
   // A helper class to maintain a stack of current BreakAndContinueInfo
@@ -793,7 +685,7 @@
     BreakAndContinueScope* next() { return next_; }
 
     // Search the break stack for a break or continue target.
-    HBasicBlock* Get(BreakableStatement* stmt, BreakType type, int* drop_extra);
+    HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
 
    private:
     BreakAndContinueInfo* info_;
@@ -834,10 +726,6 @@
 
   TypeFeedbackOracle* oracle() const { return function_state()->oracle(); }
 
-  FunctionState* function_state() const { return function_state_; }
-
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
-
  private:
   // Type of a member function that generates inline code for a native function.
   typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
@@ -855,13 +743,8 @@
   static const int kMaxInlinedSize = 196;
   static const int kMaxSourceSize = 600;
 
-  // Even in the 'unlimited' case we have to have some limit in order not to
-  // overflow the stack.
-  static const int kUnlimitedMaxInlinedNodes = 1000;
-  static const int kUnlimitedMaxInlinedSize = 1000;
-  static const int kUnlimitedMaxSourceSize = 600;
-
   // Simple accessors.
+  FunctionState* function_state() const { return function_state_; }
   void set_function_state(FunctionState* state) { function_state_ = state; }
 
   AstContext* ast_context() const { return ast_context_; }
@@ -871,6 +754,7 @@
   CompilationInfo* info() const {
     return function_state()->compilation_info();
   }
+
   AstContext* call_context() const {
     return function_state()->call_context();
   }
@@ -883,9 +767,8 @@
   void ClearInlinedTestContext() {
     function_state()->ClearInlinedTestContext();
   }
-  StrictModeFlag function_strict_mode_flag() {
-    return function_state()->compilation_info()->is_classic_mode()
-        ? kNonStrictMode : kStrictMode;
+  bool function_strict_mode() {
+    return function_state()->compilation_info()->is_strict_mode();
   }
 
   // Generators for inline runtime functions.
@@ -897,9 +780,8 @@
 #undef INLINE_FUNCTION_GENERATOR_DECLARATION
 
   void HandleDeclaration(VariableProxy* proxy,
-                         VariableMode mode,
-                         FunctionLiteral* function,
-                         int* global_count);
+                         Variable::Mode mode,
+                         FunctionLiteral* function);
 
   void VisitDelete(UnaryOperation* expr);
   void VisitVoid(UnaryOperation* expr);
@@ -913,7 +795,7 @@
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
 
-  bool PreProcessOsrEntry(IterationStatement* statement);
+  void PreProcessOsrEntry(IterationStatement* statement);
   // True iff. we are compiling for OSR and the statement is the entry.
   bool HasOsrEntryAt(IterationStatement* statement);
   void VisitLoopBody(IterationStatement* stmt,
@@ -967,7 +849,7 @@
 
   // Remove the arguments from the bailout environment and emit instructions
   // to push them as outgoing parameters.
-  template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
+  template <int V> HInstruction* PreProcessCall(HCall<V>* call);
 
   void TraceRepresentation(Token::Value op,
                            TypeInfo info,
@@ -975,7 +857,7 @@
                            Representation rep);
   static Representation ToRepresentation(TypeInfo info);
 
-  void SetUpScope(Scope* scope);
+  void SetupScope(Scope* scope);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
 
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
@@ -999,21 +881,11 @@
   // Try to optimize fun.apply(receiver, arguments) pattern.
   bool TryCallApply(Call* expr);
 
-  bool TryInline(CallKind call_kind,
-                 Handle<JSFunction> target,
-                 ZoneList<Expression*>* arguments,
-                 HValue* receiver,
-                 int ast_id,
-                 int return_id,
-                 ReturnHandlingFlag return_handling);
-
-  bool TryInlineCall(Call* expr, bool drop_extra = false);
-  bool TryInlineConstruct(CallNew* expr, HValue* receiver);
-  bool TryInlineBuiltinMethodCall(Call* expr,
-                                  HValue* receiver,
-                                  Handle<Map> receiver_map,
-                                  CheckType check_type);
-  bool TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra);
+  bool TryInline(Call* expr);
+  bool TryInlineBuiltinFunction(Call* expr,
+                                HValue* receiver,
+                                Handle<Map> receiver_map,
+                                CheckType check_type);
 
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
@@ -1038,12 +910,11 @@
                                   HValue* receiver,
                                   SmallMapList* types,
                                   Handle<String> name);
-  void HandleLiteralCompareTypeof(CompareOperation* expr,
-                                  HTypeof* typeof_expr,
+  void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
+                                  Expression* expr,
                                   Handle<String> check);
-  void HandleLiteralCompareNil(CompareOperation* expr,
-                               HValue* value,
-                               NilValue nil);
+  void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
+                                     Expression* expr);
 
   HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
                                            HValue* string,
@@ -1067,16 +938,11 @@
       HValue* val,
       ElementsKind elements_kind,
       bool is_store);
-  HInstruction* BuildFastElementAccess(HValue* elements,
-                                       HValue* checked_key,
-                                       HValue* val,
-                                       ElementsKind elements_kind,
-                                       bool is_store);
 
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
                                               HValue* val,
-                                              Handle<Map> map,
+                                              Expression* expr,
                                               bool is_store);
   HValue* HandlePolymorphicElementAccess(HValue* object,
                                          HValue* key,
@@ -1103,9 +969,6 @@
   HInstruction* BuildStoreNamed(HValue* object,
                                 HValue* value,
                                 Expression* expr);
-  HInstruction* BuildStoreNamed(HValue* object,
-                                HValue* value,
-                                ObjectLiteral::Property* prop);
   HInstruction* BuildStoreNamedField(HValue* object,
                                      Handle<String> name,
                                      HValue* value,
@@ -1174,10 +1037,10 @@
     Resize(kInitialSize);
   }
 
-  void Kill(GVNFlagSet flags);
+  void Kill(int flags);
 
   void Add(HValue* value) {
-    present_flags_.Add(value->gvn_flags());
+    present_flags_ |= value->flags();
     Insert(value);
   }
 
@@ -1210,8 +1073,7 @@
   int array_size_;
   int lists_size_;
   int count_;  // The number of values stored in the HValueMap.
-  GVNFlagSet present_flags_;  // All flags that are in any value in the
-                              // HValueMap.
+  int present_flags_;  // All flags that are in any value in the HValueMap.
   HValueMapListElement* array_;  // Primary store - contains the first value
   // with a given hash.  Colliding elements are stored in linked lists.
   HValueMapListElement* lists_;  // The linked lists containing hash collisions.
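
In both variants, present_flags_ caches the union of the flags of every stored value so that a kill can often skip the table entirely; only the representation changes (plain int in 3.6, GVNFlagSet in 3.9). A hedged sketch of the mechanism, with ints and a toy Value:

    #include <algorithm>
    #include <vector>

    struct Value { int flags; };  // Bit set of effects this value depends on.

    class ValueMap {
     public:
      void Add(Value* v) {
        present_flags_ |= v->flags;  // Union over everything stored.
        values_.push_back(v);
      }
      // Evict values that depend on any of the killed effects.
      void Kill(int flags) {
        if ((present_flags_ & flags) == 0) return;  // Cheap whole-table early out.
        values_.erase(std::remove_if(values_.begin(), values_.end(),
                                     [flags](Value* v) {
                                       return (v->flags & flags) != 0;
                                     }),
                      values_.end());
      }
     private:
      int present_flags_ = 0;
      std::vector<Value*> values_;
    };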
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 3cf0d00..0ca2d6b 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -30,15 +30,13 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // A light-weight IA32 Assembler.
 
 #ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
 #define V8_IA32_ASSEMBLER_IA32_INL_H_
 
-#include "ia32/assembler-ia32.h"
-
 #include "cpu.h"
 #include "debug.h"
 
@@ -80,26 +78,19 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   return reinterpret_cast<Address>(pc_);
 }
 
 
 int RelocInfo::target_address_size() {
-  return Assembler::kSpecialTargetSize;
+  return Assembler::kExternalTargetSize;
 }
 
 
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
-  Assembler::set_target_address_at(pc_, target);
+void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
+  Assembler::set_target_address_at(pc_, target);
 }
 
 
@@ -121,16 +112,10 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-  }
 }
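
The deleted lines are the incremental-marking write barrier that the newer heap needed whenever code patching stored a new object pointer; 3.6's non-incremental collector can do without it. A conceptual sketch of why such a barrier exists (toy marking state, not V8's):

    #include <vector>

    struct HeapObject { bool black = false; };  // black = already scanned

    struct IncrementalMarker {
      std::vector<HeapObject*> worklist;
      // If an already-scanned (black) object gains a pointer to `target`,
      // the marker must revisit `target`, or a live object could be missed
      // and collected while still reachable.
      void RecordWrite(HeapObject* host, HeapObject* target) {
        if (host->black) worklist.push_back(target);
      }
    };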
 
 
@@ -157,18 +142,11 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
-                                WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
-  }
 }
 
 
@@ -183,11 +161,6 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, target);
-  if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -221,14 +194,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(this);
+    visitor->VisitPointer(target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(this);
+    visitor->VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -249,14 +222,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, this);
+    StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(this);
+    StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index a42f632..9996474 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -32,7 +32,7 @@
 
 // The original source code covered by the above license has been modified
 // significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -55,8 +55,6 @@
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
 void CpuFeatures::Probe() {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
@@ -88,23 +86,23 @@
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, esp);
+  __ mov(ebp, Operand(esp));
 
   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, eax);
+  __ mov(edx, Operand(eax));
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, edx);  // Different if CPUID is supported.
+  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);
 
   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, eax);
-  __ xor_(edx, edx);
+  __ xor_(eax, Operand(eax));
+  __ xor_(edx, Operand(edx));
   __ jmp(&done);
 
   // Invoke CPUID with 1 in eax to get feature information in
@@ -120,13 +118,13 @@
 
   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, edx);
+  __ mov(eax, Operand(edx));
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, ecx);
+  __ mov(edx, Operand(ecx));
 
   // Done.
   __ bind(&done);
-  __ mov(esp, ebp);
+  __ mov(esp, Operand(ebp));
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -288,18 +286,6 @@
       && ((buf_[0] & 0x07) == reg.code());  // register codes match.
 }
 
-
-bool Operand::is_reg_only() const {
-  return (buf_[0] & 0xF8) == 0xC0;  // Addressing mode is register only.
-}
-
-
-Register Operand::reg() const {
-  ASSERT(is_reg_only());
-  return Register::from_code(buf_[0] & 0x07);
-}
-
-
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
 
@@ -350,7 +336,7 @@
   }
 #endif
 
-  // Set up buffer pointers.
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -377,7 +363,7 @@
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Set up code descriptor.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -388,91 +374,8 @@
 
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
-  int mask = m - 1;
-  int addr = pc_offset();
-  Nop((m - (addr & mask)) & mask);
-}
-
-
-bool Assembler::IsNop(Address addr) {
-  Address a = addr;
-  while (*a == 0x66) a++;
-  if (*a == 0x90) return true;
-  if (a[0] == 0xf && a[1] == 0x1f) return true;
-  return false;
-}
-
-
-void Assembler::Nop(int bytes) {
-  EnsureSpace ensure_space(this);
-
-  if (!CpuFeatures::IsSupported(SSE2)) {
-    // Older CPUs that do not support SSE2 may not support multibyte NOP
-    // instructions.
-    for (; bytes > 0; bytes--) {
-      EMIT(0x90);
-    }
-    return;
-  }
-
-  // Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf
-  while (bytes > 0) {
-    switch (bytes) {
-      case 2:
-        EMIT(0x66);
-      case 1:
-        EMIT(0x90);
-        return;
-      case 3:
-        EMIT(0xf);
-        EMIT(0x1f);
-        EMIT(0);
-        return;
-      case 4:
-        EMIT(0xf);
-        EMIT(0x1f);
-        EMIT(0x40);
-        EMIT(0);
-        return;
-      case 6:
-        EMIT(0x66);
-      case 5:
-        EMIT(0xf);
-        EMIT(0x1f);
-        EMIT(0x44);
-        EMIT(0);
-        EMIT(0);
-        return;
-      case 7:
-        EMIT(0xf);
-        EMIT(0x1f);
-        EMIT(0x80);
-        EMIT(0);
-        EMIT(0);
-        EMIT(0);
-        EMIT(0);
-        return;
-      default:
-      case 11:
-        EMIT(0x66);
-        bytes--;
-      case 10:
-        EMIT(0x66);
-        bytes--;
-      case 9:
-        EMIT(0x66);
-        bytes--;
-      case 8:
-        EMIT(0xf);
-        EMIT(0x1f);
-        EMIT(0x84);
-        EMIT(0);
-        EMIT(0);
-        EMIT(0);
-        EMIT(0);
-        EMIT(0);
-        bytes -= 8;
-    }
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
   }
 }
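
The revert trades the multi-byte NOP encodings (taken from the AMD manual cited in the removed comment) for a plain loop of single 0x90 bytes. Both pad correctly; the long forms just decode to far fewer instructions. A sketch of the two options, emitting into a plain byte buffer:

    #include <cstdint>
    #include <vector>

    // Pad with single-byte NOPs (the restored Align above): always valid,
    // but the CPU later decodes one instruction per padding byte.
    void PadWithShortNops(std::vector<uint8_t>* code, int bytes) {
      for (int i = 0; i < bytes; ++i) code->push_back(0x90);
    }

    // One of the long forms from the removed table: "0F 1F 00" is a
    // three-byte NOP, so large gaps decode to ~3x fewer instructions.
    void EmitThreeByteNop(std::vector<uint8_t>* code) {
      code->push_back(0x0F);
      code->push_back(0x1F);
      code->push_back(0x00);
    }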
 
@@ -546,6 +449,13 @@
 }
 
 
+void Assembler::push(Handle<Object> handle) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x68);
+  emit(handle);
+}
+
+
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -575,7 +485,7 @@
 
 
 void Assembler::mov_b(Register dst, const Operand& src) {
-  CHECK(dst.is_byte_register());
+  ASSERT(dst.code() < 4);
   EnsureSpace ensure_space(this);
   EMIT(0x8A);
   emit_operand(dst, src);
@@ -591,7 +501,7 @@
 
 
 void Assembler::mov_b(const Operand& dst, Register src) {
-  CHECK(src.is_byte_register());
+  ASSERT(src.code() < 4);
   EnsureSpace ensure_space(this);
   EMIT(0x88);
   emit_operand(src, dst);
@@ -704,6 +614,26 @@
 }
 
 
+void Assembler::cmov(Condition cc, Register dst, int32_t imm32) {
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
+  EnsureSpace ensure_space(this);
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(imm32);
+}
+
+
+void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) {
+  ASSERT(CpuFeatures::IsEnabled(CMOV));
+  EnsureSpace ensure_space(this);
+  UNIMPLEMENTED();
+  USE(cc);
+  USE(dst);
+  USE(handle);
+}
+
+
 void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
   ASSERT(CpuFeatures::IsEnabled(CMOV));
   EnsureSpace ensure_space(this);
@@ -771,13 +701,6 @@
 }
 
 
-void Assembler::add(const Operand& dst, Register src) {
-  EnsureSpace ensure_space(this);
-  EMIT(0x01);
-  emit_operand(src, dst);
-}
-
-
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -818,29 +741,25 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  if (op.is_reg(eax)) {
-    EMIT(0x3C);
-  } else {
-    EMIT(0x80);
-    emit_operand(edi, op);  // edi == 7
-  }
+  EMIT(0x80);
+  emit_operand(edi, op);  // edi == 7
   EMIT(imm8);
 }
 
 
-void Assembler::cmpb(const Operand& op, Register reg) {
-  CHECK(reg.is_byte_register());
+void Assembler::cmpb(const Operand& dst, Register src) {
+  ASSERT(src.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(reg, op);
+  emit_operand(src, dst);
 }
 
 
-void Assembler::cmpb(Register reg, const Operand& op) {
-  CHECK(reg.is_byte_register());
+void Assembler::cmpb(Register dst, const Operand& src) {
+  ASSERT(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(reg, op);
+  emit_operand(dst, src);
 }
 
 
@@ -901,7 +820,6 @@
 
 
 void Assembler::dec_b(Register dst) {
-  CHECK(dst.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0xFE);
   EMIT(0xC8 | dst.code());
@@ -1151,6 +1069,18 @@
 }
 
 
+void Assembler::subb(const Operand& op, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  if (op.is_reg(eax)) {
+    EMIT(0x2c);
+  } else {
+    EMIT(0x80);
+    emit_operand(ebp, op);  // ebp == 5
+  }
+  EMIT(imm8);
+}
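
The restored subb, like the removed cmpb above it, picks a shorter encoding when the destination is the accumulator: x86 has dedicated one-byte opcodes for the AL forms. A hedged sketch of that opcode selection against a plain byte buffer (helper name is illustrative):

    #include <cstdint>
    #include <vector>

    void EmitSubByteImmediate(std::vector<uint8_t>* code,
                              uint8_t dst_reg_code, int8_t imm8) {
      if (dst_reg_code == 0) {   // AL, i.e. eax's byte register.
        code->push_back(0x2C);   // SUB AL, imm8 -- two bytes total.
      } else {
        code->push_back(0x80);   // Group-1 byte-immediate opcode...
        // ...plus a register-direct ModRM with /5 (SUB) in the reg field,
        // the "ebp == 5" trick noted in the diff.
        code->push_back(0xC0 | (5 << 3) | dst_reg_code);
      }
      code->push_back(static_cast<uint8_t>(imm8));
    }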
+
+
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   emit_arith(5, dst, x);
@@ -1164,6 +1094,14 @@
 }
 
 
+void Assembler::subb(Register dst, const Operand& src) {
+  ASSERT(dst.code() < 4);
+  EnsureSpace ensure_space(this);
+  EMIT(0x2A);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x29);
@@ -1175,9 +1113,7 @@
   EnsureSpace ensure_space(this);
   // Only use test against byte for registers that have a byte
   // variant: eax, ebx, ecx, and edx.
-  if (imm.rmode_ == RelocInfo::NONE &&
-      is_uint8(imm.x_) &&
-      reg.is_byte_register()) {
+  if (imm.rmode_ == RelocInfo::NONE && is_uint8(imm.x_) && reg.code() < 4) {
     uint8_t imm8 = imm.x_;
     if (reg.is(eax)) {
       EMIT(0xA8);
@@ -1207,7 +1143,6 @@
 
 
 void Assembler::test_b(Register reg, const Operand& op) {
-  CHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x84);
   emit_operand(reg, op);
@@ -1223,10 +1158,6 @@
 
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
-  if (op.is_reg_only() && !op.reg().is_byte_register()) {
-    test(op, Immediate(imm8));
-    return;
-  }
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
@@ -1247,10 +1178,10 @@
 }
 
 
-void Assembler::xor_(const Operand& dst, Register src) {
+void Assembler::xor_(const Operand& src, Register dst) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(src, dst);
+  emit_operand(dst, src);
 }
 
 
@@ -1706,13 +1637,6 @@
 }
 
 
-void Assembler::fptan() {
-  EnsureSpace ensure_space(this);
-  EMIT(0xD9);
-  EMIT(0xF2);
-}
-
-
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   EMIT(0xD9);
@@ -1720,27 +1644,6 @@
 }
 
 
-void Assembler::f2xm1() {
-  EnsureSpace ensure_space(this);
-  EMIT(0xD9);
-  EMIT(0xF0);
-}
-
-
-void Assembler::fscale() {
-  EnsureSpace ensure_space(this);
-  EMIT(0xD9);
-  EMIT(0xFD);
-}
-
-
-void Assembler::fninit() {
-  EnsureSpace ensure_space(this);
-  EMIT(0xDB);
-  EMIT(0xE3);
-}
-
-
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
@@ -2054,16 +1957,6 @@
 }
 
 
-void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
-  ASSERT(CpuFeatures::IsEnabled(SSE2));
-  EnsureSpace ensure_space(this);
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0x2E);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
   ASSERT(CpuFeatures::IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2269,19 +2162,6 @@
 }
 
 
-void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(CpuFeatures::IsSupported(SSE4_1));
-  ASSERT(is_uint8(imm8));
-  EnsureSpace ensure_space(this);
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0x3A);
-  EMIT(0x17);
-  emit_sse_operand(dst, src);
-  EMIT(imm8);
-}
-
-
 void Assembler::pand(XMMRegister dst, XMMRegister src) {
   ASSERT(CpuFeatures::IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2461,7 +2341,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // Set up new buffer.
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
@@ -2591,7 +2471,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 929b485..4698e3e 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -75,8 +75,6 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
-    ASSERT(code >= 0);
-    ASSERT(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -97,25 +95,16 @@
   int code_;
 };
 
-const int kRegister_eax_Code = 0;
-const int kRegister_ecx_Code = 1;
-const int kRegister_edx_Code = 2;
-const int kRegister_ebx_Code = 3;
-const int kRegister_esp_Code = 4;
-const int kRegister_ebp_Code = 5;
-const int kRegister_esi_Code = 6;
-const int kRegister_edi_Code = 7;
-const int kRegister_no_reg_Code = -1;
 
-const Register eax = { kRegister_eax_Code };
-const Register ecx = { kRegister_ecx_Code };
-const Register edx = { kRegister_edx_Code };
-const Register ebx = { kRegister_ebx_Code };
-const Register esp = { kRegister_esp_Code };
-const Register ebp = { kRegister_ebp_Code };
-const Register esi = { kRegister_esi_Code };
-const Register edi = { kRegister_edi_Code };
-const Register no_reg = { kRegister_no_reg_Code };
+const Register eax = { 0 };
+const Register ecx = { 1 };
+const Register edx = { 2 };
+const Register ebx = { 3 };
+const Register esp = { 4 };
+const Register ebp = { 5 };
+const Register esi = { 6 };
+const Register edi = { 7 };
+const Register no_reg = { -1 };
 
 
 inline const char* Register::AllocationIndexToString(int index) {
@@ -311,6 +300,9 @@
 
 class Operand BASE_EMBEDDED {
  public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
@@ -355,16 +347,12 @@
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
-  // Returns true if this Operand is a wrapper for one register.
-  bool is_reg_only() const;
-
-  // Asserts that this Operand is a wrapper for one register and returns the
-  // register.
-  Register reg() const;
-
  private:
-  // reg
-  INLINE(explicit Operand(Register reg));
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
 
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -374,15 +362,7 @@
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
 
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
-
   friend class Assembler;
-  friend class MacroAssembler;
-  friend class LCodeGen;
 };
 
 
@@ -598,8 +578,8 @@
 
   // This sets the branch destination (which is in the instruction on x86).
   // This is for calls and branches within generated code.
-  inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Address target) {
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
     set_target_address_at(instruction_payload, target);
   }
 
@@ -610,7 +590,8 @@
     set_target_address_at(instruction_payload, target);
   }
 
-  static const int kSpecialTargetSize = kPointerSize;
+  static const int kCallTargetSize = kPointerSize;
+  static const int kExternalTargetSize = kPointerSize;
 
   // Distance between the address of the code target in the call instruction
   // and the return address
@@ -629,6 +610,8 @@
   // The debug break slot must be able to contain a call instruction.
   static const int kDebugBreakSlotLength = kCallInstructionLength;
 
+  // One byte opcode for test eax,0xXXXXXXXX.
+  static const byte kTestEaxByte = 0xA9;
   // One byte opcode for test al, 0xXX.
   static const byte kTestAlByte = 0xA8;
   // One byte opcode for nop.
@@ -665,7 +648,6 @@
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
-  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -680,6 +662,7 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
+  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -688,9 +671,7 @@
   void leave();
 
   // Moves
-  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
-  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);
 
@@ -706,22 +687,17 @@
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);
 
-  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);
 
-  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);
 
-  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);
 
-  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);
 
   // Conditional moves
-  void cmov(Condition cc, Register dst, Register src) {
-    cmov(cc, dst, Operand(src));
-  }
+  void cmov(Condition cc, Register dst, int32_t imm32);
+  void cmov(Condition cc, Register dst, Handle<Object> handle);
   void cmov(Condition cc, Register dst, const Operand& src);
 
   // Flag management.
@@ -739,31 +715,24 @@
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
 
-  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
-  void add(const Operand& dst, Register src);
-  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);
 
   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
-  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& dst, Register src);
+  void and_(const Operand& src, Register dst);
   void and_(const Operand& dst, const Immediate& x);
 
-  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register reg, const Operand& op);
-  void cmpb(const Operand& op, Register reg);
+  void cmpb(Register src, const Operand& dst);
+  void cmpb(const Operand& dst, Register src);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
-  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
-  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -779,7 +748,6 @@
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
-  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
 
@@ -796,10 +764,8 @@
   void not_(Register dst);
 
   void or_(Register dst, int32_t imm32);
-  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
-  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
@@ -810,42 +776,35 @@
 
   void sbb(Register dst, const Operand& src);
 
-  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);
 
-  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);
 
-  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
+  void subb(const Operand& dst, int8_t imm8);
+  void subb(Register dst, const Operand& src);
   void sub(const Operand& dst, const Immediate& x);
-  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
 
   void test(Register reg, const Immediate& imm);
-  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
-  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);
 
   void xor_(Register dst, int32_t imm32);
-  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& dst, Register src);
-  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
+  void xor_(const Operand& src, Register dst);
   void xor_(const Operand& dst, const Immediate& x);
 
   // Bit operations.
   void bt(const Operand& dst, Register src);
-  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
@@ -876,7 +835,6 @@
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
-  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -887,7 +845,6 @@
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
-  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
@@ -930,11 +887,7 @@
   void fchs();
   void fcos();
   void fsin();
-  void fptan();
   void fyl2x();
-  void f2xm1();
-  void fscale();
-  void fninit();
 
   void fadd(int i);
   void fsub(int i);
@@ -976,7 +929,6 @@
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
 
-  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -992,7 +944,6 @@
   void andpd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
-  void ucomisd(XMMRegister dst, const Operand& src);
 
   enum RoundingMode {
     kRoundToNearest = 0x0,
@@ -1018,16 +969,13 @@
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
 
-  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
-  void movd(const Operand& dst, XMMRegister src);
+  void movd(const Operand& src, XMMRegister dst);
   void movsd(XMMRegister dst, XMMRegister src);
 
   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& dst, XMMRegister src);
+  void movss(const Operand& src, XMMRegister dst);
   void movss(XMMRegister dst, XMMRegister src);
-  void extractps(Register dst, XMMRegister src, byte imm8);
 
   void pand(XMMRegister dst, XMMRegister src);
   void pxor(XMMRegister dst, XMMRegister src);
@@ -1039,17 +987,11 @@
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
-  void pextrd(Register dst, XMMRegister src, int8_t offset) {
-    pextrd(Operand(dst), src, offset);
-  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
-  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
-    pinsrd(dst, Operand(src), offset);
-  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
 
   // Parallel XMM operations.
-  void movntdqa(XMMRegister dst, const Operand& src);
+  void movntdqa(XMMRegister src, const Operand& dst);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1091,7 +1033,7 @@
   // Get the number of bytes available in the buffer.
   inline int available_space() const { return reloc_info_writer.pos() - pc_; }
 
-  static bool IsNop(Address addr);
+  static bool IsNop(Address addr) { return *addr == 0x90; }
 
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
@@ -1103,9 +1045,6 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
@@ -1118,8 +1057,9 @@
 
   byte* addr_at(int pos) { return buffer_ + pos; }
 
-
  private:
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
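
Note on the assembler hunks above: the newer API added Register-taking convenience overloads that simply forward to the Operand form, and this revert deletes them, which is why call sites throughout the builtins below regain an explicit Operand(reg) wrapper. A minimal standalone sketch of that forwarding pattern, using stand-in Register, Operand, and Assembler types rather than the real V8 declarations:

// Stand-in types to illustrate the deleted convenience-overload pattern;
// these are not the real V8 assembler types.
#include <cstdio>

struct Register { int code; };

struct Operand {
  explicit Operand(Register r) : reg_code(r.code) {}
  int reg_code;
};

struct Assembler {
  // The real emitter takes an Operand.
  void add(Register dst, const Operand& src) {
    std::printf("add r%d, r%d\n", dst.code, src.reg_code);
  }
  // The overload removed by this revert: it only wraps the register, so
  // without it call sites must spell out add(dst, Operand(src)) themselves.
  void add(Register dst, Register src) { add(dst, Operand(src)); }
};

int main() {
  Assembler masm;
  Register eax{0}, ebx{3};
  masm.add(eax, ebx);           // newer API, via the convenience overload
  masm.add(eax, Operand(ebx));  // explicit form required after the revert
  return 0;
}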
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index a5d42cf..310ea3d 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -69,294 +69,309 @@
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ add(eax, Immediate(num_extra_args + 1));
+  __ add(Operand(eax), Immediate(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
 
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax: number of arguments
+  //  -- edi: constructor function
+  // -----------------------------------
+
+  Label non_function_call;
+  // Check that function is not a smi.
+  __ JumpIfSmi(edi, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ jmp(Operand(ebx));
+
+  // edi: called object
+  // eax: number of arguments
+  __ bind(&non_function_call);
+  // Set expected number of arguments to zero (not changing eax).
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  Handle<Code> arguments_adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  __ SetCallKind(ecx, CALL_AS_METHOD);
+  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+}
+
+
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
-  // ----------- S t a t e -------------
-  //  -- eax: number of arguments
-  //  -- edi: constructor function
-  // -----------------------------------
-
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
   // Enter a construct frame.
-  {
-    FrameScope scope(masm, StackFrame::CONSTRUCT);
+  __ EnterConstructFrame();
 
-    // Store a smi-tagged arguments count on the stack.
-    __ SmiTag(eax);
-    __ push(eax);
+  // Store a smi-tagged arguments count on the stack.
+  __ SmiTag(eax);
+  __ push(eax);
 
-    // Push the function to invoke on the stack.
-    __ push(edi);
+  // Push the function to invoke on the stack.
+  __ push(edi);
 
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    Label rt_call, allocated;
-    if (FLAG_inline_new) {
-      Label undo_allocation;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(masm->isolate());
-      __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
-      __ j(not_equal, &rt_call);
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(masm->isolate());
+    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+    __ j(not_equal, &rt_call);
 #endif
 
-      // Verified that the constructor is a JSFunction.
-      // Load the initial map and verify that it is in fact a map.
-      // edi: constructor
-      __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-      // Will both indicate a NULL and a Smi
-      __ JumpIfSmi(eax, &rt_call);
-      // edi: constructor
-      // eax: initial map (if proven valid below)
-      __ CmpObjectType(eax, MAP_TYPE, ebx);
-      __ j(not_equal, &rt_call);
+    // Verified that the constructor is a JSFunction.
+    // Load the initial map and verify that it is in fact a map.
+    // edi: constructor
+    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will both indicate a NULL and a Smi
+    __ JumpIfSmi(eax, &rt_call);
+    // edi: constructor
+    // eax: initial map (if proven valid below)
+    __ CmpObjectType(eax, MAP_TYPE, ebx);
+    __ j(not_equal, &rt_call);
 
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // edi: constructor
-      // eax: initial map
-      __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-      __ j(equal, &rt_call);
+    // Check that the constructor is not constructing a JSFunction (see
+    // comments in Runtime_NewObject in runtime.cc), in which case the
+    // initial map's instance type would be JS_FUNCTION_TYPE.
+    // edi: constructor
+    // eax: initial map
+    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+    __ j(equal, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
+      __ j(not_zero, &allocate);
+
+      __ push(eax);
+      __ push(edi);
+
+      __ push(edi);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(edi);
+      __ pop(eax);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // edi: constructor
+    // eax: initial map
+    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+    __ shl(edi, kPointerSizeLog2);
+    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+    // Allocated the JSObject, now initialize the fields.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+    Factory* factory = masm->isolate()->factory();
+    __ mov(ecx, factory->empty_fixed_array());
+    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+    // Set extra fields in the newly allocated object.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    { Label loop, entry;
+      // To allow for truncation.
       if (count_constructions) {
-        Label allocate;
-        // Decrease generous allocation count.
-        __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-        __ dec_b(FieldOperand(ecx,
-                              SharedFunctionInfo::kConstructionCountOffset));
-        __ j(not_zero, &allocate);
-
-        __ push(eax);
-        __ push(edi);
-
-        __ push(edi);  // constructor
-        // The call will replace the stub, so the countdown is only done once.
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ pop(edi);
-        __ pop(eax);
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      // edi: constructor
-      // eax: initial map
-      __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-      __ shl(edi, kPointerSizeLog2);
-      __ AllocateInNewSpace(
-          edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-      // Allocated the JSObject, now initialize the fields.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-      Factory* factory = masm->isolate()->factory();
-      __ mov(ecx, factory->empty_fixed_array());
-      __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-      __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-      // Set extra fields in the newly allocated object.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-      __ mov(edx, factory->undefined_value());
-      if (count_constructions) {
-        __ movzx_b(esi,
-                   FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-        __ lea(esi,
-               Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
-        // esi: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ cmp(esi, edi);
-          __ Assert(less_equal,
-                    "Unexpected number of pre-allocated property fields.");
-        }
-        __ InitializeFieldsWithFiller(ecx, esi, edx);
         __ mov(edx, factory->one_pointer_filler_map());
-      }
-      __ InitializeFieldsWithFiller(ecx, edi, edx);
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on. Any
-      // failures need to undo the allocation, so that the heap is in a
-      // consistent state and verifiable.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      __ or_(ebx, Immediate(kHeapObjectTag));
-
-      // Check if a non-empty properties array is needed.
-      // Allocate and initialize a FixedArray if it is.
-      // eax: initial map
-      // ebx: JSObject
-      // edi: start of next object
-      // Calculate the total number of properties described by the map.
-      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-      __ movzx_b(ecx,
-                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-      __ add(edx, ecx);
-      // Calculate unused properties past the end of the in-object properties.
-      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-      __ sub(edx, ecx);
-      // Done if no extra properties are to be allocated.
-      __ j(zero, &allocated);
-      __ Assert(positive, "Property allocation count failed.");
-
-      // Scale the number of elements by pointer size and add the header for
-      // FixedArrays to the start of the next object calculation from above.
-      // ebx: JSObject
-      // edi: start of next object (will be start of FixedArray)
-      // edx: number of elements in properties array
-      __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                            times_pointer_size,
-                            edx,
-                            edi,
-                            ecx,
-                            no_reg,
-                            &undo_allocation,
-                            RESULT_CONTAINS_TOP);
-
-      // Initialize the FixedArray.
-      // ebx: JSObject
-      // edi: FixedArray
-      // edx: number of elements
-      // ecx: start of next object
-      __ mov(eax, factory->fixed_array_map());
-      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
-      __ SmiTag(edx);
-      __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
-
-      // Initialize the fields to undefined.
-      // ebx: JSObject
-      // edi: FixedArray
-      // ecx: start of next object
-      { Label loop, entry;
+      } else {
         __ mov(edx, factory->undefined_value());
-        __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
-        __ jmp(&entry);
-        __ bind(&loop);
-        __ mov(Operand(eax, 0), edx);
-        __ add(eax, Immediate(kPointerSize));
-        __ bind(&entry);
-        __ cmp(eax, ecx);
-        __ j(below, &loop);
       }
-
-      // Store the initialized FixedArray into the properties field of
-      // the JSObject
-      // ebx: JSObject
-      // edi: FixedArray
-      __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
-      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
-
-
-      // Continue with JSObject being successfully allocated
-      // ebx: JSObject
-      __ jmp(&allocated);
-
-      // Undo the setting of the new top so that the heap is verifiable. For
-      // example, the map's unused properties potentially do not match the
-      // allocated objects unused properties.
-      // ebx: JSObject (previous new top)
-      __ bind(&undo_allocation);
-      __ UndoAllocationInNewSpace(ebx);
+      __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(ecx, 0), edx);
+      __ add(Operand(ecx), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(ecx, Operand(edi));
+      __ j(less, &loop);
     }
 
-    // Allocate the new receiver object using the runtime call.
-    __ bind(&rt_call);
-    // Must restore edi (constructor) before calling runtime.
-    __ mov(edi, Operand(esp, 0));
-    // edi: function (constructor)
-    __ push(edi);
-    __ CallRuntime(Runtime::kNewObject, 1);
-    __ mov(ebx, eax);  // store result in ebx
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
 
-    // New object allocated.
-    // ebx: newly allocated object
-    __ bind(&allocated);
-    // Retrieve the function from the stack.
-    __ pop(edi);
+    // Check if a non-empty properties array is needed.
+    // Allocate and initialize a FixedArray if it is.
+    // eax: initial map
+    // ebx: JSObject
+    // edi: start of next object
+    // Calculate the total number of properties described by the map.
+    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+    __ add(edx, Operand(ecx));
+    // Calculate unused properties past the end of the in-object properties.
+    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+    __ sub(edx, Operand(ecx));
+    // Done if no extra properties are to be allocated.
+    __ j(zero, &allocated);
+    __ Assert(positive, "Property allocation count failed.");
 
-    // Retrieve smi-tagged arguments count from the stack.
-    __ mov(eax, Operand(esp, 0));
-    __ SmiUntag(eax);
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // ebx: JSObject
+    // edi: start of next object (will be start of FixedArray)
+    // edx: number of elements in properties array
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          edx,
+                          edi,
+                          ecx,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
-    // Push the allocated receiver to the stack. We need two copies
-    // because we may have to return the original one and the calling
-    // conventions dictate that the called function pops the receiver.
-    __ push(ebx);
-    __ push(ebx);
+    // Initialize the FixedArray.
+    // ebx: JSObject
+    // edi: FixedArray
+    // edx: number of elements
+    // ecx: start of next object
+    __ mov(eax, factory->fixed_array_map());
+    __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
+    __ SmiTag(edx);
+    __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
 
-    // Set up pointer to last argument.
-    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
-    // Copy arguments and receiver to the expression stack.
-    Label loop, entry;
-    __ mov(ecx, eax);
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ push(Operand(ebx, ecx, times_4, 0));
-    __ bind(&entry);
-    __ dec(ecx);
-    __ j(greater_equal, &loop);
-
-    // Call the function.
-    if (is_api_function) {
-      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      ParameterCount expected(0);
-      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-    } else {
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
+    // Initialize the fields to undefined.
+    // ebx: JSObject
+    // edi: FixedArray
+    // ecx: start of next object
+    { Label loop, entry;
+      __ mov(edx, factory->undefined_value());
+      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ mov(Operand(eax, 0), edx);
+      __ add(Operand(eax), Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmp(eax, Operand(ecx));
+      __ j(below, &loop);
     }
 
-    // Store offset of return address for deoptimizer.
-    if (!is_api_function && !count_constructions) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
-    }
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // ebx: JSObject
+    // edi: FixedArray
+    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
+    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
 
-    // Restore context from the frame.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
+    // Continue with JSObject being successfully allocated
+    // ebx: JSObject
+    __ jmp(&allocated);
 
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    __ JumpIfSmi(eax, &use_receiver);
-
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(above_equal, &exit);
-
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ mov(eax, Operand(esp, 0));
-
-    // Restore the arguments count and leave the construct frame.
-    __ bind(&exit);
-    __ mov(ebx, Operand(esp, kPointerSize));  // Get arguments count.
-
-    // Leave construct frame.
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated object's unused properties.
+    // ebx: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(ebx);
   }
 
+  // Allocate the new receiver object using the runtime call.
+  __ bind(&rt_call);
+  // Must restore edi (constructor) before calling runtime.
+  __ mov(edi, Operand(esp, 0));
+  // edi: function (constructor)
+  __ push(edi);
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(ebx, Operand(eax));  // store result in ebx
+
+  // New object allocated.
+  // ebx: newly allocated object
+  __ bind(&allocated);
+  // Retrieve the function from the stack.
+  __ pop(edi);
+
+  // Retrieve smi-tagged arguments count from the stack.
+  __ mov(eax, Operand(esp, 0));
+  __ SmiUntag(eax);
+
+  // Push the allocated receiver to the stack. We need two copies
+  // because we may have to return the original one and the calling
+  // conventions dictate that the called function pops the receiver.
+  __ push(ebx);
+  __ push(ebx);
+
+  // Set up pointer to last argument.
+  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+  // Copy arguments and receiver to the expression stack.
+  Label loop, entry;
+  __ mov(ecx, Operand(eax));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ push(Operand(ebx, ecx, times_4, 0));
+  __ bind(&entry);
+  __ dec(ecx);
+  __ j(greater_equal, &loop);
+
+  // Call the function.
+  if (is_api_function) {
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  } else {
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Restore context from the frame.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  __ JumpIfSmi(eax, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+  __ j(above_equal, &exit);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ mov(eax, Operand(esp, 0));
+
+  // Restore the arguments count and leave the construct frame.
+  __ bind(&exit);
+  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
+  __ LeaveConstructFrame();
+
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
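
The STATIC_ASSERT just above pins down the smi encoding that this stub's argument-count bookkeeping relies on. A runnable sketch of that encoding, with kSmiTagSize and kSmiTag taken from the assertion and everything else illustrative:

// Smi encoding on ia32 (kSmiTagSize == 1, kSmiTag == 0): a small integer is
// stored shifted left by one, so the low bit doubles as a not-a-pointer tag.
#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const intptr_t kSmiTag = 0;

intptr_t SmiTag(intptr_t value) { return (value << kSmiTagSize) | kSmiTag; }
intptr_t SmiUntag(intptr_t tagged) { return tagged >> kSmiTagSize; }

int main() {
  intptr_t argc = 3;
  intptr_t tagged = SmiTag(argc);    // what __ SmiTag(eax) leaves behind
  assert((tagged & 1) == kSmiTag);   // low bit clear: a smi, not a heap pointer
  assert(SmiUntag(tagged) == argc);  // __ SmiUntag(eax) recovers the count
  return 0;
}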
@@ -384,58 +399,57 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  // Clear the context before we push it when entering the internal frame.
+  // Clear the context before we push it when entering the JS frame.
   __ Set(esi, Immediate(0));
 
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Load the previous frame pointer (ebx) to access C arguments
-    __ mov(ebx, Operand(ebp, 0));
+  // Load the previous frame pointer (ebx) to access C arguments
+  __ mov(ebx, Operand(ebp, 0));
 
-    // Get the function from the frame and setup the context.
-    __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-    __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+  // Get the function from the frame and set up the context.
+  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
 
-    // Push the function and the receiver onto the stack.
-    __ push(ecx);
-    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+  // Push the function and the receiver onto the stack.
+  __ push(ecx);
+  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
 
-    // Load the number of arguments and setup pointer to the arguments.
-    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
-    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+  // Load the number of arguments and set up the pointer to the arguments.
+  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
 
-    // Copy arguments to the stack in a loop.
-    Label loop, entry;
-    __ Set(ecx, Immediate(0));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
-    __ push(Operand(edx, 0));  // dereference handle
-    __ inc(ecx);
-    __ bind(&entry);
-    __ cmp(ecx, eax);
-    __ j(not_equal, &loop);
+  // Copy arguments to the stack in a loop.
+  Label loop, entry;
+  __ Set(ecx, Immediate(0));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+  __ push(Operand(edx, 0));  // dereference handle
+  __ inc(Operand(ecx));
+  __ bind(&entry);
+  __ cmp(ecx, Operand(eax));
+  __ j(not_equal, &loop);
 
-    // Get the function from the stack and call it.
-    // kPointerSize for the receiver.
-    __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+  // Get the function from the stack and call it.
+  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
 
-    // Invoke the code.
-    if (is_construct) {
-      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-      __ CallStub(&stub);
-    } else {
-      ParameterCount actual(eax);
-      __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
-    }
-
-    // Exit the internal frame. Notice that this also removes the empty
-    // context and the function left on the stack by the code
-    // invocation.
+  // Invoke the code.
+  if (is_construct) {
+    __ call(masm->isolate()->builtins()->JSConstructCall(),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(eax);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
-  __ ret(kPointerSize);  // Remove receiver.
+
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
+  __ ret(1 * kPointerSize);  // remove receiver
 }
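
Most hunks in this file swap an RAII FrameScope block for explicit EnterInternalFrame()/LeaveInternalFrame() calls, the older idiom being restored here. A standalone sketch of the two shapes, with Masm standing in for the real MacroAssembler:

// Stand-in for the MacroAssembler; only the frame calls matter here.
#include <cstdio>

struct Masm {
  void EnterInternalFrame() { std::printf("enter internal frame\n"); }
  void LeaveInternalFrame() { std::printf("leave internal frame\n"); }
};

// Newer style (removed by this revert): the destructor guarantees that
// every exit from the scope emits the frame teardown.
struct FrameScope {
  explicit FrameScope(Masm* m) : masm(m) { masm->EnterInternalFrame(); }
  ~FrameScope() { masm->LeaveInternalFrame(); }
  Masm* masm;
};

int main() {
  Masm masm;
  {
    FrameScope scope(&masm);  // newer: scoped pair
    // ... emit code inside the frame ...
  }
  masm.EnterInternalFrame();  // older, restored: explicit pair that must be
  // ... emit code inside the frame ...
  masm.LeaveInternalFrame();  // matched by hand on every exit path
  return 0;
}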
 
 
@@ -450,68 +464,68 @@
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Push a copy of the function.
-    __ push(edi);
-    // Push call kind information.
-    __ push(ecx);
+  // Push a copy of the function.
+  __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
-    __ push(edi);  // Function is also the parameter to the runtime call.
-    __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
 
-    // Restore call kind information.
-    __ pop(ecx);
-    // Restore receiver.
-    __ pop(edi);
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore receiver.
+  __ pop(edi);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+  __ jmp(Operand(eax));
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Push a copy of the function onto the stack.
-    __ push(edi);
-    // Push call kind information.
-    __ push(ecx);
+  // Push a copy of the function onto the stack.
+  __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
 
-    __ push(edi);  // Function is also the parameter to the runtime call.
-    __ CallRuntime(Runtime::kLazyRecompile, 1);
+  __ push(edi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-    // Restore call kind information.
-    __ pop(ecx);
-    // Restore receiver.
-    __ pop(edi);
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore receiver.
+  __ pop(edi);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+  __ jmp(Operand(eax));
 }
 
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Pass deoptimization type to the runtime system.
-    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  // Pass the function and deoptimization type to the runtime system.
+  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Get the full codegen state from the stack and untag it.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -552,10 +566,9 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ pushad();
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kNotifyOSR, 0);
-  }
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
   __ popad();
   __ ret(0);
 }
@@ -566,7 +579,7 @@
 
   // 1. Make sure we have at least one argument.
   { Label done;
-    __ test(eax, eax);
+    __ test(eax, Operand(eax));
     __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
@@ -618,21 +631,18 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ SmiTag(eax);
+    __ push(eax);
 
-    { // In order to preserve argument count.
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(eax);
-      __ push(eax);
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ Set(edx, Immediate(0));  // restore
 
-      __ push(ebx);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ mov(ebx, eax);
-      __ Set(edx, Immediate(0));  // restore
-
-      __ pop(eax);
-      __ SmiUntag(eax);
-    }
-
+    __ pop(eax);
+    __ SmiUntag(eax);
+    __ LeaveInternalFrame();
     // Restore the function to edi.
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
@@ -685,23 +695,22 @@
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
   //     or a function proxy via CALL_FUNCTION_PROXY.
   { Label function, non_proxy;
-    __ test(edx, edx);
+    __ test(edx, Operand(edx));
     __ j(zero, &function);
     __ Set(ebx, Immediate(0));
-    __ cmp(edx, Immediate(1));
+    __ SetCallKind(ecx, CALL_AS_METHOD);
+    __ cmp(Operand(edx), Immediate(1));
     __ j(not_equal, &non_proxy);
 
     __ pop(edx);   // return address
     __ push(edi);  // re-add proxy object as additional argument
     __ push(edx);
     __ inc(eax);
-    __ SetCallKind(ecx, CALL_AS_FUNCTION);
     __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
 
     __ bind(&non_proxy);
-    __ SetCallKind(ecx, CALL_AS_METHOD);
     __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
     __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
@@ -717,13 +726,13 @@
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
   __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ cmp(eax, ebx);
+  __ cmp(eax, Operand(ebx));
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
-                CALL_AS_METHOD);
+  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
+                NullCallWrapper(), CALL_AS_METHOD);
 }
 
 
@@ -731,160 +740,163 @@
   static const int kArgumentsOffset = 2 * kPointerSize;
   static const int kReceiverOffset = 3 * kPointerSize;
   static const int kFunctionOffset = 4 * kPointerSize;
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-    __ push(Operand(ebp, kFunctionOffset));  // push this
-    __ push(Operand(ebp, kArgumentsOffset));  // push arguments
-    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+  __ EnterInternalFrame();
 
-    // Check the stack for overflow. We are not trying to catch
-    // interruptions (e.g. debug break and preemption) here, so the "real stack
-    // limit" is checked.
-    Label okay;
-    ExternalReference real_stack_limit =
-        ExternalReference::address_of_real_stack_limit(masm->isolate());
-    __ mov(edi, Operand::StaticVariable(real_stack_limit));
-    // Make ecx the space we have left. The stack might already be overflowed
-    // here which will cause ecx to become negative.
-    __ mov(ecx, esp);
-    __ sub(ecx, edi);
-    // Make edx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ mov(edx, eax);
-    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-    // Check if the arguments will overflow the stack.
-    __ cmp(ecx, edx);
-    __ j(greater, &okay);  // Signed comparison.
+  __ push(Operand(ebp, kFunctionOffset));  // push this
+  __ push(Operand(ebp, kArgumentsOffset));  // push arguments
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-    // Out of stack space.
-    __ push(Operand(ebp, 4 * kPointerSize));  // push this
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-    // End of stack check.
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  ExternalReference real_stack_limit =
+      ExternalReference::address_of_real_stack_limit(masm->isolate());
+  __ mov(edi, Operand::StaticVariable(real_stack_limit));
+  // Make ecx the space we have left. The stack might already be overflowed
+  // here which will cause ecx to become negative.
+  __ mov(ecx, Operand(esp));
+  __ sub(ecx, Operand(edi));
+  // Make edx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edx, Operand(eax));
+  __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+  // Check if the arguments will overflow the stack.
+  __ cmp(ecx, Operand(edx));
+  __ j(greater, &okay);  // Signed comparison.
 
-    // Push current index and limit.
-    const int kLimitOffset =
-        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-    __ push(eax);  // limit
-    __ push(Immediate(0));  // index
+  // Out of stack space.
+  __ push(Operand(ebp, 4 * kPointerSize));  // push this
+  __ push(eax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
 
-    // Get the receiver.
-    __ mov(ebx, Operand(ebp, kReceiverOffset));
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(eax);  // limit
+  __ push(Immediate(0));  // index
 
-    // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver;
-    __ mov(edi, Operand(ebp, kFunctionOffset));
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &push_receiver);
+  // Get the receiver.
+  __ mov(ebx, Operand(ebp, kReceiverOffset));
 
-    // Change context eagerly to get the right global object if necessary.
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ mov(edi, Operand(ebp, kFunctionOffset));
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &push_receiver);
 
-    // Compute the receiver.
-    // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
-    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-    __ j(not_equal, &push_receiver);
+  // Change context eagerly to get the right global object if necessary.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-    Factory* factory = masm->isolate()->factory();
+  // Compute the receiver.
+  // Do not transform the receiver for strict mode functions.
+  Label call_to_object, use_global_receiver;
+  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+  __ j(not_equal, &push_receiver);
 
-    // Do not transform the receiver for natives (shared already in ecx).
-    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-              1 << SharedFunctionInfo::kNativeBitWithinByte);
-    __ j(not_equal, &push_receiver);
+  Factory* factory = masm->isolate()->factory();
 
-    // Compute the receiver in non-strict mode.
-    // Call ToObject on the receiver if it is not an object, or use the
-    // global object if it is null or undefined.
-    __ JumpIfSmi(ebx, &call_to_object);
-    __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_receiver);
-    __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_receiver);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-    __ j(above_equal, &push_receiver);
+  // Do not transform the receiver for natives (shared already in ecx).
+  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+            1 << SharedFunctionInfo::kNativeBitWithinByte);
+  __ j(not_equal, &push_receiver);
 
-    __ bind(&call_to_object);
-    __ push(ebx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, eax);
-    __ jmp(&push_receiver);
+  // Compute the receiver in non-strict mode.
+  // Call ToObject on the receiver if it is not an object, or use the
+  // global object if it is null or undefined.
+  __ JumpIfSmi(ebx, &call_to_object);
+  __ cmp(ebx, factory->null_value());
+  __ j(equal, &use_global_receiver);
+  __ cmp(ebx, factory->undefined_value());
+  __ j(equal, &use_global_receiver);
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+  __ j(above_equal, &push_receiver);
 
-    // Use the current global receiver object as the receiver.
-    __ bind(&use_global_receiver);
-    const int kGlobalOffset =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-    __ mov(ebx, FieldOperand(esi, kGlobalOffset));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
-    __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+  __ bind(&call_to_object);
+  __ push(ebx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(ebx, Operand(eax));
+  __ jmp(&push_receiver);
 
-    // Push the receiver.
-    __ bind(&push_receiver);
-    __ push(ebx);
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+  __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
-    // Copy all arguments from the array to the stack.
-    Label entry, loop;
-    __ mov(eax, Operand(ebp, kIndexOffset));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(ebx);
 
-    // Use inline caching to speed up access to arguments.
-    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-    __ call(ic, RelocInfo::CODE_TARGET);
-    // It is important that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to indicate that
-    // we have generated an inline version of the keyed load.  In this
-    // case, we know that we are not generating a test instruction next.
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
 
-    // Push the nth argument.
-    __ push(eax);
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
 
-    // Update the index on the stack and in register eax.
-    __ mov(eax, Operand(ebp, kIndexOffset));
-    __ add(eax, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), eax);
+  // Push the nth argument.
+  __ push(eax);
 
-    __ bind(&entry);
-    __ cmp(eax, Operand(ebp, kLimitOffset));
-    __ j(not_equal, &loop);
+  // Update the index on the stack and in register eax.
+  __ mov(eax, Operand(ebp, kIndexOffset));
+  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
+  __ mov(Operand(ebp, kIndexOffset), eax);
 
-    // Invoke the function.
-    Label call_proxy;
-    ParameterCount actual(eax);
-    __ SmiUntag(eax);
-    __ mov(edi, Operand(ebp, kFunctionOffset));
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &call_proxy);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+  __ bind(&entry);
+  __ cmp(eax, Operand(ebp, kLimitOffset));
+  __ j(not_equal, &loop);
 
-    frame_scope.GenerateLeaveFrame();
-    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+  // Invoke the function.
+  Label call_proxy;
+  ParameterCount actual(eax);
+  __ SmiUntag(eax);
+  __ mov(edi, Operand(ebp, kFunctionOffset));
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &call_proxy);
+  __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
-    // Invoke the function proxy.
-    __ bind(&call_proxy);
-    __ push(edi);  // add function proxy as last argument
-    __ inc(eax);
-    __ Set(ebx, Immediate(0));
-    __ SetCallKind(ecx, CALL_AS_METHOD);
-    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-    // Leave internal frame.
-  }
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(edi);  // add function proxy as last argument
+  __ inc(eax);
+  __ Set(ebx, Immediate(0));
+  __ SetCallKind(ecx, CALL_AS_METHOD);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+
+  __ LeaveInternalFrame();
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
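
The overflow check in Generate_FunctionApply above compares the space left below the real stack limit against the space the unrolled arguments will need, working directly on the smi-tagged count. The same arithmetic in plain C++, with toy values standing in for esp and the isolate's limit:

#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSizeLog2 = 2;  // ia32: 4-byte stack slots
  const int kSmiTagSize = 1;

  intptr_t esp = 0x1000;                   // toy stack pointer
  intptr_t real_stack_limit = 0x0800;      // toy limit from the isolate
  intptr_t argc_smi = 100 << kSmiTagSize;  // smi-tagged argument count (eax)

  // ecx: space we have left; can go negative if already overflowed.
  intptr_t space_left = esp - real_stack_limit;
  // edx: bytes needed to unroll the array, argc * kPointerSize, computed
  // from the smi by shifting (kPointerSizeLog2 - kSmiTagSize) instead of
  // untagging first.
  intptr_t space_needed = argc_smi << (kPointerSizeLog2 - kSmiTagSize);

  // Signed comparison, matching __ j(greater, &okay).
  assert(space_left > space_needed);  // otherwise: APPLY_OVERFLOW
  return 0;
}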
 
 
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -895,11 +907,13 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
+                                 int initial_capacity,
                                  Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
+  ASSERT(initial_capacity >= 0);
 
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  // Load the initial map from the array function.
+  __ mov(scratch1, FieldOperand(array_function,
+                                JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -954,6 +968,7 @@
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
+  STATIC_ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -965,17 +980,13 @@
     }
   } else {
     Label loop, entry;
-    __ mov(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ mov(FieldOperand(scratch1,
-                        scratch2,
-                        times_pointer_size,
-                        FixedArray::kHeaderSize),
-           factory->the_hole_value());
+    __ mov(Operand(scratch1, 0), factory->the_hole_value());
+    __ add(Operand(scratch1), Immediate(kPointerSize));
     __ bind(&entry);
-    __ dec(scratch2);
-    __ j(not_sign, &loop);
+    __ cmp(scratch1, Operand(scratch2));
+    __ j(below, &loop);
   }
 }
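
The fill code above chooses between a fully unfolded run of stores and the restored cmp/j(below) loop, keyed on kLoopUnfoldLimit. A plain-C++ rendering of that strategy, with an int vector and a sentinel standing in for the FixedArray and the hole value:

#include <cstdio>
#include <vector>

static const int kLoopUnfoldLimit = 4;

void FillWithHoles(std::vector<int>* elements, int initial_capacity) {
  const int the_hole = -1;  // stand-in for factory->the_hole_value()
  if (initial_capacity <= kLoopUnfoldLimit) {
    // Unfolded case: the stub emits one straight-line store per slot
    // (modeled here as a counted loop over at most four slots).
    for (int i = 0; i < initial_capacity; i++) (*elements)[i] = the_hole;
  } else {
    // Folded case: bump a cursor until it reaches the end of the backing
    // store, mirroring the add/cmp/j(below) loop restored above.
    int* cursor = elements->data();
    int* end = elements->data() + initial_capacity;
    while (cursor < end) *cursor++ = the_hole;
  }
}

int main() {
  std::vector<int> small(3, 0), large(16, 0);
  FillWithHoles(&small, 3);
  FillWithHoles(&large, 16);
  std::printf("filled %zu and %zu slots\n", small.size(), large.size());
  return 0;
}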
 
@@ -1002,7 +1013,10 @@
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
   ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
-  __ LoadInitialArrayMap(array_function, scratch, elements_array);
+  // Load the initial map from the array function.
+  __ mov(elements_array,
+         FieldOperand(array_function,
+                      JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
@@ -1068,7 +1082,7 @@
     __ bind(&loop);
     __ stos();
     __ bind(&entry);
-    __ cmp(edi, elements_array_end);
+    __ cmp(edi, Operand(elements_array_end));
     __ j(below, &loop);
     __ bind(&done);
   }
@@ -1093,7 +1107,7 @@
                             bool construct_call,
                             Label* call_generic_code) {
   Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call,
-      empty_array, not_empty_array, finish, cant_transition_map, not_double;
+        empty_array, not_empty_array;
 
   // Push the constructor and argc. No need to tag argc as a smi, as there will
   // be no garbage collection with this on the stack.
@@ -1106,7 +1120,7 @@
   __ push(eax);
 
   // Check for array construction with zero arguments.
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(not_zero, &argc_one_or_more);
 
   __ bind(&empty_array);
@@ -1117,6 +1131,7 @@
                        ebx,
                        ecx,
                        edi,
+                       kPreallocatedArrayElements,
                        &prepare_generic_code_call);
   __ IncrementCounter(masm->isolate()->counters()->array_function_native(), 1);
   __ pop(ebx);
@@ -1132,7 +1147,7 @@
   __ j(not_equal, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(not_zero, &not_empty_array);
 
   // The single argument passed is zero, so we jump to the code above used to
@@ -1145,7 +1160,7 @@
     __ mov(eax, Operand(esp, i * kPointerSize));
     __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
   }
-  __ Drop(2);  // Drop two stack slots.
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
   __ push(Immediate(0));  // Treat this as a call with argc of zero.
   __ jmp(&empty_array);
 
@@ -1203,44 +1218,39 @@
                   false,
                   &prepare_generic_code_call);
   __ IncrementCounter(counters->array_function_native(), 1);
-  __ push(ebx);
-  __ mov(ebx, Operand(esp, kPointerSize));
+  __ mov(eax, ebx);
+  __ pop(ebx);
+  if (construct_call) {
+    __ pop(edi);
+  }
+  __ push(eax);
+  // eax: JSArray
   // ebx: argc
   // edx: elements_array_end (untagged)
   // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
+  // esp[4]: return address
+  // esp[8]: last argument
 
   // Location of the last argument
-  int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
-  __ lea(edi, Operand(esp, last_arg_offset));
+  __ lea(edi, Operand(esp, 2 * kPointerSize));
 
   // Location of the first array element (Parameter fill_with_holes to
-  // AllocateJSArray is false, so the FixedArray is returned in ecx).
+  // AllocateJSArray is false, so the FixedArray is returned in ecx).
   __ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
 
-  Label has_non_smi_element;
-
   // ebx: argc
   // edx: location of the first array element
   // edi: location of the last argument
   // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
+  // esp[4]: return address
+  // esp[8]: last argument
   Label loop, entry;
   __ mov(ecx, ebx);
   __ jmp(&entry);
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(eax, &has_non_smi_element);
-  }
   __ mov(Operand(edx, 0), eax);
-  __ add(edx, Immediate(kPointerSize));
+  __ add(Operand(edx), Immediate(kPointerSize));
   __ bind(&entry);
   __ dec(ecx);
   __ j(greater_equal, &loop);
@@ -1248,56 +1258,13 @@
   // Remove caller arguments from the stack and return.
   // ebx: argc
   // esp[0]: JSArray
-  // esp[4]: argc
-  // esp[8]: constructor (only if construct_call)
-  // esp[12]: return address
-  // esp[16]: last argument
-  __ bind(&finish);
-  __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
+  // esp[4]: return address
+  // esp[8]: last argument
   __ pop(eax);
-  __ pop(ebx);
-  __ lea(esp, Operand(esp, ebx, times_pointer_size,
-                      last_arg_offset - kPointerSize));
-  __ jmp(ecx);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &not_double,
-              DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  // Throw away the array that's only been partially constructed.
-  __ pop(eax);
-  __ UndoAllocationInNewSpace(eax);
-  __ jmp(&prepare_generic_code_call);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
-  __ mov(ebx, Operand(esp, 0));
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ONLY_ELEMENTS,
-      FAST_ELEMENTS,
-      edi,
-      eax,
-      &cant_transition_map);
-  __ mov(FieldOperand(ebx, HeapObject::kMapOffset), edi);
-  __ RecordWriteField(ebx, HeapObject::kMapOffset, edi, eax,
-                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  // Prepare to re-enter the loop
-  __ lea(edi, Operand(esp, last_arg_offset));
-
-  // Finish the array initialization loop.
-  Label loop2;
-  __ bind(&loop2);
-  __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
-  __ mov(Operand(edx, 0), eax);
-  __ add(edx, Immediate(kPointerSize));
-  __ dec(ecx);
-  __ j(greater_equal, &loop2);
-  __ jmp(&finish);
+  __ pop(ecx);
+  __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
+  __ push(ecx);
+  __ ret(0);
 
   // Restore argc and constructor before running the generic code.
   __ bind(&prepare_generic_code_call);
@@ -1309,40 +1276,6 @@
 }
 
 
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax : argc
-  //  -- esp[0] : return address
-  //  -- esp[4] : last argument
-  // -----------------------------------
-  Label generic_array_code;
-
-  // Get the InternalArray function.
-  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
-
-  if (FLAG_debug_code) {
-    // Initial map for the builtin InternalArray function should be a map.
-    __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    __ test(ebx, Immediate(kSmiTagMask));
-    __ Assert(not_zero, "Unexpected initial map for InternalArray function");
-    __ CmpObjectType(ebx, MAP_TYPE, ecx);
-    __ Assert(equal, "Unexpected initial map for InternalArray function");
-  }
-
-  // Run the native code for the InternalArray function called as a normal
-  // function.
-  ArrayNativeCode(masm, false, &generic_array_code);
-
-  // Jump to the generic internal array code in case the specialized code cannot
-  // handle the construction.
-  __ bind(&generic_array_code);
-  Handle<Code> array_code =
-      masm->isolate()->builtins()->InternalArrayCodeGeneric();
-  __ jmp(array_code, RelocInfo::CODE_TARGET);
-}
-
-
 void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax : argc
@@ -1355,7 +1288,7 @@
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
 
   if (FLAG_debug_code) {
-    // Initial map for the builtin Array function should be a map.
+    // Initial map for the builtin Array function should be a map.
     __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     __ test(ebx, Immediate(kSmiTagMask));
@@ -1423,14 +1356,14 @@
 
   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, ecx);
+    __ cmp(edi, Operand(ecx));
     __ Assert(equal, "Unexpected String function");
   }
 
   // Load the first argument into eax and get rid of the rest
   // (including the receiver).
   Label no_arguments;
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(zero, &no_arguments);
   __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
   __ pop(ecx);
@@ -1506,13 +1439,12 @@
   // Invoke the conversion builtin and put the result into ebx.
   __ bind(&convert_argument);
   __ IncrementCounter(counters->string_ctor_conversions(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(edi);  // Preserve the function.
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-    __ pop(edi);
-  }
+  __ EnterInternalFrame();
+  __ push(edi);  // Preserve the function.
+  __ push(eax);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ pop(edi);
+  __ LeaveInternalFrame();
   __ mov(ebx, eax);
   __ jmp(&argument_is_string);
 
@@ -1529,18 +1461,17 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(ebx);
-    __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(ebx);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
   __ ret(0);
 }
 
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
-  __ mov(ebp, esp);
+  __ mov(ebp, Operand(esp));
 
   // Store the arguments adaptor context sentinel.
   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1584,7 +1515,7 @@
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
 
   Label enough, too_few;
-  __ cmp(eax, ebx);
+  __ cmp(eax, Operand(ebx));
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ j(equal, &dont_adapt_arguments);
@@ -1602,8 +1533,8 @@
     __ bind(&copy);
     __ inc(edi);
     __ push(Operand(eax, 0));
-    __ sub(eax, Immediate(kPointerSize));
-    __ cmp(edi, ebx);
+    __ sub(Operand(eax), Immediate(kPointerSize));
+    __ cmp(edi, Operand(ebx));
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1616,17 +1547,17 @@
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
     // ebx = expected - actual.
-    __ sub(ebx, eax);
+    __ sub(ebx, Operand(eax));
     // eax = -actual - 1
     __ neg(eax);
-    __ sub(eax, Immediate(1));
+    __ sub(Operand(eax), Immediate(1));
 
     Label copy;
     __ bind(&copy);
     __ inc(eax);
     __ push(Operand(edi, 0));
-    __ sub(edi, Immediate(kPointerSize));
-    __ test(eax, eax);
+    __ sub(Operand(edi), Immediate(kPointerSize));
+    __ test(eax, Operand(eax));
     __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
@@ -1634,7 +1565,7 @@
     __ bind(&fill);
     __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(eax, ebx);
+    __ cmp(eax, Operand(ebx));
     __ j(less, &fill);
   }
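The effect of the two copy loops and the undefined-fill above can be sketched in plain C++; the container and names here are illustrative stand-ins, not V8 code:

#include <cstdio>
#include <string>
#include <vector>

// Sketch of argument adaptation: copy what was actually passed, then
// pad with undefined until the expected parameter count is reached.
std::vector<std::string> AdaptArguments(const std::vector<std::string>& actual,
                                        size_t expected) {
  std::vector<std::string> adapted;
  for (size_t i = 0; i < expected; i++) {
    adapted.push_back(i < actual.size() ? actual[i] : "undefined");
  }
  return adapted;
}

int main() {
  for (const std::string& arg : AdaptArguments({"a", "b"}, 4)) {
    printf("%s ", arg.c_str());  // prints: a b undefined undefined
  }
  printf("\n");
  return 0;
}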
 
@@ -1642,10 +1573,7 @@
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(edx);
-
-  // Store offset of return address for deoptimizer.
-  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+  __ call(Operand(edx));
 
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
@@ -1655,13 +1583,13 @@
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(edx);
+  __ jmp(Operand(edx));
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(SSE2);
-  if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
@@ -1688,22 +1616,20 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(eax);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(eax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
   Label skip;
-  __ cmp(eax, Immediate(Smi::FromInt(-1)));
+  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
-  // Insert a stack guard check so that if we decide not to perform
-  // on-stack replacement right away, the function calling this stub can
-  // still be interrupted.
+  // If we decide not to perform on-stack replacement, we perform a
+  // stack guard check to enable interrupts.
   __ bind(&stack_check);
   Label ok;
   ExternalReference stack_limit =
@@ -1712,9 +1638,7 @@
   __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  if (FLAG_debug_code) {
-    __ Abort("Unreachable code: returned from tail call.");
-  }
+  __ Abort("Unreachable code: returned from tail call.");
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 4faa6a4..8a5bd50 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,8 +34,6 @@
 #include "isolate.h"
 #include "jsregexp.h"
 #include "regexp-macro-assembler.h"
-#include "stub-cache.h"
-#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -51,7 +49,7 @@
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
-  __ cmp(ebx, Immediate(factory->heap_number_map()));
+  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
   __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
@@ -72,9 +70,9 @@
   // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
-  int map_index = (language_mode_ == CLASSIC_MODE)
-      ? Context::FUNCTION_MAP_INDEX
-      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -128,14 +126,14 @@
   // Get the function from the stack.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
 
-  // Set up the object header.
+  // Set up the object header.
   Factory* factory = masm->isolate()->factory();
   __ mov(FieldOperand(eax, HeapObject::kMapOffset),
          factory->function_context_map());
   __ mov(FieldOperand(eax, Context::kLengthOffset),
          Immediate(Smi::FromInt(length)));
 
-  // Set up the fixed slots.
+  // Set up the fixed slots.
   __ Set(ebx, Immediate(0));  // Set to NULL.
   __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
   __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
@@ -152,7 +150,7 @@
   }
 
   // Return and remove the on-stack parameter.
-  __ mov(esi, eax);
+  __ mov(esi, Operand(eax));
   __ ret(1 * kPointerSize);
 
   // Need to collect. Call into runtime system.
@@ -161,139 +159,6 @@
 }
 
 
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [esp + (1 * kPointerSize)]: function
-  // [esp + (2 * kPointerSize)]: serialized scope info
-
-  // Try to allocate the context in new space.
-  Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-  __ AllocateInNewSpace(FixedArray::SizeFor(length),
-                        eax, ebx, ecx, &gc, TAG_OBJECT);
-
-  // Get the function or sentinel from the stack.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));
-
-  // Get the serialized scope info from the stack.
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-
-  // Set up the object header.
-  Factory* factory = masm->isolate()->factory();
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         factory->block_context_map());
-  __ mov(FieldOperand(eax, Context::kLengthOffset),
-         Immediate(Smi::FromInt(length)));
-
-  // If this block context is nested in the global context we get a smi
-  // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
-  // we still have to look up.
-  Label after_sentinel;
-  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
-  if (FLAG_debug_code) {
-    const char* message = "Expected 0 as a Smi sentinel";
-    __ cmp(ecx, 0);
-    __ Assert(equal, message);
-  }
-  __ mov(ecx, GlobalObjectOperand());
-  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
-  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
-  __ bind(&after_sentinel);
-
-  // Set up the fixed slots.
-  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
-  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
-  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);
-
-  // Copy the global object from the previous context.
-  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
-  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);
-
-  // Initialize the rest of the slots to the hole value.
-  if (slots_ == 1) {
-    __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
-           factory->the_hole_value());
-  } else {
-    __ mov(ebx, factory->the_hole_value());
-    for (int i = 0; i < slots_; i++) {
-      __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
-    }
-  }
-
-  // Return and remove the on-stack parameters.
-  __ mov(esi, eax);
-  __ ret(2 * kPointerSize);
-
-  // Need to collect. Call into runtime system.
-  __ bind(&gc);
-  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    Label* fail) {
-  // Registers on entry:
-  //
-  // ecx: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-  int size = JSArray::kSize + elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ mov(ebx, FieldOperand(ecx, i));
-      __ mov(FieldOperand(eax, i), ebx);
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
-    __ lea(edx, Operand(eax, JSArray::kSize));
-    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
-    // Copy the elements array.
-    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
-      for (int i = 0; i < elements_size; i += kPointerSize) {
-        __ mov(ebx, FieldOperand(ecx, i));
-        __ mov(FieldOperand(edx, i), ebx);
-      }
-    } else {
-      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
-      int i;
-      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
-        __ mov(ebx, FieldOperand(ecx, i));
-        __ mov(FieldOperand(edx, i), ebx);
-      }
-      while (i < elements_size) {
-        __ fld_d(FieldOperand(ecx, i));
-        __ fstp_d(FieldOperand(edx, i));
-        i += kDoubleSize;
-      }
-      ASSERT(i == elements_size);
-    }
-  }
-}
-
-
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -301,8 +166,13 @@
   // [esp + (2 * kPointerSize)]: literal index.
   // [esp + (3 * kPointerSize)]: literals array.
 
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
   // Load boilerplate object into ecx and check if we need to create a
   // boilerplate.
+  Label slow_case;
   __ mov(ecx, Operand(esp, 3 * kPointerSize));
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   STATIC_ASSERT(kPointerSize == 4);
@@ -312,43 +182,16 @@
                            FixedArray::kHeaderSize));
   Factory* factory = masm->isolate()->factory();
   __ cmp(ecx, factory->undefined_value());
-  Label slow_case;
   __ j(equal, &slow_case);
 
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  // ecx is boilerplate object.
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
-    __ CheckMap(ebx, factory->fixed_cow_array_map(),
-                &check_fast_elements, DONT_DO_SMI_CHECK);
-    GenerateFastCloneShallowArrayCommon(masm, 0,
-                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&check_fast_elements);
-    __ CheckMap(ebx, factory->fixed_array_map(),
-                &double_elements, DONT_DO_SMI_CHECK);
-    GenerateFastCloneShallowArrayCommon(masm, length_,
-                                        CLONE_ELEMENTS, &slow_case);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
   if (FLAG_debug_code) {
     const char* message;
     Handle<Map> expected_map;
-    if (mode == CLONE_ELEMENTS) {
+    if (mode_ == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map = factory->fixed_array_map();
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map = factory->fixed_double_array_map();
     } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map = factory->fixed_cow_array_map();
     }
@@ -359,7 +202,32 @@
     __ pop(ecx);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ mov(ebx, FieldOperand(ecx, i));
+      __ mov(FieldOperand(eax, i), ebx);
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ lea(edx, Operand(eax, JSArray::kSize));
+    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
+
+    // Copy the elements array.
+    for (int i = 0; i < elements_size; i += kPointerSize) {
+      __ mov(ebx, FieldOperand(ecx, i));
+      __ mov(FieldOperand(edx, i), ebx);
+    }
+  }
+
   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);
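The restored clone stub keeps the old single-allocation layout: the JSArray header and its elements backing store come out of one contiguous block, so only one new-space limit check is needed. A rough stand-in with plain malloc (field names are made up, not V8's):

#include <cstdio>
#include <cstdlib>

// Stand-in for JSArray::kSize: map, properties, elements, length.
struct ArrayHeader { void* map; void* properties; void* elements; void* length; };

int main() {
  const size_t kPointerSize = sizeof(void*);
  const size_t length = 4;
  // Stand-in for FixedArray::SizeFor(length): map + length fields + payload.
  size_t elements_size = 2 * kPointerSize + length * kPointerSize;
  char* block = (char*)malloc(sizeof(ArrayHeader) + elements_size);  // one allocation
  ArrayHeader* array = (ArrayHeader*)block;
  array->elements = block + sizeof(ArrayHeader);  // elements follow the header
  printf("array=%p elements=%p\n", (void*)array, array->elements);
  free(block);
  return 0;
}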
 
@@ -368,57 +236,9 @@
 }
 
 
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [esp + kPointerSize]: object literal flags.
-  // [esp + (2 * kPointerSize)]: constant properties.
-  // [esp + (3 * kPointerSize)]: literal index.
-  // [esp + (4 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into ecx and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ mov(ecx, Operand(esp, 4 * kPointerSize));
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  STATIC_ASSERT(kPointerSize == 4);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-  Factory* factory = masm->isolate()->factory();
-  __ cmp(ecx, factory->undefined_value());
-  __ j(equal, &slow_case);
-
-  // Check that the boilerplate contains only fast properties and we can
-  // statically determine the instance size.
-  int size = JSObject::kHeaderSize + length_ * kPointerSize;
-  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
-  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
-  __ j(not_equal, &slow_case);
-
-  // Allocate the JS object and copy header together with all in-object
-  // properties from the boilerplate.
-  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ mov(ebx, FieldOperand(ecx, i));
-    __ mov(FieldOperand(eax, i), ebx);
-  }
-
-  // Return and remove the on-stack parameters.
-  __ ret(4 * kPointerSize);
-
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   Factory* factory = masm->isolate()->factory();
   const Register argument = eax;
@@ -516,41 +336,6 @@
 }
 
 
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
-  // We don't allow a GC during a store buffer overflow so there is no need to
-  // store the registers in any particular way, but we do have to store and
-  // restore them.
-  __ pushad();
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(SSE2);
-    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
-    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-      XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(Operand(esp, i * kDoubleSize), reg);
-    }
-  }
-  const int argument_count = 1;
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ PrepareCallCFunction(argument_count, ecx);
-  __ mov(Operand(esp, 0 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(
-      ExternalReference::store_buffer_overflow_function(masm->isolate()),
-      argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(SSE2);
-    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-      XMMRegister reg = XMMRegister::from_code(i);
-      __ movdbl(reg, Operand(esp, i * kDoubleSize));
-    }
-    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
-  }
-  __ popad();
-  __ ret(0);
-}
-
-
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -685,27 +470,27 @@
     // Check whether the exponent is too big for a 64 bit signed integer.
     static const uint32_t kTooBigExponent =
         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(scratch2, Immediate(kTooBigExponent));
+    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
-    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
+    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
+    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
     // Load ecx with zero.  We use this either for the final shift or
     // for the answer.
-    __ xor_(ecx, ecx);
+    __ xor_(ecx, Operand(ecx));
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased).  This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
     const uint32_t non_smi_exponent =
         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(scratch2, Immediate(non_smi_exponent));
+    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
     __ j(equal, &right_exponent, Label::kNear);
@@ -718,7 +503,7 @@
       // >>> operator has a tendency to generate numbers with an exponent of 31.
       const uint32_t big_non_smi_exponent =
           (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(scratch2, Immediate(big_non_smi_exponent));
+      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
       __ j(not_equal, conversion_failure);
       // We have the big exponent, typically from >>>.  This means the number is
       // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
@@ -737,9 +522,9 @@
       // Shift down 21 bits to get the most significant 11 bits or the low
       // mantissa word.
       __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, scratch2);
+      __ or_(ecx, Operand(scratch2));
       // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, scratch);
+      __ test(scratch, Operand(scratch));
       __ j(positive, &done, Label::kNear);
       __ neg(ecx);
       __ jmp(&done, Label::kNear);
@@ -749,18 +534,18 @@
     // Exponent word in scratch, exponent part of exponent word in scratch2.
     // Zero in ecx.
     // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
     // it rounds to zero.
     const uint32_t zero_exponent =
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(scratch2, Immediate(zero_exponent));
+    __ sub(Operand(scratch2), Immediate(zero_exponent));
     // ecx already has a Smi zero.
     __ j(less, &done, Label::kNear);
 
     // We have a shifted exponent between 0 and 30 in scratch2.
     __ shr(scratch2, HeapNumber::kExponentShift);
     __ mov(ecx, Immediate(30));
-    __ sub(ecx, scratch2);
+    __ sub(ecx, Operand(scratch2));
 
     __ bind(&right_exponent);
     // Here ecx is the shift, scratch is the exponent word.
@@ -780,19 +565,19 @@
     // Shift down 22 bits to get the most significant 10 bits or the low
     // mantissa word.
     __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, scratch);
+    __ or_(scratch2, Operand(scratch));
     // Move down according to the exponent.
     __ shr_cl(scratch2);
     // Now the unsigned answer is in scratch2.  We need to move it to ecx and
     // we may need to fix the sign.
     Label negative;
-    __ xor_(ecx, ecx);
+    __ xor_(ecx, Operand(ecx));
     __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
     __ j(greater, &negative, Label::kNear);
     __ mov(ecx, scratch2);
     __ jmp(&done, Label::kNear);
     __ bind(&negative);
-    __ sub(ecx, scratch2);
+    __ sub(ecx, Operand(scratch2));
     __ bind(&done);
   }
 }
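What IntegerConvert does by hand with the exponent word can be mimicked in portable C++. A sketch under the same precondition (the biased exponent was already checked to be at most 30), truncating toward zero like the code above:

#include <cstdint>
#include <cstring>
#include <cstdio>

int32_t IntegerConvertSketch(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t high = (uint32_t)(bits >> 32);
  int32_t exponent = (int32_t)((high >> 20) & 0x7FF) - 1023;  // kExponentBias
  if (exponent < 0) return 0;  // |d| < 1.0 * 2^0, i.e. it rounds to zero
  // Reconstruct the mantissa with the implicit leading 1 and shift into place.
  uint64_t mantissa = (bits & 0x000FFFFFFFFFFFFFull) | (1ull << 52);
  int32_t result = (int32_t)(mantissa >> (52 - exponent));
  return (high & 0x80000000u) ? -result : result;  // fix the sign
}

int main() {
  printf("%d\n", IntegerConvertSketch(-1234.56));  // -1234
  return 0;
}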
@@ -894,13 +679,13 @@
   __ JumpIfNotSmi(eax, non_smi, non_smi_near);
 
   // We can't handle -0 with smis, so use a type transition for that case.
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(zero, slow, slow_near);
 
   // Try optimistic subtraction '0 - value', saving operand in eax for undo.
-  __ mov(edx, eax);
+  __ mov(edx, Operand(eax));
   __ Set(eax, Immediate(0));
-  __ sub(eax, edx);
+  __ sub(eax, Operand(edx));
   __ j(overflow, undo, undo_near);
   __ ret(0);
 }
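Why the smi fast path refuses 0 and bails on overflow: negating smi zero would have to produce the heap number -0, and negating the most negative smi leaves the smi range. A sketch with ia32's 31-bit smi payload (names illustrative, not V8 code):

#include <cstdint>
#include <cstdio>

bool TryNegateSmi(int32_t value, int32_t* out) {
  const int32_t kSmiMin = -(1 << 30);  // assumed 31-bit smi value range
  if (value == 0) return false;        // -0 is not representable as a smi
  if (value == kSmiMin) return false;  // 0 - kSmiMin is outside the smi range
  *out = 0 - value;                    // the stub's optimistic '0 - value'
  return true;
}

int main() {
  int32_t result;
  printf("%d %d\n", TryNegateSmi(42, &result), result);  // 1 -42
  return 0;
}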
@@ -921,7 +706,7 @@
 
 
 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
-  __ mov(eax, edx);
+  __ mov(eax, Operand(edx));
 }
 
 
@@ -975,7 +760,7 @@
     __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
             Immediate(HeapNumber::kSignMask));  // Flip sign.
   } else {
-    __ mov(edx, eax);
+    __ mov(edx, Operand(eax));
     // edx: operand
 
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -983,12 +768,11 @@
     __ jmp(&heapnumber_allocated, Label::kNear);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(edx);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ pop(edx);
-    }
+    __ EnterInternalFrame();
+    __ push(edx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(edx);
+    __ LeaveInternalFrame();
 
     __ bind(&heapnumber_allocated);
     // eax: allocated 'empty' number
@@ -1031,16 +815,15 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Push the original HeapNumber on the stack. The integer value can't
-      // be stored since it's untagged and not in the smi range (so we can't
-      // smi-tag it). We'll recalculate the value after the GC instead.
-      __ push(ebx);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      // New HeapNumber is in eax.
-      __ pop(edx);
-    }
+    __ EnterInternalFrame();
+    // Push the original HeapNumber on the stack. The integer value can't
+    // be stored since it's untagged and not in the smi range (so we can't
+    // smi-tag it). We'll recalculate the value after the GC instead.
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    // New HeapNumber is in eax.
+    __ pop(edx);
+    __ LeaveInternalFrame();
     // IntegerConvert uses ebx and edi as scratch registers.
     // This conversion won't go slow-case.
     IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
@@ -1050,7 +833,7 @@
   }
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ cvtsi2sd(xmm0, ecx);
+    __ cvtsi2sd(xmm0, Operand(ecx));
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
     __ push(ecx);
@@ -1164,10 +947,6 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1243,7 +1022,7 @@
       // eax in case the result is not a smi.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, left);  // Bitwise or is commutative.
+      __ or_(right, Operand(left));  // Bitwise or is commutative.
       combined = right;
       break;
 
@@ -1255,7 +1034,7 @@
     case Token::DIV:
     case Token::MOD:
       __ mov(combined, right);
-      __ or_(combined, left);
+      __ or_(combined, Operand(left));
       break;
 
     case Token::SHL:
@@ -1265,7 +1044,7 @@
       // for the smi check register.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, left);
+      __ or_(right, Operand(left));
       combined = right;
       break;
 
@@ -1288,12 +1067,12 @@
 
     case Token::BIT_XOR:
       ASSERT(right.is(eax));
-      __ xor_(right, left);  // Bitwise xor is commutative.
+      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
       break;
 
     case Token::BIT_AND:
       ASSERT(right.is(eax));
-      __ and_(right, left);  // Bitwise and is commutative.
+      __ and_(right, Operand(left));  // Bitwise and is commutative.
       break;
 
     case Token::SHL:
@@ -1342,12 +1121,12 @@
 
     case Token::ADD:
       ASSERT(right.is(eax));
-      __ add(right, left);  // Addition is commutative.
+      __ add(right, Operand(left));  // Addition is commutative.
       __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
-      __ sub(left, right);
+      __ sub(left, Operand(right));
       __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
@@ -1361,7 +1140,7 @@
       // Remove tag from one of the operands (but keep sign).
       __ SmiUntag(right);
       // Do multiplication.
-      __ imul(right, left);  // Multiplication is commutative.
+      __ imul(right, Operand(left));  // Multiplication is commutative.
       __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1372,7 +1151,7 @@
       // save the left operand.
       __ mov(edi, left);
       // Check for 0 divisor.
-      __ test(right, right);
+      __ test(right, Operand(right));
       __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -1388,7 +1167,7 @@
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
-      __ test(edx, edx);
+      __ test(edx, Operand(edx));
       __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
@@ -1396,7 +1175,7 @@
 
     case Token::MOD:
       // Check for 0 divisor.
-      __ test(right, right);
+      __ test(right, Operand(right));
       __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
@@ -1447,11 +1226,11 @@
         break;
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, left);
+        __ sub(right, Operand(left));
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, right);
+        __ add(left, Operand(right));
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1489,7 +1268,7 @@
           ASSERT_EQ(Token::SHL, op_);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, left);
+            __ cvtsi2sd(xmm0, Operand(left));
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1511,11 +1290,11 @@
         switch (op_) {
           case Token::ADD:
             // Revert right = right + left.
-            __ sub(right, left);
+            __ sub(right, Operand(left));
             break;
           case Token::SUB:
             // Revert left = left - right.
-            __ add(left, right);
+            __ add(left, Operand(right));
             break;
           case Token::MUL:
             // Right was clobbered but a copy is in ebx.
@@ -1707,7 +1486,7 @@
         // Check result type if it is currently Int32.
         if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, ecx);
+          __ cvtsi2sd(xmm2, Operand(ecx));
           __ ucomisd(xmm0, xmm2);
           __ j(not_zero, &not_int32);
           __ j(carry, &not_int32);
@@ -1769,9 +1548,9 @@
       FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                         &not_int32);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, ecx); break;
-        case Token::BIT_AND: __ and_(eax, ecx); break;
-        case Token::BIT_XOR: __ xor_(eax, ecx); break;
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1795,7 +1574,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, eax);  // ebx: result
+        __ mov(ebx, Operand(eax));  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1815,7 +1594,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1896,7 +1675,7 @@
   __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(edx, edx);
+    __ xor_(edx, Operand(edx));
   } else {
     __ mov(edx, Immediate(factory->nan_value()));
   }
@@ -1905,7 +1684,7 @@
   __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(eax, eax);
+    __ xor_(eax, Operand(eax));
   } else {
     __ mov(eax, Immediate(factory->nan_value()));
   }
@@ -1983,9 +1762,9 @@
                                                   use_sse3_,
                                                   &not_floats);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, ecx); break;
-        case Token::BIT_AND: __ and_(eax, ecx); break;
-        case Token::BIT_XOR: __ xor_(eax, ecx); break;
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -2009,7 +1788,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, eax);  // ebx: result
+        __ mov(ebx, Operand(eax));  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -2029,7 +1808,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2182,9 +1961,9 @@
                                                   use_sse3_,
                                                   &call_runtime);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, ecx); break;
-        case Token::BIT_AND: __ and_(eax, ecx); break;
-        case Token::BIT_XOR: __ xor_(eax, ecx); break;
+        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
+        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -2208,7 +1987,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, eax);  // ebx: result
+        __ mov(ebx, Operand(eax));  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -2228,7 +2007,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, ebx);
+          __ cvtsi2sd(xmm0, Operand(ebx));
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2338,10 +2117,10 @@
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
       // Now edx can be overwritten losing one of the arguments as we are
       // now done and will not need it any more.
-      __ mov(edx, ebx);
+      __ mov(edx, Operand(ebx));
       __ bind(&skip_allocation);
       // Use object in edx as a result holder
-      __ mov(eax, edx);
+      __ mov(eax, Operand(edx));
       break;
     }
     case OVERWRITE_RIGHT:
@@ -2399,7 +2178,7 @@
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
     __ sar(eax, 1);
-    __ sub(esp, Immediate(2 * kPointerSize));
+    __ sub(Operand(esp), Immediate(2 * kPointerSize));
     __ mov(Operand(esp, 0), eax);
     __ fild_s(Operand(esp, 0));
     __ fst_d(Operand(esp, 0));
@@ -2410,7 +2189,7 @@
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
     Factory* factory = masm->isolate()->factory();
-    __ cmp(ebx, Immediate(factory->heap_number_map()));
+    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
     __ j(not_equal, &runtime_call);
     // Input is a HeapNumber. Push it on the FPU stack and load its
     // low and high words into ebx, edx.
@@ -2422,12 +2201,12 @@
   } else {  // UNTAGGED.
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
-      __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
+      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
       __ pshufd(xmm0, xmm1, 0x1);
-      __ movd(edx, xmm0);
+      __ movd(Operand(edx), xmm0);
     }
-    __ movd(ebx, xmm1);
+    __ movd(Operand(ebx), xmm1);
   }
 
   // ST[0] or xmm1  == double value
@@ -2436,15 +2215,15 @@
   // Compute hash (the shifts are arithmetic):
   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
   __ mov(ecx, ebx);
-  __ xor_(ecx, edx);
+  __ xor_(ecx, Operand(edx));
   __ mov(eax, ecx);
   __ sar(eax, 16);
-  __ xor_(ecx, eax);
+  __ xor_(ecx, Operand(eax));
   __ mov(eax, ecx);
   __ sar(eax, 8);
-  __ xor_(ecx, eax);
+  __ xor_(ecx, Operand(eax));
   ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
-  __ and_(ecx,
+  __ and_(Operand(ecx),
           Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
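The hash from the comment above, spelled out in C++ over the two 32-bit halves of the double; kCacheSize here is an assumed power of two standing in for TranscendentalCache::SubCache::kCacheSize:

#include <cstdint>
#include <cstring>
#include <cstdio>

static const uint32_t kCacheSize = 512;  // assumed value for the sketch

uint32_t CacheIndex(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));
  int32_t low = (int32_t)bits;
  int32_t high = (int32_t)(bits >> 32);
  int32_t h = low ^ high;
  h ^= h >> 16;  // arithmetic shifts, as the comment notes
  h ^= h >> 8;   // (common compilers shift signed ints arithmetically)
  return (uint32_t)h & (kCacheSize - 1);
}

int main() {
  printf("%u\n", CacheIndex(1.5));
  return 0;
}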
 
   // ST[0] or xmm1 == double value.
@@ -2459,7 +2238,7 @@
   __ mov(eax, Operand(eax, cache_array_index));
   // Eax points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
   // Check that the layout of cache elements match expectations.
@@ -2485,8 +2264,6 @@
   __ cmp(edx, Operand(ecx, kIntSize));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ mov(eax, Operand(ecx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);
@@ -2497,7 +2274,6 @@
   }
 
   __ bind(&cache_miss);
-  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   // We are short on registers, so use no_reg as scratch.
   // This gives slightly larger code.
@@ -2505,12 +2281,12 @@
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ sub(esp, Immediate(kDoubleSize));
+    __ sub(Operand(esp), Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    __ add(esp, Immediate(kDoubleSize));
+    __ add(Operand(esp), Immediate(kDoubleSize));
   }
-  GenerateOperation(masm, type_);
+  GenerateOperation(masm);
   __ mov(Operand(ecx, 0), ebx);
   __ mov(Operand(ecx, kIntSize), edx);
   __ mov(Operand(ecx, 2 * kIntSize), eax);
@@ -2523,21 +2299,20 @@
 
     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
-    __ sub(esp, Immediate(kDoubleSize));
+    __ sub(Operand(esp), Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    GenerateOperation(masm, type_);
+    GenerateOperation(masm);
     __ fstp_d(Operand(esp, 0));
     __ movdbl(xmm1, Operand(esp, 0));
-    __ add(esp, Immediate(kDoubleSize));
+    __ add(Operand(esp), Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Allocate an unused object bigger than a HeapNumber.
-      __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
-      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    }
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
     __ Ret();
   }
 
@@ -2554,11 +2329,10 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(eax);
-      __ CallRuntime(RuntimeFunction(), 1);
-    }
+    __ EnterInternalFrame();
+    __ push(eax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2569,7 +2343,6 @@
   switch (type_) {
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -2578,15 +2351,12 @@
 }
 
 
-void TranscendentalCacheStub::GenerateOperation(
-    MacroAssembler* masm, TranscendentalCache::Type type) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Only free register is edi.
   // Input value is on FP stack, and also in ebx/edx.
   // Input value is possibly in xmm1.
   // Address of result (a newly allocated HeapNumber) may be in eax.
-  if (type == TranscendentalCache::SIN ||
-      type == TranscendentalCache::COS ||
-      type == TranscendentalCache::TAN) {
+  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
@@ -2594,13 +2364,13 @@
     // If argument is outside the range -2^63..2^63, fsin/cos doesn't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
-    __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
+    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-    __ cmp(edi, Immediate(supported_exponent_limit));
+    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
     __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for sin.
-    __ cmp(edi, Immediate(0x7ff00000));
+    __ cmp(Operand(edi), Immediate(0x7ff00000));
     Label non_nan_result;
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
@@ -2609,7 +2379,7 @@
     __ push(Immediate(0x7ff80000));
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
-    __ add(esp, Immediate(2 * kPointerSize));
+    __ add(Operand(esp), Immediate(2 * kPointerSize));
     __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
@@ -2625,7 +2395,7 @@
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ test(eax, Immediate(5));
+      __ test(Operand(eax), Immediate(5));
       __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
@@ -2638,7 +2408,7 @@
       __ fprem1();
       __ fwait();
       __ fnstsw_ax();
-      __ test(eax, Immediate(0x400 /* C2 */));
+      __ test(Operand(eax), Immediate(0x400 /* C2 */));
       // If C2 is set, computation only has partial result. Loop to
       // continue computation.
       __ j(not_zero, &partial_remainder_loop);
@@ -2650,25 +2420,19 @@
 
     // FPU Stack: input % 2*pi
     __ bind(&in_range);
-    switch (type) {
+    switch (type_) {
       case TranscendentalCache::SIN:
         __ fsin();
         break;
       case TranscendentalCache::COS:
         __ fcos();
         break;
-      case TranscendentalCache::TAN:
-        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
-        // FP register stack.
-        __ fptan();
-        __ fstp(0);  // Pop FP register stack.
-        break;
       default:
         UNREACHABLE();
     }
     __ bind(&done);
   } else {
-    ASSERT(type == TranscendentalCache::LOG);
+    ASSERT(type_ == TranscendentalCache::LOG);
     __ fldln2();
     __ fxch();
     __ fyl2x();
@@ -2777,13 +2541,13 @@
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
 
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, eax);
+  __ cvtsi2sd(xmm1, Operand(eax));
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
 
   __ bind(&done);
@@ -2807,12 +2571,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, eax);
+  __ cvtsi2sd(xmm1, Operand(eax));
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2828,11 +2592,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, scratch);
+  __ cvtsi2sd(xmm0, Operand(scratch));
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, scratch);
+  __ cvtsi2sd(xmm1, Operand(scratch));
 }
 
 
@@ -2840,12 +2604,12 @@
                                                     Label* non_int32,
                                                     Register scratch) {
   __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, scratch);
+  __ cvtsi2sd(xmm2, Operand(scratch));
   __ ucomisd(xmm0, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
   __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, scratch);
+  __ cvtsi2sd(xmm2, Operand(scratch));
   __ ucomisd(xmm1, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
@@ -2939,263 +2703,157 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
+  // Registers are used as follows:
+  // edx = base
+  // eax = exponent
+  // ecx = temporary, result
+
   CpuFeatures::Scope use_sse2(SSE2);
+  Label allocate_return, call_runtime;
+
+  // Load input parameters.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+  // Save 1 in xmm3 - we need this several times later on.
+  __ mov(ecx, Immediate(1));
+  __ cvtsi2sd(xmm3, Operand(ecx));
+
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number, go to that specific case.
+  __ JumpIfNotSmi(eax, &exponent_nonsmi);
+  __ JumpIfNotSmi(edx, &base_nonsmi);
+
+  // Optimized version when both exponent and base are smis.
+  Label powi;
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&powi);
+  // The exponent is a smi and the base is a heap number.
+  __ bind(&base_nonsmi);
   Factory* factory = masm->isolate()->factory();
-  const Register exponent = eax;
-  const Register base = edx;
-  const Register scratch = ecx;
-  const XMMRegister double_result = xmm3;
-  const XMMRegister double_base = xmm2;
-  const XMMRegister double_exponent = xmm1;
-  const XMMRegister double_scratch = xmm4;
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         factory->heap_number_map());
+  __ j(not_equal, &call_runtime);
 
-  Label call_runtime, done, exponent_not_smi, int_exponent;
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
 
-  // Save 1 in double_result - we need this several times later on.
-  __ mov(scratch, Immediate(1));
-  __ cvtsi2sd(double_result, scratch);
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiUntag(eax);
 
-  if (exponent_type_ == ON_STACK) {
-    Label base_is_smi, unpack_exponent;
-    // The exponent and base are supplied as arguments on the stack.
-    // This can only happen if the stub is called from non-optimized code.
-    // Load input parameters from stack.
-    __ mov(base, Operand(esp, 2 * kPointerSize));
-    __ mov(exponent, Operand(esp, 1 * kPointerSize));
-
-    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
-    __ cmp(FieldOperand(base, HeapObject::kMapOffset),
-           factory->heap_number_map());
-    __ j(not_equal, &call_runtime);
-
-    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
-    __ jmp(&unpack_exponent, Label::kNear);
-
-    __ bind(&base_is_smi);
-    __ SmiUntag(base);
-    __ cvtsi2sd(double_base, base);
-
-    __ bind(&unpack_exponent);
-    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
-
-    __ bind(&exponent_not_smi);
-    __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
-           factory->heap_number_map());
-    __ j(not_equal, &call_runtime);
-    __ movdbl(double_exponent,
-              FieldOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
-    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
-    __ SmiUntag(exponent);
-    __ jmp(&int_exponent);
-
-    __ bind(&exponent_not_smi);
-    __ movdbl(double_exponent,
-              FieldOperand(exponent, HeapNumber::kValueOffset));
-  }
-
-  if (exponent_type_ != INTEGER) {
-    Label fast_power;
-    // Detect integer exponents stored as double.
-    __ cvttsd2si(exponent, Operand(double_exponent));
-    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
-    __ cmp(exponent, Immediate(0x80000000u));
-    __ j(equal, &call_runtime);
-    __ cvtsi2sd(double_scratch, exponent);
-    // Already ruled out NaNs for exponent.
-    __ ucomisd(double_exponent, double_scratch);
-    __ j(equal, &int_exponent);
-
-    if (exponent_type_ == ON_STACK) {
-      // Detect square root case.  Crankshaft detects constant +/-0.5 at
-      // compile time and uses DoMathPowHalf instead.  We then skip this check
-      // for non-constant cases of +/-0.5 as these hardly occur.
-      Label continue_sqrt, continue_rsqrt, not_plus_half;
-      // Test for 0.5.
-      // Load double_scratch with 0.5.
-      __ mov(scratch, Immediate(0x3F000000u));
-      __ movd(double_scratch, scratch);
-      __ cvtss2sd(double_scratch, double_scratch);
-      // Already ruled out NaNs for exponent.
-      __ ucomisd(double_scratch, double_exponent);
-      __ j(not_equal, &not_plus_half, Label::kNear);
-
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      // According to IEEE-754, single-precision -Infinity has the highest
-      // 9 bits set and the lowest 23 bits cleared.
-      __ mov(scratch, 0xFF800000u);
-      __ movd(double_scratch, scratch);
-      __ cvtss2sd(double_scratch, double_scratch);
-      __ ucomisd(double_base, double_scratch);
-      // Comparing -Infinity with NaN results in "unordered", which sets the
-      // zero flag as if both were equal.  However, it also sets the carry flag.
-      __ j(not_equal, &continue_sqrt, Label::kNear);
-      __ j(carry, &continue_sqrt, Label::kNear);
-
-      // Set result to Infinity in the special case.
-      __ xorps(double_result, double_result);
-      __ subsd(double_result, double_scratch);
-      __ jmp(&done);
-
-      __ bind(&continue_sqrt);
-      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-      __ xorps(double_scratch, double_scratch);
-      __ addsd(double_scratch, double_base);  // Convert -0 to +0.
-      __ sqrtsd(double_result, double_scratch);
-      __ jmp(&done);
-
-      // Test for -0.5.
-      __ bind(&not_plus_half);
-      // Load double_exponent with -0.5 by substracting 1.
-      __ subsd(double_scratch, double_result);
-      // Already ruled out NaNs for exponent.
-      __ ucomisd(double_scratch, double_exponent);
-      __ j(not_equal, &fast_power, Label::kNear);
-
-      // Calculates reciprocal of square root of base.  Check for the special
-      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      // According to IEEE-754, single-precision -Infinity has the highest
-      // 9 bits set and the lowest 23 bits cleared.
-      __ mov(scratch, 0xFF800000u);
-      __ movd(double_scratch, scratch);
-      __ cvtss2sd(double_scratch, double_scratch);
-      __ ucomisd(double_base, double_scratch);
-      // Comparing -Infinity with NaN results in "unordered", which sets the
-      // zero flag as if both were equal.  However, it also sets the carry flag.
-      __ j(not_equal, &continue_rsqrt, Label::kNear);
-      __ j(carry, &continue_rsqrt, Label::kNear);
-
-      // Set result to 0 in the special case.
-      __ xorps(double_result, double_result);
-      __ jmp(&done);
-
-      __ bind(&continue_rsqrt);
-      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-      __ xorps(double_exponent, double_exponent);
-      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
-      __ sqrtsd(double_exponent, double_exponent);
-      __ divsd(double_result, double_exponent);
-      __ jmp(&done);
-    }
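The corner cases this deleted block guards are observable from ordinary C++ on a C99/IEEE-754 platform, where pow and sqrt behave the same way (sketch):

#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  double inf = std::numeric_limits<double>::infinity();
  printf("%g\n", std::pow(-inf, 0.5));   // inf (ECMA 15.8.2.13), not sqrt(-inf)
  printf("%g\n", std::pow(-inf, -0.5));  // 0
  printf("%g\n", std::sqrt(-0.0));       // -0: why the code adds +0 before sqrtsd
  return 0;
}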
-
-    // Using FPU instructions to calculate power.
-    Label fast_power_failed;
-    __ bind(&fast_power);
-    __ fnclex();  // Clear flags to catch exceptions later.
-    // Transfer (B)ase and (E)xponent onto the FPU register stack.
-    __ sub(esp, Immediate(kDoubleSize));
-    __ movdbl(Operand(esp, 0), double_exponent);
-    __ fld_d(Operand(esp, 0));  // E
-    __ movdbl(Operand(esp, 0), double_base);
-    __ fld_d(Operand(esp, 0));  // B, E
-
-    // Exponent is in st(1) and base is in st(0)
-    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
-    // FYL2X calculates st(1) * log2(st(0))
-    __ fyl2x();    // X
-    __ fld(0);     // X, X
-    __ frndint();  // rnd(X), X
-    __ fsub(1);    // rnd(X), X-rnd(X)
-    __ fxch(1);    // X - rnd(X), rnd(X)
-    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
-    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
-    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
-    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
-    // FSCALE calculates st(0) * 2^st(1)
-    __ fscale();   // 2^X, rnd(X)
-    __ fstp(1);
-    // Bail out to runtime in case of exceptions in the status word.
-    __ fnstsw_ax();
-    __ test_b(eax, 0x5F);  // We check for all but precision exception.
-    __ j(not_zero, &fast_power_failed, Label::kNear);
-    __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
-    __ add(esp, Immediate(kDoubleSize));
-    __ jmp(&done);
-
-    __ bind(&fast_power_failed);
-    __ fninit();
-    __ add(esp, Immediate(kDoubleSize));
-    __ jmp(&call_runtime);
-  }
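The identity driving the FPU sequence above, restated in plain C++ for positive bases (a sketch: F2XM1 only accepts |x| < 1, hence the split of X into its integer and fractional parts):

#include <cmath>
#include <cstdio>

// B^E = 2^(E * log2(B)); split X = E*log2(B) into rnd(X) + (X - rnd(X)).
double FastPowSketch(double base, double exponent) {
  double x = exponent * std::log2(base);  // FYL2X
  double r = std::nearbyint(x);           // FRNDINT
  double f = std::exp2(x - r);            // F2XM1 gives exp2(frac) - 1, then +1
  return std::ldexp(f, (int)r);           // FSCALE: f * 2^rnd(X)
}

int main() {
  printf("%g vs %g\n", FastPowSketch(2.0, 10.5), std::pow(2.0, 10.5));
  return 0;
}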
-
-  // Calculate power with integer exponent.
-  __ bind(&int_exponent);
-  const XMMRegister double_scratch2 = double_exponent;
-  __ mov(scratch, exponent);  // Back up exponent.
-  __ movsd(double_scratch, double_base);  // Back up base.
-  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
+  // Save exponent in base as we need to check if exponent is negative later.
+  // We know that base and exponent are in different registers.
+  __ mov(edx, eax);
 
   // Get absolute value of exponent.
-  Label no_neg, while_true, no_multiply;
-  __ test(scratch, scratch);
-  __ j(positive, &no_neg, Label::kNear);
-  __ neg(scratch);
+  Label no_neg;
+  __ cmp(eax, 0);
+  __ j(greater_equal, &no_neg, Label::kNear);
+  __ neg(eax);
   __ bind(&no_neg);
 
-  __ bind(&while_true);
-  __ shr(scratch, 1);
-  __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(double_result, double_scratch);
-  __ bind(&no_multiply);
+  // Load xmm1 with 1.
+  __ movsd(xmm1, xmm3);
+  Label while_true;
+  Label no_multiply;
 
-  __ mulsd(double_scratch, double_scratch);
+  __ bind(&while_true);
+  __ shr(eax, 1);
+  __ j(not_carry, &no_multiply, Label::kNear);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ mulsd(xmm0, xmm0);
   __ j(not_zero, &while_true);
 
-  // scratch has the original value of the exponent - if the exponent is
-  // negative, return 1/result.
-  __ test(exponent, exponent);
-  __ j(positive, &done);
-  __ divsd(double_scratch2, double_result);
-  __ movsd(double_result, double_scratch2);
-  // Test whether result is zero.  Bail out to check for subnormal result.
-  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
-  __ xorps(double_scratch2, double_scratch2);
-  __ ucomisd(double_scratch2, double_result);  // Result cannot be NaN.
-  // double_exponent aliased as double_scratch2 has already been overwritten
-  // and may not have contained the exponent value in the first place when the
-  // exponent is a smi.  We reset it with exponent value before bailing out.
-  __ j(not_equal, &done);
-  __ cvtsi2sd(double_exponent, exponent);
+  // base has the original value of the exponent - if the exponent is
+  // negative, return 1/result.
+  __ test(edx, Operand(edx));
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ mov(ecx, Immediate(0x7FB00000));
+  __ movd(xmm0, Operand(ecx));
+  __ cvtss2sd(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
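
The shr/mulsd loop is plain exponentiation by squaring: each shift moves the
next exponent bit into the carry flag, the result is multiplied in when that
bit is set, and the base is squared for the following bit; a negative exponent
is handled afterwards by taking the reciprocal. The same loop as a C++ sketch:

  double PowIntExponent(double base, int exponent) {
    unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                 : static_cast<unsigned>(exponent);
    double result = 1.0;             // xmm1 starts out as 1
    while (bits != 0) {
      if (bits & 1) result *= base;  // shr set the carry: multiply
      base *= base;                  // square for the next bit
      bits >>= 1;
    }
    return exponent < 0 ? 1.0 / result : result;
  }
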
 
-  // Returning or bailing out.
-  Counters* counters = masm->isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
-    // The arguments are still on the stack.
-    __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  // exponent (or both) is a heap number - no matter what, we should now
+  // work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         factory->heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  // Test if exponent is NaN.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
 
-    // The stub is called from non-optimized code, which expects the result
-    // as heap number in exponent.
-    __ bind(&done);
-    __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
-    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
-    __ IncrementCounter(counters->math_pow(), 1);
-    __ ret(2 * kPointerSize);
-  } else {
-    __ bind(&call_runtime);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(4, scratch);
-      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
-      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()), 4);
-    }
-    // Return value is in st(0) on ia32.
-    // Store it into the (fixed) result register.
-    __ sub(esp, Immediate(kDoubleSize));
-    __ fstp_d(Operand(esp, 0));
-    __ movdbl(double_result, Operand(esp, 0));
-    __ add(esp, Immediate(kDoubleSize));
+  Label base_not_smi;
+  Label handle_special_cases;
+  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
+  __ SmiUntag(edx);
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ jmp(&handle_special_cases, Label::kNear);
 
-    __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1);
-    __ ret(0);
-  }
+  __ bind(&base_not_smi);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         factory->heap_number_map());
+  __ j(not_equal, &call_runtime);
+  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+  __ and_(ecx, HeapNumber::kExponentMask);
+  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  Label not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ mov(ecx, Immediate(0xBF000000));
+  __ movd(xmm2, Operand(ecx));
+  __ cvtss2sd(xmm2, xmm2);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half, Label::kNear);
+
+  // Calculates reciprocal of square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+  __ divsd(xmm3, xmm1);
+  __ movsd(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5, this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
+  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
+  __ mov(eax, ecx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
 
 
@@ -3215,13 +2873,13 @@
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
-  __ cmp(edx, eax);
+  __ cmp(edx, Operand(eax));
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -3237,7 +2895,7 @@
   // comparison to get negative check for free.
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, ecx);
+  __ cmp(edx, Operand(ecx));
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -3268,7 +2926,7 @@
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
@@ -3299,7 +2957,7 @@
   Label adaptor_frame, try_allocate;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
@@ -3318,7 +2976,7 @@
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, ecx);
+  __ cmp(ebx, Operand(ecx));
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);
 
@@ -3332,7 +2990,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ test(ebx, ebx);
+  __ test(ebx, Operand(ebx));
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);
@@ -3341,7 +2999,7 @@
   __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
 
   // 3. Arguments object.
-  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
+  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
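
All three objects come out of a single allocation, so ebx accumulates the
parameter map, the backing store, and the arguments object before the one
AllocateInNewSpace call. Roughly, in C++ (the header constants are the ia32
values; kArgumentsObjectSize is an assumed stand-in for Heap's constant):

  const int kPointerSize = 4;                          // ia32
  const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
  const int kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;
  const int kArgumentsObjectSize = 5 * kPointerSize;   // assumed value

  int TotalAllocationSize(int mapped_count, int argument_count) {
    int bytes = 0;
    if (mapped_count > 0)  // 1. Parameter map, skipped when nothing is mapped.
      bytes += kParameterMapHeaderSize + mapped_count * kPointerSize;
    bytes += kFixedArrayHeaderSize + argument_count * kPointerSize;  // 2.
    bytes += kArgumentsObjectSize;                                   // 3.
    return bytes;
  }
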
@@ -3356,7 +3014,7 @@
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, ebx);
+  __ test(ebx, Operand(ebx));
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
   __ mov(edi, Operand(edi,
          Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3380,7 +3038,7 @@
     __ mov(FieldOperand(eax, i), edx);
   }
 
-  // Set up the callee in-object property.
+  // Setup the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ mov(edx, Operand(esp, 4 * kPointerSize));
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
@@ -3393,7 +3051,7 @@
                       Heap::kArgumentsLengthIndex * kPointerSize),
          ecx);
 
-  // Set up the elements pointer in the allocated arguments object.
+  // Setup the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, edi will point there, otherwise to the
   // backing store.
   __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
@@ -3411,7 +3069,7 @@
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ test(ebx, ebx);
+  __ test(ebx, Operand(ebx));
   __ j(zero, &skip_parameter_map);
 
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3435,7 +3093,7 @@
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ add(ebx, Operand(esp, 4 * kPointerSize));
-  __ sub(ebx, eax);
+  __ sub(ebx, Operand(eax));
   __ mov(ecx, FACTORY->the_hole_value());
   __ mov(edx, edi);
   __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3452,12 +3110,12 @@
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
-  __ sub(eax, Immediate(Smi::FromInt(1)));
+  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
   __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
   __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
+  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(not_zero, &parameters_loop, Label::kNear);
   __ pop(ecx);
 
@@ -3477,18 +3135,18 @@
   Label arguments_loop, arguments_test;
   __ mov(ebx, Operand(esp, 1 * kPointerSize));
   __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(edx, ebx);
+  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(Operand(edx), ebx);
   __ jmp(&arguments_test, Label::kNear);
 
   __ bind(&arguments_loop);
-  __ sub(edx, Immediate(kPointerSize));
+  __ sub(Operand(edx), Immediate(kPointerSize));
   __ mov(eax, Operand(edx, 0));
   __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
+  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
 
   __ bind(&arguments_test);
-  __ cmp(ebx, ecx);
+  __ cmp(ebx, Operand(ecx));
   __ j(less, &arguments_loop, Label::kNear);
 
   // Restore.
@@ -3516,7 +3174,7 @@
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
@@ -3535,11 +3193,11 @@
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
+  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3566,13 +3224,13 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
 
-  // Set up the elements pointer in the allocated arguments object and
+  // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
   __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
@@ -3588,8 +3246,8 @@
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(edi, Immediate(kPointerSize));
-  __ sub(edx, Immediate(kPointerSize));
+  __ add(Operand(edi), Immediate(kPointerSize));
+  __ sub(Operand(edx), Immediate(kPointerSize));
   __ dec(ecx);
   __ j(not_zero, &loop);
 
@@ -3610,6 +3268,10 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
 
   // Stack frame on entry.
   //  esp[0]: return address
@@ -3632,7 +3294,7 @@
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ test(ebx, ebx);
+  __ test(ebx, Operand(ebx));
   __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -3653,7 +3315,7 @@
   // ecx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
-  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
   __ j(not_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3663,7 +3325,7 @@
   // uses the assumption that smis are 2 * their untagged value.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(edx, Immediate(2));  // edx was a smi.
+  __ add(Operand(edx), Immediate(2));  // edx was a smi.
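
On ia32 a smi is the integer shifted left by one (tag bit zero), so the
smi-tagged capture count already equals 2 * number_of_captures, and adding 2
yields (number_of_captures + 1) * 2 with no untagging. As a small C++
illustration:

  int SmiTag(int value) { return value << 1; }  // kSmiTag == 0, size == 1

  int CaptureRegisterCount(int number_of_captures_smi) {
    // smi + 2  ==  2 * captures + 2  ==  (captures + 1) * 2
    return number_of_captures_smi + 2;
  }
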
   // Check that the static offsets vector buffer is large enough.
   __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
   __ j(above, &runtime);
@@ -3685,7 +3347,7 @@
   // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
   __ JumpIfNotSmi(eax, &runtime);
-  __ cmp(eax, ebx);
+  __ cmp(eax, Operand(ebx));
   __ j(above_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3705,8 +3367,8 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
-  __ cmp(edx, eax);
+  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+  __ cmp(edx, Operand(eax));
   __ j(greater, &runtime);
 
   // Reset offset for possibly sliced string.
@@ -3718,40 +3380,27 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ and_(ebx, kIsNotStringMask |
-               kStringRepresentationMask |
-               kStringEncodingMask |
-               kShortExternalStringMask);
+  __ and_(ebx,
+          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ASCII string.  None of the following
-  // string type tests will succeed if subject is not a string or a short
-  // external string.
-  __ and_(ebx, Immediate(kIsNotStringMask |
-                         kStringRepresentationMask |
-                         kShortExternalStringMask));
+  // Any other flat string must be a flat ascii string.
+  __ and_(Operand(ebx),
+          Immediate(kIsNotStringMask | kStringRepresentationMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // ebx: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string, its offset has to be taken into account.
-  Label cons_string, external_string, check_encoding;
+  Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
-  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
-  __ cmp(ebx, Immediate(kExternalStringTag));
+  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
   __ j(less, &cons_string);
-  __ j(equal, &external_string);
-
-  // Catch non-string subject or short external string.
-  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
-  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
-  __ j(not_zero, &runtime);
+  __ j(equal, &runtime);
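
The two STATIC_ASSERTs are what make a single compare sufficient here: cons
tags sort below kExternalStringTag and sliced tags above it. Sketched in C++
(the numeric tag values below are assumptions; only their ordering is asserted
by the stub):

  enum StringRepresentationTag {  // illustrative values
    kSeqTag = 0x0,
    kConsTag = 0x1,
    kExternalTag = 0x2,
    kSlicedTag = 0x3,
  };

  const char* Classify(int tag) {
    if (tag < kExternalTag) return "cons";      // j(less, &cons_string)
    if (tag == kExternalTag) return "runtime";  // j(equal, &runtime)
    return "sliced";                            // falls through
  }
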
 
   // String is sliced.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
@@ -3773,16 +3422,16 @@
             kStringRepresentationMask | kStringEncodingMask);
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be sequential ASCII or external.
+  // Any other flat string must be ascii.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask);
-  __ j(not_zero, &external_string);
+  __ j(not_zero, &runtime);
 
   __ bind(&seq_ascii_string);
-  // eax: subject string (flat ASCII)
+  // eax: subject string (flat ascii)
   // ecx: RegExp data (FixedArray)
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(ecx, Immediate(1));  // Type is ASCII.
+  __ Set(ecx, Immediate(1));  // Type is ascii.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -3799,7 +3448,7 @@
 
   // eax: subject string
   // edx: code
-  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ mov(ebx, Operand(esp, kPreviousIndexOffset));
@@ -3808,7 +3457,7 @@
   // eax: subject string
   // ebx: previous index
   // edx: code
-  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3848,21 +3497,21 @@
   // esi: original subject string
   // eax: underlying subject string
   // ebx: previous index
-  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
   // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
   // Prepare start and end index of the input.
   // Load the length from the original sliced string if that is the case.
   __ mov(esi, FieldOperand(esi, String::kLengthOffset));
-  __ add(esi, edi);  // Calculate input end wrt offset.
+  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ add(ebx, edi);  // Calculate input start wrt offset.
+  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
 
   // ebx: start index of the input string
   // esi: end index of the input string
   Label setup_two_byte, setup_rest;
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(esi);
   __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3882,8 +3531,8 @@
   __ bind(&setup_rest);
 
   // Locate the code entry and call it.
-  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ call(edx);
+  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ call(Operand(edx));
 
   // Drop arguments and come back to JS mode.
   __ LeaveApiExitFrame();
@@ -3904,9 +3553,11 @@
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
-  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+  __ mov(edx,
+         Operand::StaticVariable(ExternalReference::the_hole_value_location(
+             masm->isolate())));
   __ mov(eax, Operand::StaticVariable(pending_exception));
-  __ cmp(edx, eax);
+  __ cmp(edx, Operand(eax));
   __ j(equal, &runtime);
   // For exception, throw the exception again.
 
@@ -3923,11 +3574,11 @@
   __ Throw(eax);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(eax);
+  __ ThrowUncatchable(TERMINATION, eax);
 
   __ bind(&failure);
   // For failure to match, return null.
-  __ mov(eax, factory->null_value());
+  __ mov(Operand(eax), factory->null_value());
   __ ret(4 * kPointerSize);
 
   // Load RegExp data.
@@ -3938,7 +3589,7 @@
   // Calculate number of capture registers (number_of_captures + 1) * 2.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(edx, Immediate(2));  // edx was a smi.
+  __ add(Operand(edx), Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -3954,18 +3605,12 @@
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastSubjectOffset,
-                      eax,
-                      edi,
-                      kDontSaveFPRegs);
+  __ mov(ecx, ebx);
+  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastInputOffset,
-                      eax,
-                      edi,
-                      kDontSaveFPRegs);
+  __ mov(ecx, ebx);
+  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -3979,7 +3624,7 @@
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
-  __ sub(edx, Immediate(1));
+  __ sub(Operand(edx), Immediate(1));
   __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3997,27 +3642,6 @@
   __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ ret(4 * kPointerSize);
 
-  // External string.  Short external strings have already been ruled out.
-  // eax: subject string (expected to be external)
-  // ebx: scratch
-  __ bind(&external_string);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ test_b(ebx, kIsIndirectStringMask);
-    __ Assert(zero, "external string expected, but not found");
-  }
-  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ test_b(ebx, kStringEncodingMask);
-  __ j(not_zero, &seq_ascii_string);
-  __ jmp(&seq_two_byte_string);
-
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -4031,7 +3655,7 @@
   Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
   __ JumpIfNotSmi(ebx, &slowcase);
-  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
   __ j(above, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
   STATIC_ASSERT(kSmiTag == 0);
@@ -4091,10 +3715,10 @@
   // ebx: Start of elements in FixedArray.
   // edx: the hole.
   Label loop;
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ bind(&loop);
   __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
-  __ sub(ecx, Immediate(1));
+  __ sub(Operand(ecx), Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
 
@@ -4119,16 +3743,16 @@
   Register scratch = scratch2;
 
   // Load the number string cache.
-  ExternalReference roots_array_start =
-      ExternalReference::roots_array_start(masm->isolate());
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
   __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
   __ mov(number_string_cache,
-         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
   __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
-  __ sub(mask, Immediate(1));  // Make mask.
+  __ sub(Operand(mask), Immediate(1));  // Make mask.
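
The mask falls straight out of the cache length: the FixedArray stores
(number, string) pairs, so the entry count is length / 2, and because that
count is kept a power of two, the mask is simply entries - 1. In C++ terms
(kSmiTagSize == 1 on ia32):

  int NumberStringCacheMask(int smi_length) {
    int entries = smi_length >> (1 + 1);  // untag the smi, then halve
    return entries - 1;                   // entry count is a power of two
  }
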
 
   // Calculate the entry in the number string cache. The hash value in the
   // number string cache for smis is just the smi value, and the hash for
@@ -4154,7 +3778,7 @@
     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     // Object is heap number and hash is now in scratch. Calculate cache index.
-    __ and_(scratch, mask);
+    __ and_(scratch, Operand(mask));
     Register index = scratch;
     Register probe = mask;
     __ mov(probe,
@@ -4180,7 +3804,7 @@
 
   __ bind(&smi_hash_calculated);
   // Object is smi and hash is now in scratch. Calculate cache index.
-  __ and_(scratch, mask);
+  __ and_(scratch, Operand(mask));
   Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmp(object,
@@ -4232,10 +3856,10 @@
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
-    __ mov(ecx, edx);
-    __ or_(ecx, eax);
+    __ mov(ecx, Operand(edx));
+    __ or_(ecx, Operand(eax));
     __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
-    __ sub(edx, eax);  // Return on the result of the subtraction.
+    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
@@ -4243,8 +3867,8 @@
     __ ret(0);
     __ bind(&non_smi);
   } else if (FLAG_debug_code) {
-    __ mov(ecx, edx);
-    __ or_(ecx, eax);
+    __ mov(ecx, Operand(edx));
+    __ or_(ecx, Operand(eax));
     __ test(ecx, Immediate(kSmiTagMask));
     __ Assert(not_zero, "Unexpected smi operands.");
   }
@@ -4256,7 +3880,7 @@
   // for NaN and undefined.
   {
     Label not_identical;
-    __ cmp(eax, edx);
+    __ cmp(eax, Operand(edx));
     __ j(not_equal, &not_identical);
 
     if (cc_ != equal) {
@@ -4305,7 +3929,7 @@
       __ Set(eax, Immediate(0));
       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
       // bits.
-      __ add(edx, edx);
+      __ add(edx, Operand(edx));
       __ cmp(edx, kQuietNaNHighBitsMask << 1);
       if (cc_ == equal) {
         STATIC_ASSERT(EQUAL != 1);
@@ -4339,19 +3963,19 @@
     STATIC_ASSERT(kSmiTag == 0);
     ASSERT_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
-    __ and_(ecx, eax);
-    __ test(ecx, edx);
+    __ and_(ecx, Operand(eax));
+    __ test(ecx, Operand(edx));
     __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
     STATIC_ASSERT(kSmiTagMask == 1);
     // ecx still holds eax & kSmiTag, which is either zero or one.
-    __ sub(ecx, Immediate(0x01));
+    __ sub(Operand(ecx), Immediate(0x01));
     __ mov(ebx, edx);
-    __ xor_(ebx, eax);
-    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
-    __ xor_(ebx, eax);
+    __ xor_(ebx, Operand(eax));
+    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, Operand(eax));
     // if eax was smi, ebx is now edx, else eax.
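
The sub/xor/and/xor sequence is a branch-free select of the non-smi operand,
relying on ecx being exactly 0 or 1 after the and. In C++ (assuming the ia32
smi tag, i.e. a zero low bit):

  #include <cstdint>

  uint32_t SelectNonSmi(uint32_t eax, uint32_t edx) {
    uint32_t mask = (eax & 1u) - 1u;    // all ones if eax is a smi, else zero
    uint32_t ebx = (edx ^ eax) & mask;  // eax ^ edx, or zero
    return ebx ^ eax;                   // edx when eax was the smi, else eax
  }
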
 
     // Check if the non-smi operand is a heap number.
@@ -4413,9 +4037,9 @@
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, ecx);
+      __ cmov(above, eax, Operand(ecx));
       __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, ecx);
+      __ cmov(below, eax, Operand(ecx));
       __ ret(0);
     } else {
       FloatingPointHelper::CheckFloatOperands(
@@ -4476,7 +4100,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
                                          &check_unequal_objects);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of ascii strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
@@ -4574,89 +4198,43 @@
 }
 
 
-void InterruptStub::Generate(MacroAssembler* masm) {
-  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // ebx : cache cell for call target
-  // edi : the function to call
-  Isolate* isolate = masm->isolate();
-  Label initialize, done;
-
-  // Load the cache state into ecx.
-  __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmp(ecx, edi);
-  __ j(equal, &done, Label::kNear);
-  __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
-  __ j(equal, &done, Label::kNear);
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
-  __ j(equal, &initialize, Label::kNear);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
-         Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
-  __ jmp(&done, Label::kNear);
-
-  // An uninitialized cache is patched with the function.
-  __ bind(&initialize);
-  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
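
The deleted GenerateRecordCallTarget implements a three-state inline cache in
a global property cell: an uninitialized cell becomes monomorphic on the first
call (the cell holds the JSFunction), and any later miss degrades it to
megamorphic for good. A stand-alone C++ sketch of that transition (the
sentinels stand in for V8's TypeFeedbackCells values):

  enum CacheState { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };

  struct CallTargetCell {
    CacheState state = UNINITIALIZED;
    const void* function = nullptr;  // meaningful only when MONOMORPHIC

    void Record(const void* callee) {
      if (state == MEGAMORPHIC) return;                        // stays generic
      if (state == MONOMORPHIC && function == callee) return;  // cache hit
      if (state == UNINITIALIZED) {
        state = MONOMORPHIC;  // first target seen: go monomorphic
        function = callee;
      } else {
        state = MEGAMORPHIC;  // monomorphic miss: give up on the cache
        function = nullptr;
      }
    }
  };
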
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // ebx : cache cell for call target
-  // edi : the function to call
-  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
   // function stub.
   if (ReceiverMightBeImplicit()) {
-    Label receiver_ok;
+    Label call;
     // Get the receiver from the stack.
     // +1 ~ return address
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
     // Call as function is indicated with the hole.
-    __ cmp(eax, isolate->factory()->the_hole_value());
-    __ j(not_equal, &receiver_ok, Label::kNear);
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ j(not_equal, &call, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
-    __ mov(ecx, GlobalObjectOperand());
-    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
-    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
-    __ bind(&receiver_ok);
+    __ mov(ebx, GlobalObjectOperand());
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
+    __ bind(&call);
   }
 
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(edi, &non_function);
   // Goto slow case if we do not have a function.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &slow);
 
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
-  }
-
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
 
   if (ReceiverMightBeImplicit()) {
     Label call_as_function;
-    __ cmp(eax, isolate->factory()->the_hole_value());
+    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
     __ j(equal, &call_as_function);
     __ InvokeFunction(edi,
                       actual,
@@ -4673,13 +4251,6 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
-  if (RecordCallTarget()) {
-    // If there is a call target cache, mark it megamorphic in the
-    // non-function case.  MegamorphicSentinel is an immortal immovable
-    // object (undefined) so no write barrier is needed.
-    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
-           Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
-  }
   // Check for function proxy.
   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
@@ -4691,7 +4262,8 @@
   __ SetCallKind(ecx, CALL_AS_FUNCTION);
   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
   {
-    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
     __ jmp(adaptor, RelocInfo::CODE_TARGET);
   }
 
@@ -4703,52 +4275,9 @@
   __ Set(ebx, Immediate(0));
   __ SetCallKind(ecx, CALL_AS_METHOD);
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
-  __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-void CallConstructStub::Generate(MacroAssembler* masm) {
-  // eax : number of arguments
-  // ebx : cache cell for call target
-  // edi : constructor function
-  Label slow, non_function_call;
-
-  // Check that function is not a smi.
-  __ JumpIfSmi(edi, &non_function_call);
-  // Check that function is a JSFunction.
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &slow);
-
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
-  }
-
-  // Jump to the function-specific construct stub.
-  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
-  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(ebx);
-
-  // edi: called object
-  // eax: number of arguments
-  // ecx: object map
-  Label do_call;
-  __ bind(&slow);
-  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, &non_function_call);
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
-
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing eax).
-  __ Set(ebx, Immediate(0));
-  Handle<Code> arguments_adaptor =
+  Handle<Code> adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
-  __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
+  __ jmp(adaptor, RelocInfo::CODE_TARGET);
 }
 
 
@@ -4757,32 +4286,8 @@
 }
 
 
-bool CEntryStub::IsPregenerated() {
-  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
-          result_size_ == 1;
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
-  CEntryStub::GenerateAheadOfTime();
-  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
-  // It is important that the store buffer overflow stubs are generated first.
-  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
-  CEntryStub stub(1, kDontSaveFPRegs);
-  Handle<Code> code = stub.GetCode();
-  code->set_is_pregenerated(true);
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  __ Throw(eax);
 }
 
 
@@ -4827,7 +4332,7 @@
   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
   __ mov(Operand(esp, 2 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ call(ebx);
+  __ call(Operand(ebx));
   // Result is in eax or edx:eax - do not destroy these registers!
 
   if (always_allocate_scope) {
@@ -4859,7 +4364,8 @@
   // should have returned some failure value.
   if (FLAG_debug_code) {
     __ push(edx);
-    __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+    __ mov(edx, Operand::StaticVariable(
+        ExternalReference::the_hole_value_location(masm->isolate())));
     Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
@@ -4870,7 +4376,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
+  __ LeaveExitFrame(save_doubles_);
   __ ret(0);
 
   // Handling of failure.
@@ -4887,8 +4393,10 @@
   __ j(equal, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
+  ExternalReference the_hole_location =
+      ExternalReference::the_hole_value_location(masm->isolate());
   __ mov(eax, Operand::StaticVariable(pending_exception_address));
-  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+  __ mov(edx, Operand::StaticVariable(the_hole_location));
   __ mov(Operand::StaticVariable(pending_exception_address), edx);
 
   // Special handling of termination exceptions which are uncatchable
@@ -4904,6 +4412,12 @@
 }
 
 
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  __ ThrowUncatchable(type, eax);
+}
+
+
 void CEntryStub::Generate(MacroAssembler* masm) {
   // eax: number of arguments including receiver
   // ebx: pointer to C function  (C callee-saved)
@@ -4917,7 +4431,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
+  __ EnterExitFrame(save_doubles_);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -4957,34 +4471,23 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  // Set external caught exception to false.
-  Isolate* isolate = masm->isolate();
-  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                    isolate);
-  __ mov(Operand::StaticVariable(external_caught), Immediate(false));
-
-  // Set pending exception and eax to out of memory exception.
-  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
-                                      isolate);
-  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
-  __ mov(Operand::StaticVariable(pending_exception), eax);
-  // Fall through to the next label.
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(eax);
+  GenerateThrowUncatchable(masm, TERMINATION);
 
   __ bind(&throw_normal_exception);
-  __ Throw(eax);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, handler_entry, exit;
+  Label invoke, exit;
   Label not_outermost_js, not_outermost_js_2;
 
-  // Set up frame.
+  // Setup frame.
   __ push(ebp);
-  __ mov(ebp, esp);
+  __ mov(ebp, Operand(esp));
 
   // Push marker in two places.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -5012,38 +4515,38 @@
   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
   __ bind(&cont);
 
-  // Jump to a faked try block that does the invoke, with a faked catch
-  // block that sets the pending exception.
-  __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel.
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
   __ mov(Operand::StaticVariable(pending_exception), eax);
   __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.  There's only one
-  // handler block in this code object, so its index is 0.
+  // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
-  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
 
   // Clear any pending exceptions.
-  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
+  ExternalReference the_hole_location =
+      ExternalReference::the_hole_value_location(masm->isolate());
+  __ mov(edx, Operand::StaticVariable(the_hole_location));
   __ mov(Operand::StaticVariable(pending_exception), edx);
 
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline builtin and
-  // pop the faked function when we return. Notice that we cannot store a
-  // reference to the trampoline code directly in this stub, because the
-  // builtin stubs may not have been generated yet.
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. Notice that we
+  // cannot store a reference to the trampoline code directly in this
+  // stub, because the builtin stubs may not have been generated yet.
   if (is_construct) {
-    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
-                                      masm->isolate());
+    ExternalReference construct_entry(
+        Builtins::kJSConstructEntryTrampoline,
+        masm->isolate());
     __ mov(edx, Immediate(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline,
@@ -5052,7 +4555,7 @@
   }
   __ mov(edx, Operand(edx, 0));  // deref address
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-  __ call(edx);
+  __ call(Operand(edx));
 
   // Unlink this frame from the handler chain.
   __ PopTryHandler();
@@ -5060,7 +4563,8 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
-  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(Operand(ebx),
+         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
@@ -5074,7 +4578,7 @@
   __ pop(ebx);
   __ pop(esi);
   __ pop(edi);
-  __ add(esp, Immediate(2 * kPointerSize));  // remove markers
+  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
 
   // Restore frame pointer and return.
   __ pop(ebp);
@@ -5113,12 +4617,12 @@
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
-  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
+  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
+  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
   static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
 
-  ExternalReference roots_array_start =
-      ExternalReference::roots_array_start(masm->isolate());
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
 
   ASSERT_EQ(object.code(), InstanceofStub::left().code());
   ASSERT_EQ(function.code(), InstanceofStub::right().code());
@@ -5140,23 +4644,22 @@
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-    __ cmp(function, Operand::StaticArray(scratch,
-                                          times_pointer_size,
-                                          roots_array_start));
+    __ cmp(function,
+           Operand::StaticArray(scratch, times_pointer_size, roots_address));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
     __ cmp(map, Operand::StaticArray(
-        scratch, times_pointer_size, roots_array_start));
+        scratch, times_pointer_size, roots_address));
     __ j(not_equal, &miss, Label::kNear);
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(eax, Operand::StaticArray(
-        scratch, times_pointer_size, roots_array_start));
+        scratch, times_pointer_size, roots_address));
     __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
     __ bind(&miss);
   }
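
The cache consulted above is just three root-list slots holding the last
(function, map, answer) triple; both keys must match for the fast return.
Approximately, in C++ (field names are illustrative; in the stub an answer of
0 is later read as "is an instance"):

  struct InstanceofCache {  // three slots in the roots array
    const void* function;
    const void* map;
    int answer;             // 0 => instance, 1 => not an instance

    bool Lookup(const void* fn, const void* mp, int* result) const {
      if (fn != function || mp != map) return false;  // miss: full check
      *result = answer;
      return true;
    }
  };
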
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -5166,10 +4669,9 @@
   // map and function. The cached answer will be set when it is known below.
   if (!HasCallSiteInlineCheck()) {
   __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
-         map);
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
   __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
-  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
+  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
          function);
   } else {
     // The constants for the code patching are based on no push instructions
@@ -5179,13 +4681,12 @@
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
     if (FLAG_debug_code) {
-      __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
+      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
-      __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
+      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
     }
-    __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
-    __ mov(Operand(scratch, 0), map);
+    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
   }
 
   // Loop through the prototype chain of the object looking for the function
@@ -5193,10 +4694,10 @@
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(scratch, prototype);
+  __ cmp(scratch, Operand(prototype));
   __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(scratch, Immediate(factory->null_value()));
+  __ cmp(Operand(scratch), Immediate(factory->null_value()));
   __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
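
The loop itself is an ordinary prototype-chain walk: follow map->prototype
until either the function's prototype (instance) or null (not an instance)
turns up. As a pseudocode-grade C++ sketch, where proto_of stands in for the
two map loads and the chain is assumed to be null-terminated:

  bool IsInstanceOf(const void* start, const void* fn_prototype,
                    const void* null_value,
                    const void* (*proto_of)(const void*)) {
    for (const void* p = start; ; p = proto_of(p)) {
      if (p == fn_prototype) return true;  // is_instance
      if (p == null_value) return false;   // is_not_instance
    }
  }
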
@@ -5207,7 +4708,7 @@
     __ Set(eax, Immediate(0));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(scratch,
-                                times_pointer_size, roots_array_start), eax);
+                                times_pointer_size, roots_address), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -5229,7 +4730,7 @@
     __ Set(eax, Immediate(Smi::FromInt(1)));
     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
     __ mov(Operand::StaticArray(
-        scratch, times_pointer_size, roots_array_start), eax);
+        scratch, times_pointer_size, roots_address), eax);
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -5287,14 +4788,13 @@
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     // Call the builtin and convert 0/1 to true/false.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(object);
-      __ push(function);
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    }
+    __ EnterInternalFrame();
+    __ push(object);
+    __ push(function);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
     Label true_value, done;
-    __ test(eax, eax);
+    __ test(eax, Operand(eax));
     __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
     __ jmp(&done, Label::kNear);
@@ -5354,6 +4854,11 @@
 // StringCharCodeAtGenerator
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  Label flat_string;
+  Label ascii_string;
+  Label got_char_code;
+  Label sliced_string;
+
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -5368,26 +4873,85 @@
   // If the index is non-smi trigger the non-smi case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
+  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  __ SmiUntag(index_);
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result_, Immediate(kStringRepresentationMask));
+  __ j(zero, &flat_string);
 
-  Factory* factory = masm->isolate()->factory();
-  StringCharLoadGenerator::Generate(
-      masm, factory, object_, index_, result_, &call_runtime_);
+  // Handle non-flat strings.
+  __ and_(result_, kStringRepresentationMask);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmp(result_, kExternalStringTag);
+  __ j(greater, &sliced_string, Label::kNear);
+  __ j(equal, &call_runtime_);
 
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case, we would rather go to the runtime system now to flatten
+  // the string.
+  Label assure_seq_string;
+  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+         Immediate(masm->isolate()->factory()->empty_string()));
+  __ j(not_equal, &call_runtime_);
+  // Get the first of the two strings and load its instance type.
+  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  // Ensure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
+  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result_, Immediate(kStringRepresentationMask));
+  __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string, Label::kNear);
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(result_, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string, Label::kNear);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzx_w(result_, FieldOperand(object_,
+                                   scratch_, times_1,  // Scratch is smi-tagged.
+                                   SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ SmiUntag(scratch_);
+  __ movzx_b(result_, FieldOperand(object_,
+                                   scratch_, times_1,
+                                   SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
   __ SmiTag(result_);
   __ bind(&exit_);
 }
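
One detail worth calling out in the flat-string loads above: a smi-tagged
index is already index * 2, which is exactly the byte offset into a two-byte
string, so only the ASCII path untags. A hedged C++ sketch (header offsets
omitted; bytes is assumed to point at the character payload):

  #include <cstdint>
  #include <cstring>

  uint16_t CharCodeAt(const uint8_t* bytes, bool two_byte, int smi_index) {
    if (two_byte) {
      // times_1 with the smi works because smi == index * 2 == byte offset.
      uint16_t code;
      std::memcpy(&code, bytes + smi_index, sizeof(code));
      return code;
    }
    return bytes[smi_index >> 1];  // SmiUntag first, then a byte load
  }
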
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
@@ -5399,6 +4963,7 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -5407,11 +4972,12 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!index_.is(eax)) {
+  if (!scratch_.is(eax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ mov(index_, eax);
+    __ mov(scratch_, eax);
   }
+  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
@@ -5419,7 +4985,7 @@
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
   STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfNotSmi(index_, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -5429,7 +4995,6 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ SmiTag(index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(eax)) {
@@ -5460,7 +5025,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged ASCII char code.
+  // At this point code register contains smi tagged ascii char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
@@ -5471,8 +5036,7 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5499,15 +5063,14 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label call_runtime, call_builtin;
+  Label string_add_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   // Load the two arguments.
@@ -5516,14 +5079,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfSmi(eax, &call_runtime);
+    __ JumpIfSmi(eax, &string_add_runtime);
     __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
-    __ j(above_equal, &call_runtime);
+    __ j(above_equal, &string_add_runtime);
 
     // First argument is a string, test second.
-    __ JumpIfSmi(edx, &call_runtime);
+    __ JumpIfSmi(edx, &string_add_runtime);
     __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
-    __ j(above_equal, &call_runtime);
+    __ j(above_equal, &string_add_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -5547,7 +5110,7 @@
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
@@ -5556,7 +5119,7 @@
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ebx, ebx);
+  __ test(ebx, Operand(ebx));
   __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
@@ -5571,17 +5134,18 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
-  __ add(ebx, ecx);
+  __ add(ebx, Operand(ecx));
   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
-  __ j(overflow, &call_runtime);
+  __ j(overflow, &string_add_runtime);
   // Use the symbol table when adding two one character strings, as it
   // helps later optimizations to return a symbol here.
-  __ cmp(ebx, Immediate(Smi::FromInt(2)));
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
+  // Check that both strings are non-external ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+                                         &string_add_runtime);
 
   // Get the two characters forming the new string.
   __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
@@ -5606,10 +5170,14 @@
   __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
   __ bind(&make_two_character_string_no_reload);
   __ IncrementCounter(counters->string_add_make_two_char(), 1);
-  __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
+  __ AllocateAsciiString(eax,  // Result.
+                         2,    // Length.
+                         edi,  // Scratch 1.
+                         edx,  // Scratch 2.
+                         &string_add_runtime);
   // Pack both characters in ebx.
   __ shl(ecx, kBitsPerByte);
-  __ or_(ebx, ecx);
+  __ or_(ebx, Operand(ecx));
   // Set the characters in the new string.
   __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
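
The two-character fast case above packs both bytes into one register (char 1 in the low byte, char 2 shifted up by kBitsPerByte) and stores them with a single 16-bit write. The same packing in C++, assuming the little-endian byte order this ia32 code relies on:

    #include <cstdint>

    // Char 1 lands in the low byte, char 2 in the high byte, so one
    // 16-bit store writes both characters of the new string.
    uint16_t PackTwoAsciiChars(uint8_t c1, uint8_t c2) {
      return static_cast<uint16_t>(c1 | (c2 << 8));
    }
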
@@ -5617,24 +5185,24 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
+  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
   __ j(below, &string_add_flat_result);
 
   // If result is not supposed to be flat allocate a cons string object. If both
-  // strings are ASCII the result is an ASCII cons string.
+  // strings are ascii the result is an ascii cons string.
   Label non_ascii, allocated, ascii_data;
   __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
-  __ and_(ecx, edi);
+  __ and_(ecx, Operand(edi));
   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ecx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an ASCII cons string.
-  __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
+  // Allocate an ascii cons string.
+  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
@@ -5648,106 +5216,77 @@
   __ ret(2 * kPointerSize);
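
The threshold check above decides between the two result shapes: below String::kMinNonFlatLength the stub produces a flat (copied) string, at or above it a cons string that merely records both halves. A sketch of that decision with std::string standing in for heap strings; the threshold value 13 is an assumption, not taken from this diff:

    #include <cstddef>
    #include <string>

    struct ConsStr {
      std::string first, second;  // second stays empty for flat results
    };

    constexpr std::size_t kMinNonFlatLength = 13;  // assumed value

    ConsStr AddStrings(const std::string& a, const std::string& b) {
      if (a.size() + b.size() < kMinNonFlatLength) {
        return ConsStr{a + b, std::string()};  // flat: copy eagerly
      }
      return ConsStr{a, b};  // cons: O(1), no character copying
    }
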
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ASCII characters.
+  // to contain only ascii characters.
   // ecx: first instance type AND second instance type.
   // edi: second instance type.
   __ test(ecx, Immediate(kAsciiDataHintMask));
   __ j(not_zero, &ascii_data);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ xor_(edi, ecx);
+  __ xor_(edi, Operand(ecx));
   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
+  __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
 
-  // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
-  // Handle creating a flat result from either external or sequential strings.
-  // Locate the first characters' locations.
+  // Handle creating a flat result. First check that both strings are not
+  // external strings.
   // eax: first string
   // ebx: length of resulting flat string as a smi
   // edx: second string
-  Label first_prepared, second_prepared;
-  Label first_is_sequential, second_is_sequential;
   __ bind(&string_add_flat_result);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  // ecx: instance type of first string
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ecx, kStringRepresentationMask);
-  __ j(zero, &first_is_sequential, Label::kNear);
-  // Rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(ecx, kShortExternalStringMask);
-  __ j(not_zero, &call_runtime);
-  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ jmp(&first_prepared, Label::kNear);
-  __ bind(&first_is_sequential);
-  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ bind(&first_prepared);
-
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
-  // Check whether both strings have same encoding.
-  // edi: instance type of second string
-  __ xor_(ecx, edi);
-  __ test_b(ecx, kStringEncodingMask);
-  __ j(not_zero, &call_runtime);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(edi, kStringRepresentationMask);
-  __ j(zero, &second_is_sequential, Label::kNear);
-  // Rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ test_b(edi, kShortExternalStringMask);
-  __ j(not_zero, &call_runtime);
-  __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ jmp(&second_prepared, Label::kNear);
-  __ bind(&second_is_sequential);
-  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ bind(&second_prepared);
-
-  // Push the addresses of both strings' first characters onto the stack.
-  __ push(edx);
-  __ push(eax);
-
-  Label non_ascii_string_add_flat_result, call_runtime_drop_two;
-  // edi: instance type of second string
-  // First string and second string have the same encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ test_b(edi, kStringEncodingMask);
+  __ and_(ecx, kStringRepresentationMask);
+  __ cmp(ecx, kExternalStringTag);
+  __ j(equal, &string_add_runtime);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  __ and_(ecx, kStringRepresentationMask);
+  __ cmp(ecx, kExternalStringTag);
+  __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  // Now check if both strings are ascii strings.
+  // eax: first string
+  // ebx: length of resulting flat string as a smi
+  // edx: second string
+  Label non_ascii_string_add_flat_result;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
   __ j(zero, &non_ascii_string_add_flat_result);
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
+  __ j(zero, &string_add_runtime);
 
-  // Both strings are ASCII strings.
+  // Both strings are ascii strings.  As they are short they are both flat.
   // ebx: length of resulting flat string as a smi
   __ SmiUntag(ebx);
-  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
+  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // Load first argument's length and first character location.  Account for
-  // values currently on the stack when fetching arguments from it.
-  __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Load first argument and locate first character.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ pop(edx);
+  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
-  // Load second argument's length and first character location.  Account for
-  // values currently on the stack when fetching arguments from it.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  // Load second argument and locate first character.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ pop(edx);
+  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5761,30 +5300,34 @@
   // ebx: length of resulting flat string as a smi
   // edx: second string
   __ bind(&non_ascii_string_add_flat_result);
-  // Both strings are two byte strings.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
+  __ j(not_zero, &string_add_runtime);
+  // Both strings are two byte strings. As they are short they are both
+  // flat.
   __ SmiUntag(ebx);
-  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
+  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // Load second argument's length and first character location.  Account for
-  // values currently on the stack when fetching arguments from it.
-  __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ add(Operand(ecx),
+         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Load first argument and locate first character.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ pop(edx);
+  __ add(Operand(edx),
+         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
   // edi: length of first argument
   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
-  // Load second argument's length and first character location.  Account for
-  // values currently on the stack when fetching arguments from it.
-  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  // Load second argument and locate first character.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ pop(edx);
+  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5793,11 +5336,8 @@
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
-  // Recover stack pointer before jumping to runtime.
-  __ bind(&call_runtime_drop_two);
-  __ Drop(2);
   // Just jump to runtime to add the two strings.
-  __ bind(&call_runtime);
+  __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -5863,15 +5403,15 @@
   if (ascii) {
     __ mov_b(scratch, Operand(src, 0));
     __ mov_b(Operand(dest, 0), scratch);
-    __ add(src, Immediate(1));
-    __ add(dest, Immediate(1));
+    __ add(Operand(src), Immediate(1));
+    __ add(Operand(dest), Immediate(1));
   } else {
     __ mov_w(scratch, Operand(src, 0));
     __ mov_w(Operand(dest, 0), scratch);
-    __ add(src, Immediate(2));
-    __ add(dest, Immediate(2));
+    __ add(Operand(src), Immediate(2));
+    __ add(Operand(dest), Immediate(2));
   }
-  __ sub(count, Immediate(1));
+  __ sub(Operand(count), Immediate(1));
   __ j(not_zero, &loop);
 }
 
@@ -5894,7 +5434,7 @@
 
   // Nothing to do for zero characters.
   Label done;
-  __ test(count, count);
+  __ test(count, Operand(count));
   __ j(zero, &done);
 
   // Make count the number of bytes to copy.
@@ -5919,7 +5459,7 @@
 
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
-  __ test(count, count);
+  __ test(count, Operand(count));
   __ j(zero, &done);
 
   // Copy remaining characters.
@@ -5927,9 +5467,9 @@
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
-  __ add(src, Immediate(1));
-  __ add(dest, Immediate(1));
-  __ sub(count, Immediate(1));
+  __ add(Operand(src), Immediate(1));
+  __ add(Operand(dest), Immediate(1));
+  __ sub(Operand(count), Immediate(1));
   __ j(not_zero, &loop);
 
   __ bind(&done);
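
GenerateCopyCharactersREP above works in two phases, as its name suggests: a bulk copy of kPointerSize-sized chunks (the rep movs part) followed by a byte loop for whatever remains. The same strategy in C++:

    #include <cstddef>
    #include <cstring>

    void CopyChars(char* dest, const char* src, std::size_t count) {
      // Bulk phase: whole machine words (rep movs in the stub).
      std::size_t bulk = count - (count % sizeof(void*));
      std::memcpy(dest, src, bulk);
      // Tail phase: the remaining bytes, one at a time.
      for (std::size_t i = bulk; i < count; ++i) dest[i] = src[i];
    }
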
@@ -5951,12 +5491,12 @@
   // different hash algorithm. Don't try to look for these in the symbol table.
   Label not_array_index;
   __ mov(scratch, c1);
-  __ sub(scratch, Immediate(static_cast<int>('0')));
-  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
   __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
-  __ sub(scratch, Immediate(static_cast<int>('0')));
-  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_probed);
 
   __ bind(&not_array_index);
@@ -5969,24 +5509,24 @@
   // Collect the two characters in a register.
   Register chars = c1;
   __ shl(c2, kBitsPerByte);
-  __ or_(chars, c2);
+  __ or_(chars, Operand(c2));
 
   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   // hash:  hash of two character string.
 
   // Load the symbol table.
   Register symbol_table = c2;
-  ExternalReference roots_array_start =
-      ExternalReference::roots_array_start(masm->isolate());
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
   __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
   __ mov(symbol_table,
-         Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
 
   // Calculate capacity mask from the symbol table capacity.
   Register mask = scratch2;
   __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ SmiUntag(mask);
-  __ sub(mask, Immediate(1));
+  __ sub(Operand(mask), Immediate(1));
 
   // Registers
   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -6004,9 +5544,9 @@
     // Calculate entry in symbol table.
     __ mov(scratch, hash);
     if (i > 0) {
-      __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
     }
-    __ and_(scratch, mask);
+    __ and_(scratch, Operand(mask));
 
     // Load the entry from the symbol table.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
@@ -6020,7 +5560,7 @@
     Factory* factory = masm->isolate()->factory();
     __ cmp(candidate, factory->undefined_value());
     __ j(equal, not_found);
-    __ cmp(candidate, factory->the_hole_value());
+    __ cmp(candidate, factory->null_value());
     __ j(equal, &next_probe[i]);
 
     // If length is not 2 the string is not a candidate.
@@ -6033,7 +5573,7 @@
     __ push(mask);
     Register temp = mask;
 
-    // Check that the candidate is a non-external ASCII string.
+    // Check that the candidate is a non-external ascii string.
     __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
@@ -6042,7 +5582,7 @@
     // Check if the two characters match.
     __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
-    __ cmp(chars, temp);
+    __ cmp(chars, Operand(temp));
     __ j(equal, &found_in_symbol_table);
     __ bind(&next_probe_pop_mask[i]);
     __ pop(mask);
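
The unrolled loop above probes an open-addressed, power-of-two-sized symbol table: probe i looks at (hash + GetProbeOffset(i)) & mask, an undefined slot means the symbol is absent, and after kProbes attempts the stub gives up or calls the runtime. A sketch of the probe sequence, modelling each slot by its packed first two characters and assuming V8's triangular probe offsets i*(i+1)/2; the undefined/deleted-entry handling is elided:

    #include <cstdint>
    #include <vector>

    // Returns the matching slot, or -1 to fall back to the runtime.
    int ProbeTwoCharSymbol(const std::vector<uint32_t>& table,
                           uint32_t hash, uint32_t chars, int probes) {
      const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      for (int i = 0; i < probes; ++i) {
        uint32_t index = (hash + i * (i + 1) / 2) & mask;  // GetProbeOffset(i)
        if (table[index] == chars) return static_cast<int>(index);
      }
      return -1;
    }
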
@@ -6068,17 +5608,17 @@
                                     Register scratch) {
   // hash = (seed + character) + ((seed + character) << 10);
   if (Serializer::enabled()) {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(masm->isolate());
+    ExternalReference roots_address =
+        ExternalReference::roots_address(masm->isolate());
     __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
     __ mov(scratch, Operand::StaticArray(scratch,
                                          times_pointer_size,
-                                         roots_array_start));
+                                         roots_address));
     __ SmiUntag(scratch);
-    __ add(scratch, character);
+    __ add(scratch, Operand(character));
     __ mov(hash, scratch);
     __ shl(scratch, 10);
-    __ add(hash, scratch);
+    __ add(hash, Operand(scratch));
   } else {
     int32_t seed = masm->isolate()->heap()->HashSeed();
     __ lea(scratch, Operand(character, seed));
@@ -6088,7 +5628,7 @@
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, scratch);
+  __ xor_(hash, Operand(scratch));
 }
 
 
@@ -6097,15 +5637,15 @@
                                             Register character,
                                             Register scratch) {
   // hash += character;
-  __ add(hash, character);
+  __ add(hash, Operand(character));
   // hash += hash << 10;
   __ mov(scratch, hash);
   __ shl(scratch, 10);
-  __ add(hash, scratch);
+  __ add(hash, Operand(scratch));
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ shr(scratch, 6);
-  __ xor_(hash, scratch);
+  __ xor_(hash, Operand(scratch));
 }
 
 
@@ -6115,15 +5655,15 @@
   // hash += hash << 3;
   __ mov(scratch, hash);
   __ shl(scratch, 3);
-  __ add(hash, scratch);
+  __ add(hash, Operand(scratch));
   // hash ^= hash >> 11;
   __ mov(scratch, hash);
   __ shr(scratch, 11);
-  __ xor_(hash, scratch);
+  __ xor_(hash, Operand(scratch));
   // hash += hash << 15;
   __ mov(scratch, hash);
   __ shl(scratch, 15);
-  __ add(hash, scratch);
+  __ add(hash, Operand(scratch));
 
   __ and_(hash, String::kHashBitMask);
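
Spelled out, the three helpers above are a Jenkins-style one-at-a-time hash: an init step that folds the seed into the first character, a per-character mixing step, and a finalization that is masked down to the hash field width. A direct C++ transcription of the generated arithmetic (hash_bit_mask stands in for String::kHashBitMask):

    #include <cstdint>

    uint32_t HashInit(uint32_t seed, uint32_t character) {
      uint32_t hash = seed + character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    uint32_t HashGetHash(uint32_t hash, uint32_t hash_bit_mask) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash & hash_bit_mask;
    }
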
 
@@ -6155,70 +5695,100 @@
   // ebx: instance type
 
   // Calculate length of sub string using the smi values.
+  Label result_longer_than_two;
   __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
   __ JumpIfNotSmi(ecx, &runtime);
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, edx);
+  __ sub(ecx, Operand(edx));
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
-  Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-  __ bind(&not_original_string);
+  Label return_eax;
+  __ j(equal, &return_eax);
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked for in the symbol cache.
+  __ SmiUntag(ecx);  // Result length is no longer smi.
+  __ cmp(ecx, 2);
+  __ j(greater, &result_longer_than_two);
+  __ j(less, &runtime);
 
+  // Sub string of length 2 requested.
   // eax: string
   // ebx: instance type
-  // ecx: sub string length (smi)
+  // ecx: sub string length (value is 2)
   // edx: from index (smi)
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into edi.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ test(ebx, Immediate(kIsIndirectStringMask));
-  __ j(zero, &seq_or_external_string, Label::kNear);
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
 
-  Factory* factory = masm->isolate()->factory();
-  __ test(ebx, Immediate(kSlicedNotConsMask));
-  __ j(not_zero, &sliced_string, Label::kNear);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  // Flat cons strings have an empty second part.
-  __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
-         factory->empty_string());
-  __ j(not_equal, &runtime);
-  __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+  // Get the two characters forming the sub string.
+  __ SmiUntag(edx);  // From index is no longer smi.
+  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx,
+             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to lookup two character string in symbol table.
+  Label make_two_character_string;
+  StringHelper::GenerateTwoCharacterSymbolTableProbe(
+      masm, ebx, ecx, eax, edx, edi,
+      &make_two_character_string, &make_two_character_string);
+  __ ret(3 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  // Set up registers for allocating the two character string.
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and adjust start index by offset.
-  __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
-  __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
-  // Update instance type.
-  __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(edi, eax);
-
-  __ bind(&underlying_unpacked);
+  __ Set(ecx, Immediate(2));
 
   if (FLAG_string_slices) {
     Label copy_routine;
-    // edi: underlying subject string
-    // ebx: instance type of underlying subject string
-    // edx: adjusted start index (smi)
-    // ecx: length (smi)
-    __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
+    // If coming from the make_two_character_string path, the string
+    // is too short to be sliced anyway.
+    STATIC_ASSERT(2 < SlicedString::kMinLength);
+    __ jmp(&copy_routine);
+    __ bind(&result_longer_than_two);
+
+    // eax: string
+    // ebx: instance type
+    // ecx: sub string length
+    // edx: from index (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    __ cmp(ecx, SlicedString::kMinLength);
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ test(ebx, Immediate(kStringRepresentationMask));
+    __ j(zero, &seq_string, Label::kNear);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ test(ebx, Immediate(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ j(zero, &runtime);
+
+    Factory* factory = masm->isolate()->factory();
+    __ test(ebx, Immediate(kSlicedNotConsMask));
+    __ j(not_zero, &sliced_string, Label::kNear);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+           factory->empty_string());
+    __ j(not_equal, &runtime);
+    __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+    __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(edi, eax);
+
+    __ bind(&allocate_slice);
+    // edi: underlying subject string
+    // ebx: instance type of original subject string
+    // edx: offset
+    // ecx: length
     // Allocate new sliced string.  At this point we do not reload the instance
     // type including the string encoding because we simply rely on the info
     // provided by the original string.  It does not matter if the original
@@ -6234,62 +5804,41 @@
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
     __ bind(&set_slice_header);
+    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
+    __ SmiTag(ecx);
     __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
+    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
     __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
-    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
-    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
-    __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(3 * kPointerSize);
+    __ jmp(&return_eax);
 
     __ bind(&copy_routine);
+  } else {
+    __ bind(&result_longer_than_two);
   }
 
-  // edi: underlying subject string
-  // ebx: instance type of underlying subject string
-  // edx: adjusted start index (smi)
-  // ecx: length (smi)
-  // The subject string can only be external or sequential string of either
-  // encoding at this point.
-  Label two_byte_sequential, runtime_drop_two, sequential_string;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test_b(ebx, kExternalStringTag);
-  __ j(zero, &sequential_string);
+  // eax: string
+  // ebx: instance type
+  // ecx: result string length
+  // Check for flat ascii string
+  Label non_ascii_flat;
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
 
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ test_b(ebx, kShortExternalStringMask);
-  __ j(not_zero, &runtime);
-  __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&sequential_string);
-  // Stash away (adjusted) index and (underlying) string.
-  __ push(edx);
-  __ push(edi);
-  __ SmiUntag(ecx);
-  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
-  __ test_b(ebx, kStringEncodingMask);
-  __ j(zero, &two_byte_sequential);
-
-  // Sequential ASCII string.  Allocate the result.
-  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+  // Allocate the result.
+  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
 
   // eax: result string
   // ecx: result string length
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ pop(esi);
-  __ pop(ebx);
+  __ mov(esi, Operand(esp, 3 * kPointerSize));
+  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   __ SmiUntag(ebx);
-  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ add(esi, Operand(ebx));
 
   // eax: result string
   // ecx: result length
@@ -6298,28 +5847,38 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
   __ mov(esi, edx);  // Restore esi.
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
-  __ bind(&two_byte_sequential);
-  // Sequential two-byte string.  Allocate the result.
-  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+  __ bind(&non_ascii_flat);
+  // eax: string
+  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
+  // ecx: result string length
+  // Check for flat two byte string
+  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+  __ j(not_equal, &runtime);
+
+  // Allocate the result.
+  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
 
   // eax: result string
   // ecx: result string length
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(edi,
+  __ add(Operand(edi),
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
-  __ pop(esi);
-  __ pop(ebx);
+  __ mov(esi, Operand(esp, 3 * kPointerSize));
+  __ add(Operand(esi),
+         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // As from is a smi it is 2 times the value which matches the size of a two
   // byte character.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
+  __ add(esi, Operand(ebx));
 
   // eax: result string
   // ecx: result length
@@ -6328,13 +5887,11 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
   __ mov(esi, edx);  // Restore esi.
+
+  __ bind(&return_eax);
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(3 * kPointerSize);
 
-  // Drop pushed values on the stack before tail call.
-  __ bind(&runtime_drop_two);
-  __ Drop(2);
-
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kSubString, 3, 1);
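
Under FLAG_string_slices the path above avoids copying altogether: once the result is at least SlicedString::kMinLength characters, the stub allocates a sliced string that merely records (parent, offset, length); shorter results take the copy routine. A sketch of that decision, where the threshold 13 is an assumed value:

    #include <cstddef>
    #include <string>

    struct Slice {
      const std::u16string* parent;  // underlying sequential string
      std::size_t offset, length;
    };

    // Below this length a copy is cheaper than a slice object that
    // keeps the whole parent string alive (assumed value).
    constexpr std::size_t kMinSliceLength = 13;

    bool TrySlice(const std::u16string& parent, std::size_t from,
                  std::size_t to, Slice* out) {
      const std::size_t length = to - from;
      if (length < kMinSliceLength) return false;  // copy instead
      *out = Slice{&parent, from, length};
      return true;
    }
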
@@ -6361,7 +5918,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(length, length);
+  __ test(length, Operand(length));
   __ j(not_zero, &compare_chars, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
@@ -6396,14 +5953,14 @@
 
   __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
-  __ sub(scratch1, length_delta);
+  __ sub(scratch1, Operand(length_delta));
   __ bind(&left_shorter);
 
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
   Label compare_lengths;
-  __ test(min_length, min_length);
+  __ test(min_length, Operand(min_length));
   __ j(zero, &compare_lengths, Label::kNear);
 
   // Compare characters.
@@ -6413,7 +5970,7 @@
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
-  __ test(length_delta, length_delta);
+  __ test(length_delta, Operand(length_delta));
   __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
@@ -6462,7 +6019,7 @@
   __ mov_b(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, chars_not_equal_near);
-  __ inc(index);
+  __ add(Operand(index), Immediate(1));
   __ j(not_zero, &loop);
 }
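
Taken together, the two routines above reduce flat-string comparison to: compare the first min(left, right) characters, and if they all match, break the tie by length. Equivalent C++:

    #include <cstddef>
    #include <cstring>

    // Returns -1/0/1 like LESS/EQUAL/GREATER.
    int CompareFlatAscii(const char* left, std::size_t left_len,
                         const char* right, std::size_t right_len) {
      std::size_t min_len = left_len < right_len ? left_len : right_len;
      int diff = std::memcmp(left, right, min_len);
      if (diff != 0) return diff < 0 ? -1 : 1;
      if (left_len == right_len) return 0;
      return left_len < right_len ? -1 : 1;  // a prefix orders first
    }
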
 
@@ -6479,7 +6036,7 @@
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
   Label not_same;
-  __ cmp(edx, eax);
+  __ cmp(edx, Operand(eax));
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6489,13 +6046,13 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
+  // Check that both objects are sequential ascii strings.
   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
-  // Compare flat ASCII strings.
+  // Compare flat ascii strings.
   // Drop arguments from the stack.
   __ pop(ecx);
-  __ add(esp, Immediate(2 * kPointerSize));
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
   __ push(ecx);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
@@ -6509,16 +6066,16 @@
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
-  __ mov(ecx, edx);
-  __ or_(ecx, eax);
+  __ mov(ecx, Operand(edx));
+  __ or_(ecx, Operand(eax));
   __ JumpIfNotSmi(ecx, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ sub(eax, edx);
+    __ sub(eax, Operand(edx));
   } else {
     Label done;
-    __ sub(edx, eax);
+    __ sub(edx, Operand(eax));
     __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
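
The ordered case above leans on a subtraction trick: left - right already has the correct sign unless the subtraction overflows, and on overflow the operands had opposite signs, so a bitwise NOT of the wrapped result restores a correctly signed value. A sketch using the GCC/Clang overflow builtin:

    #include <cstdint>

    int32_t CompareSmiValues(int32_t left, int32_t right) {
      int32_t result;
      if (!__builtin_sub_overflow(left, right, &result)) return result;
      // Overflow: ~result == -result - 1 flips the sign. In the stub both
      // inputs are smi-tagged (even), so result != -1 and ~result != 0.
      return ~result;
    }
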
@@ -6536,16 +6093,16 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label unordered;
   Label miss;
-  __ mov(ecx, edx);
-  __ and_(ecx, eax);
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
   __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined1, Label::kNear);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined2, Label::kNear);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or SS2 or CMOV is unsupported.
@@ -6567,32 +6124,18 @@
     // Performing mov, because xor would destroy the flag register.
     __ mov(eax, 0);  // equal
     __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, ecx);
+    __ cmov(above, eax, Operand(ecx));
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, ecx);
+    __ cmov(below, eax, Operand(ecx));
     __ ret(0);
+
+    __ bind(&unordered);
   }
 
-  __ bind(&unordered);
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
-  __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
-    __ j(not_equal, &miss);
-    __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-    __ j(not_equal, &maybe_undefined2, Label::kNear);
-    __ jmp(&unordered);
-  }
-
-  __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
-    __ j(equal, &unordered);
-  }
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
@@ -6610,9 +6153,9 @@
 
   // Check that both operands are heap objects.
   Label miss;
-  __ mov(tmp1, left);
+  __ mov(tmp1, Operand(left));
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, right);
+  __ and_(tmp1, Operand(right));
   __ JumpIfSmi(tmp1, &miss, Label::kNear);
 
   // Check that both operands are symbols.
@@ -6621,13 +6164,13 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, tmp2);
+  __ and_(tmp1, Operand(tmp2));
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &miss, Label::kNear);
 
   // Symbols are compared by identity.
   Label done;
-  __ cmp(left, right);
+  __ cmp(left, Operand(right));
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
   ASSERT(right.is(eax));
@@ -6645,10 +6188,9 @@
 
 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
-
   // Registers containing left and right operands respectively.
   Register left = edx;
   Register right = eax;
@@ -6657,9 +6199,9 @@
   Register tmp3 = edi;
 
   // Check that both operands are heap objects.
-  __ mov(tmp1, left);
+  __ mov(tmp1, Operand(left));
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, right);
+  __ and_(tmp1, Operand(right));
   __ JumpIfSmi(tmp1, &miss);
 
   // Check that both operands are strings. This leaves the instance
@@ -6670,13 +6212,13 @@
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   __ mov(tmp3, tmp1);
   STATIC_ASSERT(kNotStringTag != 0);
-  __ or_(tmp3, tmp2);
+  __ or_(tmp3, Operand(tmp2));
   __ test(tmp3, Immediate(kIsNotStringMask));
   __ j(not_zero, &miss);
 
   // Fast check for identical strings.
   Label not_same;
-  __ cmp(left, right);
+  __ cmp(left, Operand(right));
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6687,33 +6229,25 @@
   __ bind(&not_same);
 
   // Check that both strings are symbols. If they are, we're done
-  // because we already know they are not identical.  But in the case of
-  // non-equality compare, we still need to determine the order.
-  if (equality) {
-    Label do_compare;
-    STATIC_ASSERT(kSymbolTag != 0);
-    __ and_(tmp1, tmp2);
-    __ test(tmp1, Immediate(kIsSymbolMask));
-    __ j(zero, &do_compare, Label::kNear);
-    // Make sure eax is non-zero. At this point input operands are
-    // guaranteed to be non-zero.
-    ASSERT(right.is(eax));
-    __ ret(0);
-    __ bind(&do_compare);
-  }
+  // because we already know they are not identical.
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, Operand(tmp2));
+  __ test(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure eax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(eax));
+  __ ret(0);
 
   // Check that both strings are sequential ASCII.
   Label runtime;
+  __ bind(&do_compare);
   __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2);
-  } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3);
-  }
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
@@ -6721,11 +6255,7 @@
   __ push(left);
   __ push(right);
   __ push(tmp1);
-  if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
-  } else {
-    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-  }
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
 
   __ bind(&miss);
   GenerateMiss(masm);
@@ -6735,8 +6265,8 @@
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
-  __ mov(ecx, edx);
-  __ and_(ecx, eax);
+  __ mov(ecx, Operand(edx));
+  __ and_(ecx, Operand(eax));
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6745,28 +6275,7 @@
   __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
-  __ sub(eax, edx);
-  __ ret(0);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
-  Label miss;
-  __ mov(ecx, edx);
-  __ and_(ecx, eax);
-  __ JumpIfSmi(ecx, &miss, Label::kNear);
-
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ cmp(ecx, known_map_);
-  __ j(not_equal, &miss, Label::kNear);
-  __ cmp(ebx, known_map_);
-  __ j(not_equal, &miss, Label::kNear);
-
-  __ sub(eax, edx);
+  __ sub(eax, Operand(edx));
   __ ret(0);
 
   __ bind(&miss);
@@ -6775,25 +6284,33 @@
 
 
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  {
-    // Call the runtime system in a fresh internal frame.
-    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                               masm->isolate());
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(edx);  // Preserve edx and eax.
-    __ push(eax);
-    __ push(edx);  // And also use them as the arguments.
-    __ push(eax);
-    __ push(Immediate(Smi::FromInt(op_)));
-    __ CallExternalReference(miss, 3);
-    // Compute the entry point of the rewritten stub.
-    __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-    __ pop(eax);
-    __ pop(edx);
-  }
+  // Save the registers.
+  __ pop(ecx);
+  __ push(edx);
+  __ push(eax);
+  __ push(ecx);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                             masm->isolate());
+  __ EnterInternalFrame();
+  __ push(edx);
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+
+  // Compute the entry point of the rewritten stub.
+  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+
+  // Restore registers.
+  __ pop(ecx);
+  __ pop(eax);
+  __ pop(edx);
+  __ push(ecx);
 
   // Do a tail call to the rewritten stub.
-  __ jmp(edi);
+  __ jmp(Operand(edi));
 }
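
GenerateMiss above is the inline-cache protocol in miniature: hand both operands plus the op to the runtime, get back a stub compiled for the state actually observed (SMIS, HEAP_NUMBERS, STRINGS, ...), and tail-jump into it so the comparison is simply retried under the new state. A toy sketch of that control flow; the types and the CompareIC_Miss signature here are stand-ins, not V8's real API:

    // Each IC state would have its own specialized stub function.
    using StubFn = int (*)(int, int);

    static int GenericCompareStub(int l, int r) { return (l > r) - (l < r); }

    // The runtime inspects the operands and returns a better stub;
    // this toy version always rewrites to the generic one.
    static StubFn CompareIC_Miss(int l, int r, int op) {
      (void)l; (void)r; (void)op;
      return GenericCompareStub;
    }

    int CompareWithMissHandler(int l, int r, int op) {
      StubFn rewritten = CompareIC_Miss(l, r, op);  // the runtime call
      return rewritten(l, r);  // the final jmp: retry in the new state
    }
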
 
 
@@ -6802,27 +6319,28 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
-                                                        Label* miss,
-                                                        Label* done,
-                                                        Register properties,
-                                                        Handle<String> name,
-                                                        Register r0) {
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
   ASSERT(name->IsSymbol());
 
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the hole value).
+  // (their names are the null value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
     Register index = r0;
     // Capacity is smi 2^n.
     __ mov(index, FieldOperand(properties, kCapacityOffset));
     __ dec(index);
-    __ and_(index,
-            Immediate(Smi::FromInt(name->Hash() +
+    __ and_(Operand(index),
+           Immediate(Smi::FromInt(name->Hash() +
                                    StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
@@ -6840,17 +6358,11 @@
     __ cmp(entity_name, Handle<String>(name));
     __ j(equal, miss);
 
-    Label the_hole;
-    // Check for the hole and skip.
-    __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
-    __ j(equal, &the_hole, Label::kNear);
-
     // Check if the entry name is not a symbol.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
     __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
               kIsSymbolMask);
     __ j(zero, miss);
-    __ bind(&the_hole);
   }
 
   StringDictionaryLookupStub stub(properties,
@@ -6859,10 +6371,12 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ push(Immediate(Handle<Object>(name)));
   __ push(Immediate(name->Hash()));
-  __ CallStub(&stub);
-  __ test(r0, r0);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
+  __ test(r0, Operand(r0));
   __ j(not_zero, miss);
   __ jmp(done);
+  return result;
 }
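
The inlined probes above prove absence rather than presence: hitting an undefined slot guarantees the name is not in the dictionary, finding the name itself is a miss for the negative lookup, and anything still undecided after kInlinedProbes goes to the full stub. A sketch with nullptr standing in for the undefined value and triangular probe offsets assumed; the non-symbol-key bailout is noted but not modelled:

    #include <cstdint>
    #include <string>
    #include <vector>

    enum class NegativeLookup { kAbsent, kFound, kSlowPath };

    NegativeLookup ProbeForAbsence(const std::vector<const std::string*>& dict,
                                   const std::string& name, uint32_t hash,
                                   int inlined_probes) {
      const uint32_t mask = static_cast<uint32_t>(dict.size()) - 1;
      for (int i = 0; i < inlined_probes; ++i) {
        uint32_t index = (hash + i * (i + 1) / 2) & mask;
        const std::string* entry = dict[index];
        if (entry == nullptr) return NegativeLookup::kAbsent;  // undefined
        if (*entry == name) return NegativeLookup::kFound;     // miss
        // The stub also bails to the miss label for non-symbol keys here.
      }
      return NegativeLookup::kSlowPath;  // undecided: run the full stub
    }
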
 
 
@@ -6877,11 +6391,6 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
-  ASSERT(!elements.is(r0));
-  ASSERT(!elements.is(r1));
-  ASSERT(!name.is(r0));
-  ASSERT(!name.is(r1));
-
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -6897,9 +6406,9 @@
     __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
     __ shr(r0, String::kHashShift);
     if (i > 0) {
-      __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r0, r1);
+    __ and_(r0, Operand(r1));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
@@ -6923,15 +6432,13 @@
   __ push(r0);
   __ CallStub(&stub);
 
-  __ test(r1, r1);
+  __ test(r1, Operand(r1));
   __ j(zero, miss);
   __ jmp(done);
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -6962,7 +6469,8 @@
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
     if (i > 0) {
-      __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(Operand(scratch),
+             Immediate(StringDictionary::GetProbeOffset(i)));
     }
     __ and_(scratch, Operand(esp, 0));
 
@@ -7018,369 +6526,6 @@
 }
 
 
-struct AheadOfTimeWriteBarrierStubList {
-  Register object, value, address;
-  RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
-  // Used in RegExpExecStub.
-  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
-  // Used in CompileArrayPushCall.
-  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
-  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
-  // Used in CompileStoreGlobal and CallFunctionStub.
-  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
-  // Used in StoreStubCompiler::CompileStoreField and
-  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
-  // GenerateStoreField calls the stub with two different permutations of
-  // registers.  This is the second.
-  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
-  // StoreIC::GenerateNormal via GenerateDictionaryStore
-  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
-  // KeyedStoreIC::GenerateGeneric.
-  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
-  // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
-  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
-  // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
-  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
-  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
-  // StoreArrayLiteralElementStub::Generate
-  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
-  // Null termination.
-  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
-  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    if (object_.is(entry->object) &&
-        value_.is(entry->value) &&
-        address_.is(entry->address) &&
-        remembered_set_action_ == entry->action &&
-        save_fp_regs_mode_ == kDontSaveFPRegs) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
-  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
-  stub1.GetCode()->set_is_pregenerated(true);
-
-  CpuFeatures::TryForceFeatureScope scope(SSE2);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    StoreBufferOverflowStub stub2(kSaveFPRegs);
-    stub2.GetCode()->set_is_pregenerated(true);
-  }
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    RecordWriteStub stub(entry->object,
-                         entry->value,
-                         entry->address,
-                         entry->action,
-                         kDontSaveFPRegs);
-    stub.GetCode()->set_is_pregenerated(true);
-  }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed.  The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  Label skip_to_incremental_noncompacting;
-  Label skip_to_incremental_compacting;
-
-  // The first two instructions are generated with labels so as to get the
-  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
-  // forth between a compare instructions (a nop in this position) and the
-  // real branch when we start and stop incremental heap marking.
-  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
-  __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&skip_to_incremental_noncompacting);
-  GenerateIncremental(masm, INCREMENTAL);
-
-  __ bind(&skip_to_incremental_compacting);
-  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
-  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
-  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-  masm->set_byte_at(0, kTwoByteNopInstruction);
-  masm->set_byte_at(2, kFiveByteNopInstruction);
-}
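
The removed RecordWriteStub above is self-patching: its first bytes are flipped between jumps (incremental marking active) and same-length compare instructions that act as nops (store-buffer-only mode), so only one opcode byte per site is rewritten and the jump displacement bytes double as the compare's immediate. A sketch of that toggle; the exact opcode choices are an assumption based on the stub's comment about compares serving as nops, while 0xEB/0xE9 are the x86 short/near jmp encodings and 0x3C/0x3D the matching-length cmp forms:

    #include <cstdint>

    // stub points at the first generated instruction; patching happens
    // with execution stopped, as when marking is started or stopped.
    void SetRecordWriteMode(uint8_t* stub, bool incremental_marking) {
      // Offset 0: jmp short (EB disp8)  <-> cmp al, imm8 (3C imm8).
      stub[0] = incremental_marking ? 0xEB : 0x3C;
      // Offset 2: jmp near (E9 disp32) <-> cmp eax, imm32 (3D imm32).
      stub[2] = incremental_marking ? 0xE9 : 0x3D;
    }
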
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
-  regs_.Save(masm);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    Label dont_need_remembered_set;
-
-    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
-                           regs_.scratch0(),
-                           &dont_need_remembered_set);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     not_zero,
-                     &dont_need_remembered_set);
-
-    // First notify the incremental marker if necessary, then update the
-    // remembered set.
-    CheckNeedsToInformIncrementalMarker(
-        masm,
-        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
-        mode);
-    InformIncrementalMarker(masm, mode);
-    regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-
-    __ bind(&dont_need_remembered_set);
-  }
-
-  CheckNeedsToInformIncrementalMarker(
-      masm,
-      kReturnOnNoNeedToInformIncrementalMarker,
-      mode);
-  InformIncrementalMarker(masm, mode);
-  regs_.Restore(masm);
-  __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-  int argument_count = 3;
-  __ PrepareCallCFunction(argument_count, regs_.scratch0());
-  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
-  }
-  __ mov(Operand(esp, 2 * kPointerSize),
-         Immediate(ExternalReference::isolate_address()));
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ CallCFunction(
-        ExternalReference::incremental_evacuation_record_write_function(
-            masm->isolate()),
-        argument_count);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ CallCFunction(
-        ExternalReference::incremental_marking_record_write_function(
-            masm->isolate()),
-        argument_count);
-  }
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
-    MacroAssembler* masm,
-    OnNoNeedToInformIncrementalMarker on_no_need,
-    Mode mode) {
-  Label object_is_black, need_incremental, need_incremental_pop_object;
-
-  // Let's look at the color of the object:  If it is not black we don't have
-  // to inform the incremental marker.
-  __ JumpIfBlack(regs_.object(),
-                 regs_.scratch0(),
-                 regs_.scratch1(),
-                 &object_is_black,
-                 Label::kNear);
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&object_is_black);
-
-  // Get the value from the slot.
-  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
-
-  if (mode == INCREMENTAL_COMPACTION) {
-    Label ensure_not_white;
-
-    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kEvacuationCandidateMask,
-                     zero,
-                     &ensure_not_white,
-                     Label::kNear);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
-                     not_zero,
-                     &ensure_not_white,
-                     Label::kNear);
-
-    __ jmp(&need_incremental);
-
-    __ bind(&ensure_not_white);
-  }
-
-  // We need an extra register for this, so we push the object register
-  // temporarily.
-  __ push(regs_.object());
-  __ EnsureNotWhite(regs_.scratch0(),  // The value.
-                    regs_.scratch1(),  // Scratch.
-                    regs_.object(),  // Scratch.
-                    &need_incremental_pop_object,
-                    Label::kNear);
-  __ pop(regs_.object());
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&need_incremental_pop_object);
-  __ pop(regs_.object());
-
-  __ bind(&need_incremental);
-
-  // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : element value to store
-  //  -- ebx    : array literal
-  //  -- edi    : map of array literal
-  //  -- ecx    : element index as smi
-  //  -- edx    : array literal index in function
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  Label element_done;
-  Label double_elements;
-  Label smi_element;
-  Label slow_elements;
-  Label slow_elements_from_double;
-  Label fast_elements;
-
-  __ CheckFastElements(edi, &double_elements);
-
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-  __ JumpIfSmi(eax, &smi_element);
-  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
-
-  // Store into the array literal requires an elements transition. Call into
-  // the runtime.
-
-  __ bind(&slow_elements);
-  __ pop(edi);  // Pop return address and remember to put back later for tail
-                // call.
-  __ push(ebx);
-  __ push(ecx);
-  __ push(eax);
-  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
-  __ push(edx);
-  __ push(edi);  // Push the return address back so that the tail call
-                 // returns to the right place.
-  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
-  __ bind(&slow_elements_from_double);
-  __ pop(edx);
-  __ jmp(&slow_elements);
-
-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-  __ bind(&fast_elements);
-  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
-                           FixedArrayBase::kHeaderSize));
-  __ mov(Operand(ecx, 0), eax);
-  // Update the write barrier for the array store.
-  __ RecordWrite(ebx, ecx, eax,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
-  __ bind(&smi_element);
-  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
-                      FixedArrayBase::kHeaderSize), eax);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-  __ bind(&double_elements);
-
-  __ push(edx);
-  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(eax,
-                                 edx,
-                                 ecx,
-                                 edi,
-                                 xmm0,
-                                 &slow_elements_from_double,
-                                 false);
-  __ pop(edx);
-  __ ret(0);
-}
-
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 803a711..fa255da 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,8 +49,6 @@
                           ArgumentType argument_type)
       : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
-  static void GenerateOperation(MacroAssembler* masm,
-                                TranscendentalCache::Type type);
  private:
   TranscendentalCache::Type type_;
   ArgumentType argument_type_;
@@ -58,25 +56,7 @@
   Major MajorKey() { return TranscendentalCache; }
   int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public CodeStub {
- public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  virtual bool IsPregenerated() { return true; }
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+  void GenerateOperation(MacroAssembler* masm);
 };
 
 
@@ -148,7 +128,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -235,7 +215,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -422,12 +402,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  static void GenerateNegativeLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register properties,
-                                     Handle<String> name,
-                                     Register r0);
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -437,8 +418,6 @@
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -451,7 +430,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryLookup; }
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -472,265 +451,6 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
- public:
-  RecordWriteStub(Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action,
-                  SaveFPRegsMode fp_mode)
-      : object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
-        regs_(object,   // An input reg.
-              address,  // An input reg.
-              value) {  // One scratch reg.
-  }
-
-  enum Mode {
-    STORE_BUFFER_ONLY,
-    INCREMENTAL,
-    INCREMENTAL_COMPACTION
-  };
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
-  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
-  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
-
-  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
-  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
-
-  static Mode GetMode(Code* stub) {
-    byte first_instruction = stub->instruction_start()[0];
-    byte second_instruction = stub->instruction_start()[2];
-
-    if (first_instruction == kTwoByteJumpInstruction) {
-      return INCREMENTAL;
-    }
-
-    ASSERT(first_instruction == kTwoByteNopInstruction);
-
-    if (second_instruction == kFiveByteJumpInstruction) {
-      return INCREMENTAL_COMPACTION;
-    }
-
-    ASSERT(second_instruction == kFiveByteNopInstruction);
-
-    return STORE_BUFFER_ONLY;
-  }
-
-  static void Patch(Code* stub, Mode mode) {
-    switch (mode) {
-      case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
-               GetMode(stub) == INCREMENTAL_COMPACTION);
-        stub->instruction_start()[0] = kTwoByteNopInstruction;
-        stub->instruction_start()[2] = kFiveByteNopInstruction;
-        break;
-      case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        stub->instruction_start()[0] = kTwoByteJumpInstruction;
-        break;
-      case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        stub->instruction_start()[0] = kTwoByteNopInstruction;
-        stub->instruction_start()[2] = kFiveByteJumpInstruction;
-        break;
-    }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 7);
-  }
-
- private:
-  // This is a helper class for freeing up 3 scratch registers, where the third
-  // is always ecx (needed for shift operations).  The input is two registers
-  // that must be preserved and one scratch register provided by the caller.
-  class RegisterAllocation {
-   public:
-    RegisterAllocation(Register object,
-                       Register address,
-                       Register scratch0)
-        : object_orig_(object),
-          address_orig_(address),
-          scratch0_orig_(scratch0),
-          object_(object),
-          address_(address),
-          scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
-      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
-      if (scratch0.is(ecx)) {
-        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
-      }
-      if (object.is(ecx)) {
-        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
-      }
-      if (address.is(ecx)) {
-        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
-      }
-      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
-    }
-
-    void Save(MacroAssembler* masm) {
-      ASSERT(!address_orig_.is(object_));
-      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
-      // We don't have to save scratch0_orig_ because it was given to us as
-      // a scratch register.  But if we had to switch to a different reg then
-      // we should save the new scratch0_.
-      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
-      if (!ecx.is(scratch0_orig_) &&
-          !ecx.is(object_orig_) &&
-          !ecx.is(address_orig_)) {
-        masm->push(ecx);
-      }
-      masm->push(scratch1_);
-      if (!address_.is(address_orig_)) {
-        masm->push(address_);
-        masm->mov(address_, address_orig_);
-      }
-      if (!object_.is(object_orig_)) {
-        masm->push(object_);
-        masm->mov(object_, object_orig_);
-      }
-    }
-
-    void Restore(MacroAssembler* masm) {
-      // These will have been preserved the entire time, so we just need to move
-      // them back.  Only in one case is the orig_ reg different from the plain
-      // one, since only one of them can alias with ecx.
-      if (!object_.is(object_orig_)) {
-        masm->mov(object_orig_, object_);
-        masm->pop(object_);
-      }
-      if (!address_.is(address_orig_)) {
-        masm->mov(address_orig_, address_);
-        masm->pop(address_);
-      }
-      masm->pop(scratch1_);
-      if (!ecx.is(scratch0_orig_) &&
-          !ecx.is(object_orig_) &&
-          !ecx.is(address_orig_)) {
-        masm->pop(ecx);
-      }
-      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
-    }
-
-    // If we have to call into C then we need to save and restore all caller-
-    // saved registers that were not already preserved.  The caller-saved
-    // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
-    // will be restored by other means so we don't bother pushing them here.
-    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(SSE2);
-        masm->sub(esp,
-                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
-        // Save all XMM registers except XMM0.
-        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
-          XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
-        }
-      }
-    }
-
-    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
-                                           SaveFPRegsMode mode) {
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(SSE2);
-        // Restore all XMM registers except XMM0.
-        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
-          XMMRegister reg = XMMRegister::from_code(i);
-          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
-        }
-        masm->add(esp,
-                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
-      }
-      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
-      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
-    }
-
-    inline Register object() { return object_; }
-    inline Register address() { return address_; }
-    inline Register scratch0() { return scratch0_; }
-    inline Register scratch1() { return scratch1_; }
-
-   private:
-    Register object_orig_;
-    Register address_orig_;
-    Register scratch0_orig_;
-    Register object_;
-    Register address_;
-    Register scratch0_;
-    Register scratch1_;
-    // Third scratch register is always ecx.
-
-    Register GetRegThatIsNotEcxOr(Register r1,
-                                  Register r2,
-                                  Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
-        Register candidate = Register::FromAllocationIndex(i);
-        if (candidate.is(ecx)) continue;
-        if (candidate.is(r1)) continue;
-        if (candidate.is(r2)) continue;
-        if (candidate.is(r3)) continue;
-        return candidate;
-      }
-      UNREACHABLE();
-      return no_reg;
-    }
-    friend class RecordWriteStub;
-  };
-
-  enum OnNoNeedToInformIncrementalMarker {
-    kReturnOnNoNeedToInformIncrementalMarker,
-    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  };
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
-  void CheckNeedsToInformIncrementalMarker(
-      MacroAssembler* masm,
-      OnNoNeedToInformIncrementalMarker on_no_need,
-      Mode mode);
-  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
-  void Activate(Code* code) {
-    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-  }
-
-  class ObjectBits: public BitField<int, 0, 3> {};
-  class ValueBits: public BitField<int, 3, 3> {};
-  class AddressBits: public BitField<int, 6, 3> {};
-  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
-  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
-
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
-  RegisterAllocation regs_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index ea61910..3a657bd 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,8 +30,6 @@
 #if defined(V8_TARGET_ARCH_IA32)
 
 #include "codegen.h"
-#include "heap.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -41,100 +39,17 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
-  masm->set_has_frame(true);
+  masm->EnterInternalFrame();
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
-  masm->set_has_frame(false);
+  masm->LeaveInternalFrame();
 }
 
 
 #define __ masm.
 
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
-  size_t actual_size;
-  // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
-  if (buffer == NULL) {
-    // Fallback to library function if function cannot be created.
-    switch (type) {
-      case TranscendentalCache::SIN: return &sin;
-      case TranscendentalCache::COS: return &cos;
-      case TranscendentalCache::TAN: return &tan;
-      case TranscendentalCache::LOG: return &log;
-      default: UNIMPLEMENTED();
-    }
-  }
-
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // esp[1 * kPointerSize]: raw double input
-  // esp[0 * kPointerSize]: return address
-  // Move double input into registers.
-
-  __ push(ebx);
-  __ push(edx);
-  __ push(edi);
-  __ fld_d(Operand(esp, 4 * kPointerSize));
-  __ mov(ebx, Operand(esp, 4 * kPointerSize));
-  __ mov(edx, Operand(esp, 5 * kPointerSize));
-  TranscendentalCacheStub::GenerateOperation(&masm, type);
-  // The return value is expected to be on ST(0) of the FPU stack.
-  __ pop(edi);
-  __ pop(edx);
-  __ pop(ebx);
-  __ Ret();
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
-  size_t actual_size;
-  // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
-  // If SSE2 is not available, we can use libc's implementation to ensure
-  // consistency, since code generated by fullcodegen calls into the runtime.
-  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // esp[1 * kPointerSize]: raw double input
-  // esp[0 * kPointerSize]: return address
-  // Move double input into registers.
-  {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
-    __ sqrtsd(xmm0, xmm0);
-    __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
-    // Load result into floating point register as return value.
-    __ fld_d(Operand(esp, 1 * kPointerSize));
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
 static void MemCopyWrapper(void* dest, const void* src, size_t size) {
   memcpy(dest, src, size);
 }
@@ -193,14 +108,14 @@
     __ mov(edx, dst);
     __ and_(edx, 0xF);
     __ neg(edx);
-    __ add(edx, Immediate(16));
-    __ add(dst, edx);
-    __ add(src, edx);
-    __ sub(count, edx);
+    __ add(Operand(edx), Immediate(16));
+    __ add(dst, Operand(edx));
+    __ add(src, Operand(edx));
+    __ sub(Operand(count), edx);
 
     // edi is now aligned. Check if esi is also aligned.
     Label unaligned_source;
-    __ test(src, Immediate(0x0F));
+    __ test(Operand(src), Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
       // Copy loop for aligned source and destination.
@@ -215,11 +130,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqa(xmm0, Operand(src, 0x00));
         __ movdqa(xmm1, Operand(src, 0x10));
-        __ add(src, Immediate(0x20));
+        __ add(Operand(src), Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(dst, Immediate(0x20));
+        __ add(Operand(dst), Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -227,12 +142,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(count, Immediate(0x10));
+      __ test(Operand(count), Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqa(xmm0, Operand(src, 0));
-      __ add(src, Immediate(0x10));
+      __ add(Operand(src), Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(dst, Immediate(0x10));
+      __ add(Operand(dst), Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -261,11 +176,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqu(xmm0, Operand(src, 0x00));
         __ movdqu(xmm1, Operand(src, 0x10));
-        __ add(src, Immediate(0x20));
+        __ add(Operand(src), Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(dst, Immediate(0x20));
+        __ add(Operand(dst), Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -273,12 +188,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(count, Immediate(0x10));
+      __ test(Operand(count), Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqu(xmm0, Operand(src, 0));
-      __ add(src, Immediate(0x10));
+      __ add(Operand(src), Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(dst, Immediate(0x10));
+      __ add(Operand(dst), Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -313,10 +228,10 @@
     __ mov(edx, dst);
     __ and_(edx, 0x03);
     __ neg(edx);
-    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
-    __ add(dst, edx);
-    __ add(src, edx);
-    __ sub(count, edx);
+    __ add(Operand(edx), Immediate(4));  // edx = 4 - (dst & 3)
+    __ add(dst, Operand(edx));
+    __ add(src, Operand(edx));
+    __ sub(Operand(count), edx);
     // edi is now aligned, ecx holds number of remaining bytes to copy.
 
     __ mov(edx, count);
@@ -346,401 +261,6 @@
 
 #undef __
 
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label loop, entry, convert_hole, gc_required, only_change_map;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(eax);
-  __ push(ebx);
-
-  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
-  // Allocate new FixedDoubleArray.
-  // edx: receiver
-  // edi: length of source FixedArray (smi-tagged)
-  __ lea(esi, Operand(edi, times_4, FixedDoubleArray::kHeaderSize));
-  __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
-  // eax: destination FixedDoubleArray
-  // edi: number of elements
-  // edx: receiver
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
-  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
-  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ mov(ebx, eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
-  // Prepare for conversion loop.
-  ExternalReference canonical_the_hole_nan_reference =
-      ExternalReference::address_of_the_hole_nan();
-  XMMRegister the_hole_nan = xmm1;
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(the_hole_nan,
-              Operand::StaticVariable(canonical_the_hole_nan_reference));
-  }
-  __ jmp(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  // Restore registers before jumping into runtime.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ pop(ebx);
-  __ pop(eax);
-  __ jmp(fail);
-
-  // Convert and copy elements
-  // esi: source FixedArray
-  __ bind(&loop);
-  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
-  // ebx: current element from source
-  // edi: index of current element
-  __ JumpIfNotSmi(ebx, &convert_hole);
-
-  // Normal smi, convert it to double and store.
-  __ SmiUntag(ebx);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope fscope(SSE2);
-    __ cvtsi2sd(xmm0, ebx);
-    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-              xmm0);
-  } else {
-    __ push(ebx);
-    __ fild_s(Operand(esp, 0));
-    __ pop(ebx);
-    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
-  }
-  __ jmp(&entry);
-
-  // Found hole, store hole_nan_as_double instead.
-  __ bind(&convert_hole);
-
-  if (FLAG_debug_code) {
-    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
-    __ Assert(equal, "object found in smi-only array");
-  }
-
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-              the_hole_nan);
-  } else {
-    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
-    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
-  }
-
-  __ bind(&entry);
-  __ sub(edi, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  __ pop(ebx);
-  __ pop(eax);
-
-  // Restore esi.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  __ bind(&only_change_map);
-  // eax: value
-  // ebx: target map
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(eax);
-  __ push(edx);
-  __ push(ebx);
-
-  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
-  // Allocate new FixedArray.
-  // ebx: length of source FixedDoubleArray (smi-tagged)
-  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ AllocateInNewSpace(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);
-
-  // eax: destination FixedArray
-  // ebx: number of elements
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
-  __ jmp(&entry);
-
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ bind(&only_change_map);
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&success);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ pop(ebx);
-  __ pop(edx);
-  __ pop(eax);
-  __ jmp(fail);
-
-  // Box doubles into heap numbers.
-  // edi: source FixedDoubleArray
-  // eax: destination FixedArray
-  __ bind(&loop);
-  // ebx: index of current element (smi-tagged)
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(equal, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
-  // edx: new heap number
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope fscope(SSE2);
-    __ movdbl(xmm0,
-              FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-    __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
-  } else {
-    __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-    __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
-    __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
-    __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
-  }
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
-  __ mov(esi, ebx);
-  __ RecordWriteArray(eax,
-                      edx,
-                      esi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&entry, Label::kNear);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
-         masm->isolate()->factory()->the_hole_value());
-
-  __ bind(&entry);
-  __ sub(ebx, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  __ pop(ebx);
-  __ pop(edx);
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      eax,
-                      edi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Restore registers.
-  __ pop(eax);
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  __ bind(&success);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
-                                       Factory* factory,
-                                       Register string,
-                                       Register index,
-                                       Register result,
-                                       Label* call_runtime) {
-  // Fetch the instance type of the receiver into result register.
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ test(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ SmiUntag(result);
-  __ add(index, result);
-  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle cons strings.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
-         Immediate(factory->empty_string()));
-  __ j(not_equal, call_runtime);
-  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Distinguish sequential and external strings. Only these two string
-  // representations can reach here (slices and flat cons strings have been
-  // reduced to the underlying sequential or external string).
-  Label seq_string;
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &seq_string, Label::kNear);
-
-  // Handle external strings.
-  Label ascii_external, done;
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ test(result, Immediate(kIsIndirectStringMask));
-    __ Assert(zero, "external string expected, but not found");
-  }
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ test_b(result, kShortExternalStringMask);
-  __ j(not_zero, call_runtime);
-  // Check encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ test_b(result, kStringEncodingMask);
-  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
-  __ j(not_equal, &ascii_external, Label::kNear);
-  // Two-byte string.
-  __ movzx_w(result, Operand(result, index, times_2, 0));
-  __ jmp(&done, Label::kNear);
-  __ bind(&ascii_external);
-  // Ascii string.
-  __ movzx_b(result, Operand(result, index, times_1, 0));
-  __ jmp(&done, Label::kNear);
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii;
-  __ bind(&seq_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  __ movzx_w(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // Ascii string.
-  // Load the byte into the result register.
-  __ bind(&ascii);
-  __ movzx_b(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
-}
-
-#undef __
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index f4ab0b5..c85fa83 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -72,22 +72,6 @@
 };
 
 
-class StringCharLoadGenerator : public AllStatic {
- public:
-  // Generates the code for handling different string types and loading the
-  // indexed character into |result|.  We expect |index| as untagged input and
-  // |result| as untagged output.
-  static void Generate(MacroAssembler* masm,
-                       Factory* factory,
-                       Register string,
-                       Register index,
-                       Register result,
-                       Label* call_runtime);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 9eabb2a..57e66df 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
+void CPU::Setup() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index d13fa75..2389948 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -100,64 +100,63 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non-object values
-    // are stored as smis so that they are untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((object_regs & (1 << r)) != 0) {
-        __ push(reg);
-      }
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ test(reg, Immediate(0xc0000000));
-          __ Assert(zero, "Unable to encode value as smi");
-        }
-        __ SmiTag(reg);
-        __ push(reg);
-      }
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as smis so that they are untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    if ((object_regs & (1 << r)) != 0) {
+      __ push(reg);
     }
+    if ((non_object_regs & (1 << r)) != 0) {
+      if (FLAG_debug_code) {
+        __ test(reg, Immediate(0xc0000000));
+        __ Assert(zero, "Unable to encode value as smi");
+      }
+      __ SmiTag(reg);
+      __ push(reg);
+    }
+  }
 
 #ifdef DEBUG
-    __ RecordComment("// Calling from debug break to runtime - come in - over");
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-    __ Set(eax, Immediate(0));  // No arguments.
-    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+  __ Set(eax, Immediate(0));  // No arguments.
+  __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
 
-    CEntryStub ceb(1);
-    __ CallStub(&ceb);
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
 
-    // Restore the register values containing object pointers from the
-    // expression stack.
-    for (int i = kNumJSCallerSaved; --i >= 0;) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if (FLAG_debug_code) {
-        __ Set(reg, Immediate(kDebugZapValue));
-      }
-      if ((object_regs & (1 << r)) != 0) {
-        __ pop(reg);
-      }
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ pop(reg);
-        __ SmiUntag(reg);
-      }
+  // Restore the register values containing object pointers from the expression
+  // stack.
+  for (int i = kNumJSCallerSaved; --i >= 0;) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    if (FLAG_debug_code) {
+      __ Set(reg, Immediate(kDebugZapValue));
     }
-
-    // Get rid of the internal frame.
+    if ((object_regs & (1 << r)) != 0) {
+      __ pop(reg);
+    }
+    if ((non_object_regs & (1 << r)) != 0) {
+      __ pop(reg);
+      __ SmiUntag(reg);
+    }
   }
 
+  // Get rid of the internal frame.
+  __ LeaveInternalFrame();
+
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ add(esp, Immediate(kPointerSize));
+    __ add(Operand(esp), Immediate(kPointerSize));
   }
 
   // Now that the break point has been handled, resume normal execution by
@@ -222,36 +221,8 @@
 }
 
 
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
   // Register state just before return from JS function (from codegen-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- eax: return value
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- edi: function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- ebx: cache cell for call target
-  //  -- edi: function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-ia32.cc).
   // eax is the actual number of arguments, not encoded as a smi; see comment
   // above IC call.
   // ----------- S t a t e -------------
@@ -263,17 +234,21 @@
 }
 
 
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-ia32.cc).
-  // eax is the actual number of arguments, not encoded as a smi; see comment
-  // above IC call.
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // Register state just before return from JS function (from codegen-ia32.cc).
   // ----------- S t a t e -------------
-  //  -- eax: number of arguments (not smi)
-  //  -- ebx: cache cell for call target
-  //  -- edi: constructor function
+  //  -- eax: return value
   // -----------------------------------
-  // The number of arguments in eax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
+  Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, 0, false);
 }
 
 
@@ -282,7 +257,9 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  __ Nop(Assembler::kDebugBreakSlotLength);
+  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
+    __ nop();
+  }
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
@@ -321,7 +298,7 @@
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(edx);
+  __ jmp(Operand(edx));
 }
 
 const bool Debug::kFrameDropperSupported = true;
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 92d7cc1..080ad64 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -99,7 +99,7 @@
         new_reloc->GetDataStartAddress() + padding, 0);
     intptr_t comment_string
         = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
-    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
+    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
     for (int i = 0; i < additional_comments; ++i) {
 #ifdef DEBUG
       byte* pos_before = reloc_info_writer.pos();
@@ -156,8 +156,7 @@
     // We use RUNTIME_ENTRY for deoptimization bailouts.
     RelocInfo rinfo(call_address + 1,  // 1 after the call opcode.
                     RelocInfo::RUNTIME_ENTRY,
-                    reinterpret_cast<intptr_t>(deopt_entry),
-                    NULL);
+                    reinterpret_cast<intptr_t>(deopt_entry));
     reloc_info_writer.Write(&rinfo);
     ASSERT_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
@@ -189,11 +188,6 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
-  // We might be in the middle of incremental marking with compaction.
-  // Tell collector to treat this code object in a special way and
-  // ignore all slots that might have been recorded on it.
-  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -205,22 +199,12 @@
 }
 
 
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x13;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
-  ASSERT_EQ(check_code->entry(),
-            Assembler::target_address_at(call_target_address));
+  ASSERT(check_code->entry() ==
+         Assembler::target_address_at(call_target_address));
   // The stack check code matches the pattern:
   //
   //     cmp esp, <limit>
@@ -237,50 +221,31 @@
   //     call <on-stack replacement>
   //     test eax, <loop nesting depth>
   // ok:
-
-  if (FLAG_count_based_interrupts) {
-    ASSERT_EQ(*(call_target_address - 3), kJnsInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJnsOffset);
-  } else {
-    ASSERT_EQ(*(call_target_address - 3), kJaeInstruction);
-    ASSERT_EQ(*(call_target_address - 2), kJaeOffset);
-  }
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
+  ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
+         *(call_target_address - 2) == 0x07 &&  // offset
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
-                                         Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
-  ASSERT_EQ(replacement_code->entry(),
-            Assembler::target_address_at(call_target_address));
-
+  ASSERT(replacement_code->entry() ==
+         Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT_EQ(*(call_target_address - 3), kNopByteOne);
-  ASSERT_EQ(*(call_target_address - 2), kNopByteTwo);
-  ASSERT_EQ(*(call_target_address - 1), kCallInstruction);
-  if (FLAG_count_based_interrupts) {
-    *(call_target_address - 3) = kJnsInstruction;
-    *(call_target_address - 2) = kJnsOffset;
-  } else {
-    *(call_target_address - 3) = kJaeInstruction;
-    *(call_target_address - 2) = kJaeOffset;
-  }
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
+         *(call_target_address - 1) == 0xe8);   // call
+  *(call_target_address - 3) = 0x73;  // jae
+  *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
-
-  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, check_code);
 }
 
 
@@ -320,13 +285,12 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
-  iterator.Next();  // Drop JS frames count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::JS_FRAME == opcode);
+  ASSERT(Translation::FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -362,7 +326,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -426,7 +392,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
-    // Set up the frame pointer and the context pointer.
+    // Setup the frame pointer and the context pointer.
     output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
     output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
 
@@ -450,208 +416,13 @@
 }
 
 
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
-                                                 int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
-  // Arguments adaptor can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // A marker value is used in place of the context.
-  output_offset -= kPointerSize;
-  intptr_t context = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
-           top_address + output_offset, output_offset, context);
-  }
-
-  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* adaptor_trampoline =
-      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      adaptor_trampoline->instruction_start() +
-      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
-                                              int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = 6 * kPointerSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
-  // Construct stub can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // A marker value is used in place of the function.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  // The newly allocated object was passed as receiver in the artificial
-  // constructor stub environment created by HEnvironment::CopyForInlining().
-  output_offset -= kPointerSize;
-  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      construct_stub->instruction_start() +
-      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
-                                   int frame_index) {
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -671,7 +442,9 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -757,7 +530,6 @@
     value = reinterpret_cast<uint32_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
-  output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(esi.code(), value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
@@ -844,7 +616,7 @@
 
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
-  __ sub(esp, Immediate(kDoubleRegsSize));
+  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
@@ -868,7 +640,7 @@
     __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
     __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
   }
-  __ sub(edx, ebp);
+  __ sub(edx, Operand(ebp));
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
@@ -881,10 +653,7 @@
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
   __ mov(Operand(esp, 5 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-  }
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
@@ -907,15 +676,15 @@
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
-    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
+    __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
   } else {
-    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
+    __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
   }
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ add(ecx, esp);
+  __ add(ecx, Operand(esp));
 
   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
@@ -924,19 +693,16 @@
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
-  __ add(edx, Immediate(sizeof(uint32_t)));
-  __ cmp(ecx, esp);
+  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, Operand(esp));
   __ j(not_equal, &pop_loop);
 
   // Compute the output frame in the deoptimizer.
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 1);
-  }
+  __ CallCFunction(
+      ExternalReference::compute_output_frames_function(isolate), 1);
   __ pop(eax);
 
   // Replace the current frame with the output frames.
@@ -951,12 +717,12 @@
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
-  __ sub(ecx, Immediate(sizeof(uint32_t)));
+  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ test(ecx, ecx);
+  __ test(ecx, Operand(ecx));
   __ j(not_zero, &inner_push_loop);
-  __ add(eax, Immediate(kPointerSize));
-  __ cmp(eax, edx);
+  __ add(Operand(eax), Immediate(kPointerSize));
+  __ cmp(eax, Operand(edx));
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
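
Note: the outer/inner push loops in the hunk above replace the current frame with the computed output frames by pushing each FrameDescription's slots from the highest offset down to zero. A hedged sketch of the same traversal in plain C++ (FrameDescription here is a stand-in struct, not the V8 class):

#include <cstdint>
#include <vector>

// Stand-in for v8::internal::FrameDescription: a frame is a byte size
// plus one 32-bit slot per 4 bytes of frame content.
struct FrameDescription {
  uint32_t frame_size;                  // in bytes, a multiple of 4
  std::vector<uint32_t> frame_content;  // frame_size / 4 slots
};

// Mirror of the generated outer/inner push loops: for every output
// frame, push its slots starting at the highest offset and walking
// down to offset zero.
void PushOutputFrames(std::vector<uint32_t>* stack,
                      const std::vector<FrameDescription>& output) {
  for (const FrameDescription& frame : output) {            // outer_push_loop
    for (uint32_t offset = frame.frame_size; offset > 0;) {  // inner_push_loop
      offset -= sizeof(uint32_t);
      stack->push_back(frame.frame_content[offset / sizeof(uint32_t)]);
    }
  }
}
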
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index b5ddcca..a936277 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -55,7 +55,6 @@
 
 
 static const ByteMnemonic two_operands_instr[] = {
-  {0x01, "add", OPER_REG_OP_ORDER},
   {0x03, "add", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
@@ -118,19 +117,6 @@
 };
 
 
-// Generally we don't want to generate these because they are subject to partial
-// register stalls.  They are included for completeness and because the cmp
-// variant is used by the RecordWrite stub.  Because it does not update the
-// register it is not subject to partial register stalls.
-static ByteMnemonic byte_immediate_instr[] = {
-  {0x0c, "or", UNSET_OP_ORDER},
-  {0x24, "and", UNSET_OP_ORDER},
-  {0x34, "xor", UNSET_OP_ORDER},
-  {0x3c, "cmp", UNSET_OP_ORDER},
-  {-1, "", UNSET_OP_ORDER}
-};
-
-
 static const char* const jump_conditional_mnem[] = {
   /*0*/ "jo", "jno", "jc", "jnc",
   /*4*/ "jz", "jnz", "jna", "ja",
@@ -163,8 +149,7 @@
   REGISTER_INSTR,
   MOVE_REG_INSTR,
   CALL_JUMP_INSTR,
-  SHORT_IMMEDIATE_INSTR,
-  BYTE_IMMEDIATE_INSTR
+  SHORT_IMMEDIATE_INSTR
 };
 
 
@@ -179,10 +164,6 @@
  public:
   InstructionTable();
   const InstructionDesc& Get(byte x) const { return instructions_[x]; }
-  static InstructionTable* get_instance() {
-    static InstructionTable table;
-    return &table;
-  }
 
  private:
   InstructionDesc instructions_[256];
@@ -217,7 +198,6 @@
   CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
   CopyTable(call_jump_instr, CALL_JUMP_INSTR);
   CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
-  CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
   AddJumpConditionalShort();
   SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
   SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@@ -263,13 +243,15 @@
 }
 
 
+static InstructionTable instruction_table;
+
+
 // The IA32 disassembler implementation.
 class DisassemblerIA32 {
  public:
   DisassemblerIA32(const NameConverter& converter,
                    bool abort_on_unimplemented = true)
       : converter_(converter),
-        instruction_table_(InstructionTable::get_instance()),
         tmp_buffer_pos_(0),
         abort_on_unimplemented_(abort_on_unimplemented) {
     tmp_buffer_[0] = '\0';
@@ -283,11 +265,11 @@
 
  private:
   const NameConverter& converter_;
-  InstructionTable* instruction_table_;
   v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
   unsigned int tmp_buffer_pos_;
   bool abort_on_unimplemented_;
 
+
   enum {
     eax = 0,
     ecx = 1,
@@ -763,13 +745,10 @@
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
-            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
-            case 0xFC: mnem = "frndint"; break;
-            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -791,8 +770,6 @@
         has_register = true;
       } else if (modrm_byte  == 0xE2) {
         mnem = "fclex";
-      } else if (modrm_byte == 0xE3) {
-        mnem = "fninit";
       } else {
         UnimplementedInstruction();
       }
@@ -891,7 +868,7 @@
   }
   bool processed = true;  // Will be set to false if the current instruction
                           // is not in 'instructions' table.
-  const InstructionDesc& idesc = instruction_table_->Get(*data);
+  const InstructionDesc& idesc = instruction_table.Get(*data);
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
       AppendToBuffer(idesc.mnem);
@@ -935,12 +912,6 @@
       break;
     }
 
-    case BYTE_IMMEDIATE_INSTR: {
-      AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
-      data += 2;
-      break;
-    }
-
     case NO_INSTR:
       processed = false;
       break;
@@ -992,7 +963,7 @@
         break;
 
       case 0x0F:
-        { byte f0byte = data[1];
+        { byte f0byte = *(data+1);
           const char* f0mnem = F0Mnem(f0byte);
           if (f0byte == 0x18) {
             int mod, regop, rm;
@@ -1000,25 +971,6 @@
             const char* suffix[] = {"nta", "1", "2", "3"};
             AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
             data += PrintRightOperand(data);
-          } else if (f0byte == 0x1F && data[2] == 0) {
-            AppendToBuffer("nop");  // 3 byte nop.
-            data += 3;
-          } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
-            AppendToBuffer("nop");  // 4 byte nop.
-            data += 4;
-          } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
-                     data[4] == 0) {
-            AppendToBuffer("nop");  // 5 byte nop.
-            data += 5;
-          } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
-                     data[4] == 0 && data[5] == 0 && data[6] == 0) {
-            AppendToBuffer("nop");  // 7 byte nop.
-            data += 7;
-          } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
-                     data[4] == 0 && data[5] == 0 && data[6] == 0 &&
-                     data[7] == 0) {
-            AppendToBuffer("nop");  // 8 byte nop.
-            data += 8;
           } else if (f0byte == 0xA2 || f0byte == 0x31) {
             AppendToBuffer("%s", f0mnem);
             data += 2;
@@ -1154,12 +1106,8 @@
         break;
 
       case 0x66:  // prefix
-        while (*data == 0x66) data++;
-        if (*data == 0xf && data[1] == 0x1f) {
-          AppendToBuffer("nop");  // 0x66 prefix
-        } else if (*data == 0x90) {
-          AppendToBuffer("nop");  // 0x66 prefix
-        } else if (*data == 0x8B) {
+        data++;
+        if (*data == 0x8B) {
           data++;
           data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
         } else if (*data == 0x89) {
@@ -1213,16 +1161,6 @@
                              NameOfXMMRegister(rm),
                              static_cast<int>(imm8));
               data += 2;
-            } else if (*data == 0x17) {
-              data++;
-              int mod, regop, rm;
-              get_modrm(*data, &mod, &regop, &rm);
-              int8_t imm8 = static_cast<int8_t>(data[1]);
-              AppendToBuffer("extractps %s,%s,%d",
-                             NameOfCPURegister(regop),
-                             NameOfXMMRegister(rm),
-                             static_cast<int>(imm8));
-              data += 2;
             } else if (*data == 0x22) {
               data++;
               int mod, regop, rm;
@@ -1296,9 +1234,6 @@
                            NameOfXMMRegister(rm),
                            static_cast<int>(imm8));
             data += 2;
-          } else if (*data == 0x90) {
-            data++;
-            AppendToBuffer("nop");  // 2 byte nop.
           } else if (*data == 0xF3) {
             data++;
             int mod, regop, rm;
@@ -1411,6 +1346,11 @@
         data += 2;
         break;
 
+      case 0x2C:
+        AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+        data += 2;
+        break;
+
       case 0xA9:
         AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
         data += 5;
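
Note: the disassembler changes above revert to a file-static InstructionTable and drop the byte_immediate_instr group, but the underlying decode pattern is unchanged: a table walk from opcode byte to mnemonic and operand order. A self-contained sketch of that pattern (names mirror the file's ByteMnemonic convention, but this is illustrative, not the V8 source):

#include <cstdint>

enum OperandOrder { UNSET_OP_ORDER, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER };

struct ByteMnemonic {
  int b;  // opcode byte; -1 terminates the table
  const char* mnem;
  OperandOrder op_order;
};

static const ByteMnemonic kTwoOperandsInstr[] = {
  {0x03, "add", REG_OPER_OP_ORDER},
  {0x2B, "sub", REG_OPER_OP_ORDER},
  {0x3B, "cmp", REG_OPER_OP_ORDER},
  {-1, "", UNSET_OP_ORDER},
};

// Linear walk over one mnemonic group, the same shape CopyTable() uses
// when it builds the 256-entry dispatch table; returns null when the
// opcode needs ad-hoc decoding instead.
const ByteMnemonic* Lookup(uint8_t opcode) {
  for (const ByteMnemonic* m = kTwoOperandsInstr; m->b >= 0; ++m) {
    if (m->b == opcode) return m;
  }
  return nullptr;
}
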
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 9e51857..2f1b2a9 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,37 +34,37 @@
 
 // Register lists
 // Note that the bit values must match those used in actual instruction encoding
-const int kNumRegs = 8;
+static const int kNumRegs = 8;
 
 
 // Caller-saved registers
-const RegList kJSCallerSaved =
+static const RegList kJSCallerSaved =
   1 << 0 |  // eax
   1 << 1 |  // ecx
   1 << 2 |  // edx
   1 << 3 |  // ebx - used as a caller-saved register in JavaScript code
   1 << 7;   // edi - callee function
 
-const int kNumJSCallerSaved = 5;
+static const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 
 // Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 8;
+static const int kNumSafepointRegisters = 8;
 
 // ----------------------------------------------------
 
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset     = 0 * kPointerSize;
-  static const int kCodeOffset     = 1 * kPointerSize;
-  static const int kStateOffset    = 2 * kPointerSize;
-  static const int kContextOffset  = 3 * kPointerSize;
-  static const int kFPOffset       = 4 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kContextOffset = 1 * kPointerSize;
+  static const int kFPOffset      = 2 * kPointerSize;
+  static const int kStateOffset   = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
-  static const int kSize = kFPOffset + kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
 };
 
 
@@ -95,11 +95,9 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
   // StandardFrame::IterateExpressions assumes that kContextOffset is the last
   // object pointer.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
+  static const int kFixedFrameSize    =  4;  // Currently unused.
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -125,8 +123,6 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
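
Note: the reverted StackHandlerConstants layout above stores next/context/fp/state/pc in that order, so each slot's offset is simply its index times kPointerSize and a handler occupies five words. A small sketch reproducing the arithmetic (constants duplicated here for illustration only):

// ia32 pointer size and the reverted handler slot order.
const int kPointerSize = 4;

const int kNextOffset    = 0 * kPointerSize;  // link to the next handler
const int kContextOffset = 1 * kPointerSize;
const int kFPOffset      = 2 * kPointerSize;
const int kStateOffset   = 3 * kPointerSize;
const int kPCOffset      = 4 * kPointerSize;
const int kSize          = kPCOffset + kPointerSize;  // 20 bytes on ia32

static_assert(kSize == 5 * kPointerSize, "handler is five words");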
 
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 62a2c2a..ca6ce6e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,7 +34,6 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
-#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
@@ -45,6 +44,11 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -101,32 +105,23 @@
 };
 
 
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 13;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o edi: the JS function object being called (i.e. ourselves)
+//   o edi: the JS function object being called (ie, ourselves)
 //   o esi: our context
 //   o ebp: our caller's frame pointer
 //   o esp: stack pointer (pointing to return address)
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FullCodeGenerator::Generate() {
-  CompilationInfo* info = info_;
-  handler_table_ =
-      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
-      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -141,26 +136,17 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (!info->is_classic_mode() || info->is_native()) {
+  if (info->is_strict_mode() || info->is_native()) {
     Label ok;
-    __ test(ecx, ecx);
+    __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
-    __ mov(ecx, Operand(esp, receiver_offset));
-    __ JumpIfSmi(ecx, &ok);
-    __ CmpObjectType(ecx, JS_GLOBAL_PROXY_TYPE, ecx);
-    __ j(not_equal, &ok, Label::kNear);
     __ mov(Operand(esp, receiver_offset),
            Immediate(isolate()->factory()->undefined_value()));
     __ bind(&ok);
   }
 
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -178,6 +164,11 @@
     }
   }
 
+  set_stack_height(2 + scope()->num_stack_slots());
+  if (FLAG_verify_stack_height) {
+    verify_stack_height();
+  }
+
   bool function_in_register = true;
 
   // Possibly allocate a local context.
@@ -209,12 +200,11 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers eax and ebx.
-        __ RecordWriteContextSlot(esi,
-                                  context_offset,
-                                  eax,
-                                  ebx,
-                                  kDontSaveFPRegs);
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering esi.
+        __ mov(ecx, esi);
+        __ RecordWrite(ecx, context_offset, eax, ebx);
       }
     }
   }
@@ -235,12 +225,12 @@
            Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
     __ push(edx);
     __ SafePush(Immediate(Smi::FromInt(num_parameters)));
-    // Arguments to ArgumentsAccessStub:
+    // Arguments to ArgumentsAccessStub and/or New...:
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (!is_classic_mode()) {
+    if (is_strict_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -269,11 +259,8 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -311,62 +298,15 @@
 }
 
 
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
-  __ mov(ebx, Immediate(profiling_counter_));
-  __ sub(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Smi::FromInt(delta)));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
-  int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
-  if (isolate()->IsDebuggerActive()) {
-    // Detect debug break requests as soon as possible.
-    reset_value = 10;
-  }
-  __ mov(ebx, Immediate(profiling_counter_));
-  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Smi::FromInt(reset_value)));
-}
-
-
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 100;
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
-                                       Label* back_edge_target) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-
-  if (FLAG_count_based_interrupts) {
-    int weight = 1;
-    if (FLAG_weighted_back_edges) {
-      ASSERT(back_edge_target->is_bound());
-      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kBackEdgeDistanceDivisor));
-    }
-    EmitProfilingCounterDecrement(weight);
-    __ j(positive, &ok, Label::kNear);
-    InterruptStub stub;
-    __ CallStub(&stub);
-  } else {
-    // Count based interrupts happen often enough when they are enabled
-    // that the additional stack checks are not necessary (they would
-    // only check for interrupts).
-    ExternalReference stack_limit =
-        ExternalReference::address_of_stack_limit(isolate());
-    __ cmp(esp, Operand::StaticVariable(stack_limit));
-    __ j(above_equal, &ok, Label::kNear);
-    StackCheckStub stub;
-    __ CallStub(&stub);
-  }
-
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ cmp(esp, Operand::StaticVariable(stack_limit));
+  __ j(above_equal, &ok, Label::kNear);
+  StackCheckStub stub;
+  __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
@@ -379,10 +319,6 @@
   ASSERT(loop_depth() > 0);
   __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
 
-  if (FLAG_count_based_interrupts) {
-    EmitProfilingCounterReset();
-  }
-
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -403,31 +339,6 @@
       __ push(eax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kBackEdgeDistanceDivisor));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ j(positive, &ok, Label::kNear);
-      __ push(eax);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        InterruptStub stub;
-        __ CallStub(&stub);
-      }
-      __ pop(eax);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
-    }
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
@@ -452,6 +363,15 @@
 }
 
 
+void FullCodeGenerator::verify_stack_height() {
+  ASSERT(FLAG_verify_stack_height);
+  __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
+  __ cmp(ebp, Operand(esp));
+  __ Assert(equal, "Full codegen stack height not as expected.");
+  __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
+}
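
Note: verify_stack_height(), restored above, checks the bookkeeping invariant that the tracked stack height (counted in pointer-sized slots) matches the actual ebp-to-esp distance, by temporarily subtracting from ebp and comparing against esp. The same invariant expressed in plain C++ (a sketch of the check, not the emitted assembly):

#include <cassert>
#include <cstdint>

const int kPointerSize = 4;  // ia32

// stack_height counts pointer-sized slots between the frame pointer and
// the stack pointer (2 for the saved esi/edi plus the stack locals).
void VerifyStackHeight(uintptr_t ebp, uintptr_t esp, int stack_height) {
  assert(ebp - static_cast<uintptr_t>(stack_height) * kPointerSize == esp);
}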
+
+
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
   ASSERT(var->IsStackAllocated() || var->IsContextSlot());
 }
@@ -468,13 +388,14 @@
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
+  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -521,11 +442,12 @@
   } else {
     __ push(Immediate(lit));
   }
+  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -558,6 +480,7 @@
                                                    Register reg) const {
   ASSERT(count > 0);
   __ Drop(count);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -567,6 +490,7 @@
   ASSERT(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -575,6 +499,7 @@
   ASSERT(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
+  codegen()->decrement_stack_height(count - 1);
 }
 
 
@@ -584,8 +509,9 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
+  codegen()->decrement_stack_height(count);
 }
 
 
@@ -619,6 +545,7 @@
   __ bind(materialize_false);
   __ push(Immediate(isolate()->factory()->false_value()));
   __ bind(&done);
+  codegen()->increment_stack_height();
 }
 
 
@@ -646,11 +573,12 @@
       ? isolate()->factory()->true_value()
       : isolate()->factory()->false_value();
   __ push(Immediate(value));
+  codegen()->increment_stack_height();
 }
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -669,7 +597,7 @@
   ToBooleanStub stub(result_register());
   __ push(result_register());
   __ CallStub(&stub, condition->test_id());
-  __ test(result_register(), result_register());
+  __ test(result_register(), Operand(result_register()));
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -733,17 +661,16 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
-
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
     ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
+    __ RecordWrite(scratch0, offset, src, scratch1);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -754,7 +681,13 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
   if (should_normalize) {
     __ cmp(eax, isolate()->factory()->true_value());
     Split(equal, if_true, if_false, NULL);
@@ -764,17 +697,16 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function,
+                                        int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      ++(*global_count);
       break;
 
     case Variable::PARAMETER:
@@ -783,7 +715,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(StackOperand(variable),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -806,16 +738,11 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(ContextOperand(esi, variable->index()), result_register());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(esi,
-                                  Context::SlotOffset(variable->index()),
-                                  result_register(),
-                                  ecx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
+        int offset = Context::SlotOffset(variable->index());
+        __ mov(ebx, esi);
+        __ RecordWrite(ebx, offset, result_register(), ecx);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ mov(ContextOperand(esi, variable->index()),
                Immediate(isolate()->factory()->the_hole_value()));
@@ -828,32 +755,37 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(esi);
       __ push(Immediate(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
-      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
-          ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
       // 'undefined') because we may have a (legal) redeclaration and we
       // must not destroy the current value.
+      increment_stack_height(3);
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         __ push(Immediate(isolate()->factory()->the_hole_value()));
+        increment_stack_height();
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
+        increment_stack_height();
       }
       __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+      decrement_stack_height(4);
       break;
     }
   }
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
@@ -869,6 +801,7 @@
   Breakable nested_statement(this, stmt);
   SetStatementPosition(stmt);
 
+  int switch_clause_stack_height = stack_height();
   // Keep the switch value on the stack until a case matches.
   VisitForStackValue(stmt->tag());
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -902,10 +835,10 @@
     if (inline_smi_code) {
       Label slow_case;
       __ mov(ecx, edx);
-      __ or_(ecx, eax);
+      __ or_(ecx, Operand(eax));
       patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
-      __ cmp(edx, eax);
+      __ cmp(edx, Operand(eax));
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target());
@@ -915,9 +848,9 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
-    __ test(eax, eax);
+    __ test(eax, Operand(eax));
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
     __ jmp(clause->body_target());
@@ -933,6 +866,7 @@
     __ jmp(default_clause->body_target());
   }
 
+  set_stack_height(switch_clause_stack_height);
   // Compile all the case bodies.
   for (int i = 0; i < clauses->length(); i++) {
     Comment cmnt(masm_, "[ Case body");
@@ -964,8 +898,6 @@
   __ cmp(eax, isolate()->factory()->null_value());
   __ j(equal, &exit);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(eax, &convert, Label::kNear);
@@ -976,30 +908,67 @@
   __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
   __ bind(&done_convert);
   __ push(eax);
-
-  // Check for proxies.
-  Label call_runtime, use_cache, fixed_array;
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  __ j(below_equal, &call_runtime);
+  increment_stack_height();
 
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(&call_runtime);
+  Label next, call_runtime;
+  __ mov(ecx, eax);
+  __ bind(&next);
 
+  // Check that there are no elements.  Register ecx contains the
+  // current JS object we've reached through the prototype chain.
+  __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
+         isolate()->factory()->empty_fixed_array());
+  __ j(not_equal, &call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in ebx for the subsequent
+  // prototype load.
+  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(edx, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (edx).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(edx, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ cmp(ecx, Operand(eax));
+  __ j(equal, &check_prototype, Label::kNear);
+  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ cmp(edx, isolate()->factory()->empty_fixed_array());
+  __ j(not_equal, &call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
+  __ cmp(ecx, isolate()->factory()->null_value());
+  __ j(not_equal, &next);
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
   __ jmp(&use_cache, Label::kNear);
 
   // Get the set of properties to enumerate.
   __ bind(&call_runtime);
-  __ push(eax);
+  __ push(eax);  // Duplicate the enumerable object on the stack.
   __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  __ j(not_equal, &fixed_array);
-
+  __ j(not_equal, &fixed_array, Label::kNear);
 
   // We got a map in register eax. Get the enumeration cache from it.
   __ bind(&use_cache);
@@ -1007,7 +976,7 @@
   __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
   __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Set up the four remaining stack slots.
+  // Setup the four remaining stack slots.
   __ push(eax);  // Map.
   __ push(edx);  // Enumeration cache.
   __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
@@ -1016,33 +985,16 @@
   __ jmp(&loop);
 
   // We got a fixed array in register eax. Iterate through that.
-  Label non_proxy;
   __ bind(&fixed_array);
-
-  Handle<JSGlobalPropertyCell> cell =
-      isolate()->factory()->NewJSGlobalPropertyCell(
-          Handle<Object>(
-              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
-  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
-  __ LoadHeapObject(ebx, cell);
-  __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
-         Immediate(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
-
-  __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
-  __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
-  __ j(above, &non_proxy);
-  __ mov(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
-  __ bind(&non_proxy);
-  __ push(ebx);  // Smi
-  __ push(eax);  // Array
+  __ push(Immediate(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ push(eax);
   __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
   __ push(eax);  // Fixed array length (as smi).
   __ push(Immediate(Smi::FromInt(0)));  // Initial index.
 
+  // 1 ~ The object has already been pushed.
+  increment_stack_height(ForIn::kElementCount - 1);
   // Generate code for doing the condition check.
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
@@ -1052,32 +1004,26 @@
   __ mov(ebx, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a smi in the
+  // Get the expected map from the stack or a zero map in the
   // permanent slow case into register edx.
   __ mov(edx, Operand(esp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we may have to filter the key.
+  // If not, we have to filter the key.
   Label update_each;
   __ mov(ecx, Operand(esp, 4 * kPointerSize));
   __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
-  // For proxies, no filtering is done.
-  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  ASSERT(Smi::FromInt(0) == 0);
-  __ test(edx, edx);
-  __ j(zero, &update_each);
-
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ test(eax, eax);
+  __ test(eax, Operand(eax));
   __ j(equal, loop_statement.continue_label());
-  __ mov(ebx, eax);
+  __ mov(ebx, Operand(eax));
 
   // Update the 'each' property or variable from the possibly filtered
   // entry in register ebx.
@@ -1085,7 +1031,7 @@
   __ mov(result_register(), ebx);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each());
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
   }
 
   // Generate code for the body of the loop.
@@ -1096,15 +1042,15 @@
   __ bind(loop_statement.continue_label());
   __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
 
-  EmitStackCheck(stmt, &loop);
+  EmitStackCheck(stmt);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(esp, Immediate(5 * kPointerSize));
+  __ add(Operand(esp), Immediate(5 * kPointerSize));
 
+  decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1123,7 +1069,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->language_mode());
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ push(Immediate(info));
     __ CallStub(&stub);
   } else {
@@ -1153,7 +1099,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1167,7 +1113,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1200,7 +1146,7 @@
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
-  CallIC(ic, mode);
+  __ call(ic, mode);
 }
 
 
@@ -1212,7 +1158,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
@@ -1243,23 +1189,16 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == DYNAMIC_GLOBAL) {
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == CONST ||
-        local->mode() == CONST_HARMONY ||
-        local->mode() == LET) {
+    if (local->mode() == Variable::CONST) {
       __ cmp(eax, isolate()->factory()->the_hole_value());
       __ j(not_equal, done);
-      if (local->mode() == CONST) {
-        __ mov(eax, isolate()->factory()->undefined_value());
-      } else {  // LET || CONST_HARMONY
-        __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
+      __ mov(eax, isolate()->factory()->undefined_value());
     }
     __ jmp(done);
   }
@@ -1281,7 +1220,7 @@
       __ mov(eax, GlobalObjectOperand());
       __ mov(ecx, var->name());
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(eax);
       break;
     }
@@ -1292,63 +1231,23 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->binding_needs_init()) {
-        // var->scope() may be NULL when the proxy is located in eval code and
-        // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        //     var->location() == LOOKUP.
-        // always holds.
-        ASSERT(var->scope() != NULL);
-
-        // Check if the binding really needs an initialization check. The check
-        // can be skipped in the following situation: we have a LET or CONST
-        // binding in harmony mode, both the Variable and the VariableProxy have
-        // the same declaration scope (i.e. they are both in global code, in the
-        // same function or in the same eval code) and the VariableProxy is in
-        // the source physically located after the initializer of the variable.
-        //
-        // We cannot skip any initialization checks for CONST in non-harmony
-        // mode because const variables may be declared but never initialized:
-        //   if (false) { const x; }; var y = x;
-        //
-        // The condition on the declaration scopes is a conservative check for
-        // nested functions that access a binding and are called before the
-        // binding is initialized:
-        //   function() { f(); let x = 1; function f() { x = 2; } }
-        //
-        bool skip_init_check;
-        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
-          skip_init_check = false;
-        } else {
-          // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
-          skip_init_check = var->mode() != CONST &&
-              var->initializer_position() < proxy->position();
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        Label done;
+        GetVar(eax, var);
+        __ cmp(eax, isolate()->factory()->the_hole_value());
+        __ j(not_equal, &done, Label::kNear);
+        if (var->mode() == Variable::LET) {
+          __ push(Immediate(var->name()));
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+        } else {  // Variable::CONST
+          __ mov(eax, isolate()->factory()->undefined_value());
         }
-
-        if (!skip_init_check) {
-          // Let and const need a read barrier.
-          Label done;
-          GetVar(eax, var);
-          __ cmp(eax, isolate()->factory()->the_hole_value());
-          __ j(not_equal, &done, Label::kNear);
-          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-            // Throw a reference error when using an uninitialized let/const
-            // binding in harmony mode.
-            __ push(Immediate(var->name()));
-            __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          } else {
-            // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST);
-            __ mov(eax, isolate()->factory()->undefined_value());
-          }
-          __ bind(&done);
-          context()->Plug(eax);
-          break;
-        }
+        __ bind(&done);
+        context()->Plug(eax);
       }
-      context()->Plug(var);
       break;
     }
 
@@ -1424,22 +1323,12 @@
 }
 
 
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
-  if (expression == NULL) {
-    __ push(Immediate(isolate()->factory()->null_value()));
-  } else {
-    VisitForStackValue(expression);
-  }
-}
-
-
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(constant_properties));
+  __ push(Immediate(expr->constant_properties()));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1447,15 +1336,10 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
-  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    __ CallStub(&stub);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1467,7 +1351,6 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
-  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1477,10 +1360,9 @@
     if (!result_saved) {
       __ push(eax);  // Save result on the stack
       result_saved = true;
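+      // Track the operand stack depth by hand (assumption: consumed by 3.6's debug-mode stack verification).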
+      increment_stack_height();
     }
     switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-        UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
         ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
@@ -1490,10 +1372,10 @@
             VisitForAccumulatorValue(value);
             __ mov(ecx, Immediate(key->handle()));
             __ mov(edx, Operand(esp, 0));
-            Handle<Code> ic = is_classic_mode()
-                ? isolate()->builtins()->StoreIC_Initialize()
-                : isolate()->builtins()->StoreIC_Initialize_Strict();
-            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1503,6 +1385,7 @@
         // Fall through.
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(Operand(esp, 0));  // Duplicate receiver.
+        increment_stack_height();
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
@@ -1511,29 +1394,25 @@
         } else {
           __ Drop(3);
         }
-        break;
-      case ObjectLiteral::Property::GETTER:
-        accessor_table.lookup(key)->second->getter = value;
+        decrement_stack_height(3);
         break;
       case ObjectLiteral::Property::SETTER:
-        accessor_table.lookup(key)->second->setter = value;
+      case ObjectLiteral::Property::GETTER:
+        __ push(Operand(esp, 0));  // Duplicate receiver.
+        increment_stack_height();
+        VisitForStackValue(key);
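+        // The Smi pushed below is kDefineAccessor's flag: 1 defines a setter, 0 a getter.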
+        __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+                          Smi::FromInt(1) :
+                          Smi::FromInt(0)));
+        increment_stack_height();
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        decrement_stack_height(4);
         break;
+      default: UNREACHABLE();
     }
   }
 
-  // Emit code to define accessors, using only a single call to the runtime for
-  // each pair of corresponding getters and setters.
-  for (AccessorTable::Iterator it = accessor_table.begin();
-       it != accessor_table.end();
-       ++it) {
-    __ push(Operand(esp, 0));  // Duplicate receiver.
-    VisitForStackValue(it->first);
-    EmitAccessor(it->second->getter);
-    EmitAccessor(it->second->setter);
-    __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
-  }
-
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ push(Operand(esp, 0));
@@ -1553,42 +1432,25 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
-  __ push(Immediate(constant_elements));
-  Heap* heap = isolate()->heap();
-  if (has_constant_fast_elements &&
-      constant_elements_values->map() == heap->fixed_cow_array_map()) {
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
-    // change, so it's possible to specialize the stub in advance.
-    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+  __ push(Immediate(expr->constant_elements()));
+  if (expr->constant_elements()->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    ASSERT(expr->depth() == 1);
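+    // Copy-on-write elements cannot change, so the shallow-clone stub can be specialized in advance.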
     FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
-        length);
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           FLAG_smi_only_arrays);
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
-    // change, so it's possible to specialize the stub in advance.
-    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
-        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
     __ CallStub(&stub);
   }
 
@@ -1608,31 +1470,18 @@
     if (!result_saved) {
       __ push(eax);
       result_saved = true;
+      increment_stack_height();
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (constant_elements_kind == FAST_ELEMENTS) {
-      // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
-      // transition and don't need to call the runtime stub.
-      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
-      __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
-      // Store the subexpression value in the array's elements.
-      __ mov(FieldOperand(ebx, offset), result_register());
-      // Update the write barrier for the array store.
-      __ RecordWriteField(ebx, offset, result_register(), ecx,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          INLINE_SMI_CHECK);
-    } else {
-      // Store the subexpression value in the array's elements.
-      __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
-      __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
-      __ mov(ecx, Immediate(Smi::FromInt(i)));
-      __ mov(edx, Immediate(Smi::FromInt(expr->literal_index())));
-      StoreArrayLiteralElementStub stub;
-      __ CallStub(&stub);
-    }
+    // Store the subexpression value in the array's elements.
+    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ mov(FieldOperand(ebx, offset), result_register());
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(ebx, offset, result_register(), ecx);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1650,7 +1499,9 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // on the left-hand side.
   if (!expr->target()->IsValidLeftHandSide()) {
-    VisitForEffect(expr->target());
+    ASSERT(expr->target()->AsThrow() != NULL);
+    VisitInCurrentContext(expr->target());  // Throw does not plug the context
+    context()->Plug(eax);
     return;
   }
 
@@ -1675,6 +1526,7 @@
         // We need the receiver both on the stack and in the accumulator.
         VisitForAccumulatorValue(property->obj());
         __ push(result_register());
+        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1685,6 +1537,7 @@
         VisitForAccumulatorValue(property->key());
         __ mov(edx, Operand(esp, 0));
         __ push(eax);
+        increment_stack_height();
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1716,6 +1569,7 @@
 
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
+    increment_stack_height();
     VisitForAccumulatorValue(expr->value());
 
     OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1765,14 +1619,14 @@
   ASSERT(!key->handle()->IsSmi());
   __ mov(ecx, Immediate(key->handle()));
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1785,15 +1639,16 @@
   // stack. Right operand is in eax.
   Label smi_case, done, stub_call;
   __ pop(edx);
+  decrement_stack_height();
   __ mov(ecx, eax);
-  __ or_(eax, edx);
+  __ or_(eax, Operand(edx));
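+  // If (left | right) has the smi tag bit clear, both operands are smis.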
   JumpPatchSite patch_site(masm_);
   patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
   BinaryOpStub stub(op, mode);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -1836,32 +1691,32 @@
       break;
     }
     case Token::ADD:
-      __ add(eax, ecx);
+      __ add(eax, Operand(ecx));
       __ j(overflow, &stub_call);
       break;
     case Token::SUB:
-      __ sub(eax, ecx);
+      __ sub(eax, Operand(ecx));
       __ j(overflow, &stub_call);
       break;
     case Token::MUL: {
       __ SmiUntag(eax);
-      __ imul(eax, ecx);
+      __ imul(eax, Operand(ecx));
       __ j(overflow, &stub_call);
-      __ test(eax, eax);
+      __ test(eax, Operand(eax));
       __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
-      __ or_(ebx, ecx);
+      __ or_(ebx, Operand(ecx));
       __ j(negative, &stub_call);
       break;
     }
     case Token::BIT_OR:
-      __ or_(eax, ecx);
+      __ or_(eax, Operand(ecx));
       break;
     case Token::BIT_AND:
-      __ and_(eax, ecx);
+      __ and_(eax, Operand(ecx));
       break;
     case Token::BIT_XOR:
-      __ xor_(eax, ecx);
+      __ xor_(eax, Operand(ecx));
       break;
     default:
       UNREACHABLE();
@@ -1876,19 +1731,22 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
+  decrement_stack_height();
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
-    VisitForEffect(expr);
+    ASSERT(expr->AsThrow() != NULL);
+    VisitInCurrentContext(expr);  // Throw does not plug the context
+    context()->Plug(eax);
     return;
   }
 
@@ -1912,30 +1770,36 @@
     }
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
+      increment_stack_height();
       VisitForAccumulatorValue(prop->obj());
       __ mov(edx, eax);
       __ pop(eax);  // Restore value.
+      decrement_stack_height();
       __ mov(ecx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic);
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ call(ic);
       break;
     }
     case KEYED_PROPERTY: {
       __ push(eax);  // Preserve value.
+      increment_stack_height();
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(ecx, eax);
       __ pop(edx);
+      decrement_stack_height();
       __ pop(eax);  // Restore value.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic);
+      decrement_stack_height();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ call(ic);
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
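+  // Record a deopt point with the result live in eax (TOS_REG).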
   context()->Plug(eax);
 }
 
@@ -1946,10 +1810,10 @@
     // Global var, const, or let.
     __ mov(ecx, var->name());
     __ mov(edx, GlobalObjectOperand());
-    Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->StoreIC_Initialize()
-        : isolate()->builtins()->StoreIC_Initialize_Strict();
-    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1974,13 +1838,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == LET && op != Token::INIT_LET) {
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(language_mode())));
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1995,14 +1859,12 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
       }
     }
 
-  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, ecx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -2015,15 +1877,14 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
+        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(eax);  // Value.
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ push(Immediate(Smi::FromInt(language_mode())));
+      __ push(Immediate(Smi::FromInt(strict_mode_flag())));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -2054,11 +1915,12 @@
     __ mov(edx, Operand(esp, 0));
   } else {
     __ pop(edx);
+    decrement_stack_height();
   }
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->StoreIC_Initialize()
-      : isolate()->builtins()->StoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2067,6 +1929,7 @@
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
     __ Drop(1);
+    decrement_stack_height();
   }
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -2088,17 +1951,19 @@
   }
 
   __ pop(ecx);
+  decrement_stack_height();
   if (expr->ends_initialization_block()) {
     __ mov(edx, Operand(esp, 0));  // Leave receiver on the stack for later.
   } else {
     __ pop(edx);
+    decrement_stack_height();
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2107,6 +1972,7 @@
     __ push(edx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(eax);
+    decrement_stack_height();
   }
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2126,22 +1992,13 @@
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
     __ pop(edx);
+    decrement_stack_height();
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
 }
 
 
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               RelocInfo::Mode rmode,
-                               unsigned ast_id) {
-  ic_total_count_++;
-  __ call(code, rmode, ast_id);
-}
-
-
-
-
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2158,10 +2015,11 @@
   SetSourcePosition(expr->position());
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  CallIC(ic, mode, expr->id());
+  __ call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
@@ -2176,6 +2034,7 @@
   __ pop(ecx);
   __ push(eax);
   __ push(ecx);
+  increment_stack_height();
 
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2190,10 +2049,11 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ mov(ecx, Operand(esp, (arg_count + 1) * kPointerSize));  // Key.
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  __ call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);  // Drop the key still on the stack.
 }
 
@@ -2209,30 +2069,19 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-
-  // Record call targets in unoptimized code, but not in the snapshot.
-  if (!Serializer::enabled()) {
-    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
-    Handle<Object> uninitialized =
-        TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ mov(ebx, cell);
-  }
-
   CallFunctionStub stub(arg_count, flags);
-  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  __ CallStub(&stub, expr->id());
-
+  __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+  decrement_stack_height(arg_count + 1);
   context()->DropAndPlug(1, eax);
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(esp, arg_count * kPointerSize));
@@ -2242,14 +2091,18 @@
 
   // Push the receiver of the enclosing function.
   __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
-  // Push the language mode.
-  __ push(Immediate(Smi::FromInt(language_mode())));
 
-  // Push the start position of the scope the call resides in.
-  __ push(Immediate(Smi::FromInt(scope()->start_position())));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ push(Immediate(Smi::FromInt(strict_mode)));
 
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
 
 
@@ -2275,15 +2128,33 @@
       VisitForStackValue(callee);
       // Reserved receiver slot.
       __ push(Immediate(isolate()->factory()->undefined_value()));
+      increment_stack_height();
       // Push the arguments.
       for (int i = 0; i < arg_count; i++) {
         VisitForStackValue(args->at(i));
       }
 
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly in
+      // generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
+        // Push the function and resolve eval.
+        __ push(eax);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(arg_count);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      __ bind(&done);
 
       // The runtime call returns a pair of values in eax (function) and
       // edx (receiver). Touch up the stack with the right values.
@@ -2293,16 +2164,17 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
-    __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    decrement_stack_height(arg_count + 1);  // Function is left on the stack.
     context()->DropAndPlug(1, eax);
 
   } else if (proxy != NULL && proxy->var()->IsUnallocated()) {
     // Push global object as receiver for the call IC.
     __ push(GlobalObjectOperand());
+    increment_stack_height();
     EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2321,6 +2193,7 @@
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
+    increment_stack_height(2);
 
     // If fast case code has been generated, emit code to push the function
     // and receiver and have the slow path jump around this code.
@@ -2328,7 +2201,8 @@
       Label call;
       __ jmp(&call, Label::kNear);
       __ bind(&done);
-      // Push function.
+      // Push function.  Stack height already incremented in slow case
+      // above.
       __ push(eax);
       // The receiver is implicitly the global receiver. Indicate this by
       // passing the hole to the call function stub.
@@ -2361,6 +2235,7 @@
     // Load global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    increment_stack_height();
     // Emit function call.
     EmitCallWithStub(expr, NO_CALL_FUNCTION_FLAGS);
   }
@@ -2398,29 +2273,16 @@
   __ SafeSet(eax, Immediate(arg_count));
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
-  // Record call targets in unoptimized code, but not in the snapshot.
-  CallFunctionFlags flags;
-  if (!Serializer::enabled()) {
-    flags = RECORD_CALL_TARGET;
-    Handle<Object> uninitialized =
-        TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ mov(ebx, cell);
-  } else {
-    flags = NO_CALL_FUNCTION_FLAGS;
-  }
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
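+  // JSConstructCall expects eax = argument count and edi = constructor function (set up above).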
 
-  CallConstructStub stub(flags);
-  __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2432,7 +2294,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2440,8 +2302,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2453,7 +2314,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   Split(zero, if_true, if_false, fall_through);
 
@@ -2461,8 +2322,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2486,15 +2346,14 @@
   __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   __ j(below, if_false);
   __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2508,15 +2367,14 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2532,7 +2390,7 @@
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
   __ test(ebx, Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2540,8 +2398,7 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+    ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2581,9 +2438,9 @@
   STATIC_ASSERT(kPointerSize == 4);
   __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
   // Calculate location of the first key name.
-  __ add(ebx,
-         Immediate(FixedArray::kHeaderSize +
-                   DescriptorArray::kFirstIndex * kPointerSize));
+  __ add(Operand(ebx),
+           Immediate(FixedArray::kHeaderSize +
+                     DescriptorArray::kFirstIndex * kPointerSize));
   // Loop through all the keys in the descriptor array. If one of these is the
   // symbol valueOf, the result is false.
   Label entry, loop;
@@ -2592,9 +2449,9 @@
   __ mov(edx, FieldOperand(ebx, 0));
   __ cmp(edx, FACTORY->value_of_symbol());
   __ j(equal, if_false);
-  __ add(ebx, Immediate(kPointerSize));
+  __ add(Operand(ebx), Immediate(kPointerSize));
   __ bind(&entry);
-  __ cmp(ebx, ecx);
+  __ cmp(ebx, Operand(ecx));
   __ j(not_equal, &loop);
 
   // Reload map as register ebx was used as temporary above.
@@ -2618,13 +2475,12 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2638,15 +2494,14 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2660,15 +2515,14 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2682,7 +2536,7 @@
 
   __ JumpIfSmi(eax, if_false);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2690,8 +2544,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2714,15 +2568,14 @@
   __ bind(&check_frame_marker);
   __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2737,16 +2590,16 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(ebx);
-  __ cmp(eax, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  decrement_stack_height();
+  __ cmp(eax, Operand(ebx));
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
@@ -2760,8 +2613,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2783,8 +2636,7 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2795,24 +2647,20 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
   // Map is now in eax.
   __ j(below, &null);
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ j(equal, &function);
 
-  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ j(equal, &function);
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ j(above_equal, &function);
 
-  // Check if the constructor in the map is a JS function.
+  // Check if the constructor in the map is a function.
   __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &non_function_constructor);
@@ -2844,7 +2692,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2852,12 +2700,12 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
     VisitForStackValue(args->at(2));
     __ CallRuntime(Runtime::kLog, 2);
+    decrement_stack_height(2);
   }
   // Finally, we're expected to leave a value on the top of the stack.
   __ mov(eax, isolate()->factory()->undefined_value());
@@ -2865,8 +2713,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2882,10 +2730,9 @@
   __ bind(&heapnumber_allocated);
 
   __ PrepareCallCFunction(1, ebx);
-  __ mov(eax, ContextOperand(context_register(), Context::GLOBAL_INDEX));
-  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
-  __ mov(Operand(esp, 0), eax);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
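+  // Pass the isolate address as the single C argument to the RNG helper.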
+  __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(ExternalReference::random_uint32_function(isolate()),
+                   1);
 
   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
@@ -2894,8 +2741,8 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, ebx);
-    __ movd(xmm0, eax);
+    __ movd(xmm1, Operand(ebx));
+    __ movd(xmm0, Operand(eax));
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
@@ -2916,35 +2763,34 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   VisitForStackValue(args->at(3));
   __ CallStub(&stub);
+  decrement_stack_height(4);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2962,72 +2808,30 @@
 }
 
 
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label runtime, done;
-  Register object = eax;
-  Register result = eax;
-  Register scratch = ecx;
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  __ Assert(equal, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch, Operand::StaticVariable(stamp));
-      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
-                                          kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ mov(Operand(esp, 0), object);
-    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-  context()->Plug(result);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
   if (CpuFeatures::IsSupported(SSE2)) {
-    MathPowStub stub(MathPowStub::ON_STACK);
+    MathPowStub stub;
     __ CallStub(&stub);
   } else {
     __ CallRuntime(Runtime::kMath_pow, 2);
   }
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(ebx);  // eax = value. ebx = object.
+  decrement_stack_height();
 
   Label done;
   // If the object is a smi, return the value.
@@ -3039,19 +2843,17 @@
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
+  __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
 
   __ bind(&done);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -3059,12 +2861,12 @@
 
   NumberToStringStub stub;
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3082,8 +2884,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3091,15 +2892,18 @@
 
   Register object = ebx;
   Register index = eax;
+  Register scratch = ecx;
   Register result = edx;
 
   __ pop(object);
+  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
+                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -3128,8 +2932,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3137,17 +2940,20 @@
 
   Register object = ebx;
   Register index = eax;
-  Register scratch = edx;
+  Register scratch1 = ecx;
+  Register scratch2 = edx;
   Register result = eax;
 
   __ pop(object);
+  decrement_stack_height();
 
   Label need_conversion;
   Label index_out_of_range;
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch,
+                                  scratch1,
+                                  scratch2,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -3176,8 +2982,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3185,12 +2990,12 @@
 
   StringAddStub stub(NO_STRING_ADD_FLAGS);
   __ CallStub(&stub);
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3198,70 +3003,58 @@
 
   StringCompareStub stub;
   __ CallStub(&stub);
+  decrement_stack_height(2);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
-  // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallStub(&stub);
-  context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3270,43 +3063,31 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
-  // Check for proxy.
-  Label proxy, done;
-  __ CmpObjectType(eax, JS_FUNCTION_PROXY_TYPE, ebx);
-  __ j(equal, &proxy);
-
   // InvokeFunction requires the function in edi. Move it in there.
   __ mov(edi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(edi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ jmp(&done);
-
-  __ bind(&proxy);
-  __ push(eax);
-  __ CallRuntime(Runtime::kCall, args->length());
-  __ bind(&done);
-
+  decrement_stack_height(arg_count + 1);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
   __ CallStub(&stub);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3338,14 +3119,14 @@
   __ mov(index_1, Operand(esp, 1 * kPointerSize));
   __ mov(index_2, Operand(esp, 0));
   __ mov(temp, index_1);
-  __ or_(temp, index_2);
+  __ or_(temp, Operand(index_2));
   __ JumpIfNotSmi(temp, &slow_case);
 
   // Check that both indices are valid.
   __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp(temp, index_1);
+  __ cmp(temp, Operand(index_1));
   __ j(below_equal, &slow_case);
-  __ cmp(temp, index_2);
+  __ cmp(temp, Operand(index_2));
   __ j(below_equal, &slow_case);
 
   // Bring addresses into index1 and index2.
@@ -3358,35 +3139,16 @@
   __ mov(Operand(index_2, 0), object);
   __ mov(Operand(index_1, 0), temp);
 
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
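+  // Skip the write barriers below when the elements array is in new space.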
 
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
+  __ mov(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
 
-  __ bind(&no_remembered_set);
-
+  __ bind(&new_space);
   // We are done. Drop elements from the stack, and return undefined.
-  __ add(esp, Immediate(3 * kPointerSize));
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
   __ mov(eax, isolate()->factory()->undefined_value());
   __ jmp(&done);
 
@@ -3394,12 +3156,12 @@
   __ CallRuntime(Runtime::kSwapElements, 3);
 
   __ bind(&done);
+  decrement_stack_height(3);
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3447,8 +3209,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   Register right = eax;
@@ -3460,11 +3221,11 @@
   __ pop(left);
 
   Label done, fail, ok;
-  __ cmp(left, right);
+  __ cmp(left, Operand(right));
   __ j(equal, &ok);
   // Fail if either is a non-HeapObject.
   __ mov(tmp, left);
-  __ and_(tmp, right);
+  __ and_(Operand(tmp), right);
   __ JumpIfSmi(tmp, &fail);
   __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3481,12 +3242,12 @@
   __ mov(eax, Immediate(isolate()->factory()->true_value()));
   __ bind(&done);
 
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3504,15 +3265,14 @@
 
   __ test(FieldOperand(eax, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3527,12 +3287,11 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3557,7 +3316,7 @@
   Operand separator_operand = Operand(esp, 2 * kPointerSize);
   Operand result_operand = Operand(esp, 1 * kPointerSize);
   Operand array_length_operand = Operand(esp, 0);
-  __ sub(esp, Immediate(2 * kPointerSize));
+  __ sub(Operand(esp), Immediate(2 * kPointerSize));
   __ cld();
   // Check that the array is a JSArray
   __ JumpIfSmi(array, &bailout);
@@ -3593,7 +3352,7 @@
   // Live loop registers: index, array_length, string,
   //                      scratch, string_length, elements.
   if (FLAG_debug_code) {
-    __ cmp(index, array_length);
+    __ cmp(index, Operand(array_length));
     __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
   }
   __ bind(&loop);
@@ -3611,8 +3370,8 @@
   __ add(string_length,
          FieldOperand(string, SeqAsciiString::kLengthOffset));
   __ j(overflow, &bailout);
-  __ add(index, Immediate(1));
-  __ cmp(index, array_length);
+  __ add(Operand(index), Immediate(1));
+  __ cmp(index, Operand(array_length));
   __ j(less, &loop);
 
   // If array_length is 1, return elements[0], a string.
@@ -3646,10 +3405,10 @@
   // to string_length.
   __ mov(scratch, separator_operand);
   __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, scratch);  // May be negative, temporarily.
+  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
   __ imul(scratch, array_length_operand);
   __ j(overflow, &bailout);
-  __ add(string_length, scratch);
+  __ add(string_length, Operand(scratch));
   __ j(overflow, &bailout);
 
   __ shr(string_length, 1);
@@ -3690,7 +3449,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
+  __ add(Operand(index), Immediate(1));
   __ bind(&loop_1_condition);
   __ cmp(index, array_length_operand);
   __ j(less, &loop_1);  // End while (index < length).
@@ -3700,7 +3459,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its ascii character value.
   __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ mov_b(separator_operand, scratch);
 
@@ -3731,7 +3490,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
+  __ add(Operand(index), Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_2);  // End while (index < length).
@@ -3772,7 +3531,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(index, Immediate(1));
+  __ add(Operand(index), Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_3);  // End while (index < length).
@@ -3784,9 +3543,10 @@
   __ bind(&done);
   __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(esp, Immediate(3 * kPointerSize));
+  __ add(Operand(esp), Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  decrement_stack_height();
   context()->Plug(eax);
 }
 
@@ -3806,6 +3566,7 @@
     // Prepare for calling JS runtime function.
     __ mov(eax, GlobalObjectOperand());
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+    increment_stack_height();
   }
 
   // Push the arguments ("left-to-right").
@@ -3820,13 +3581,18 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    CallIC(ic, mode, expr->id());
+    __ call(ic, mode, expr->id());
     // Restore context register.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // Call the C runtime function.
     __ CallRuntime(expr->function(), arg_count);
   }
+  decrement_stack_height(arg_count);
+  if (expr->is_jsruntime()) {
+    decrement_stack_height();
+  }
+
   context()->Plug(eax);
 }
 
@@ -3841,16 +3607,15 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
-            ? kNonStrictMode : kStrictMode;
-        __ push(Immediate(Smi::FromInt(strict_mode_flag)));
+        __ push(Immediate(Smi::FromInt(strict_mode_flag())));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        decrement_stack_height(2);
         context()->Plug(eax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
@@ -3892,41 +3657,18 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
-      } else if (context()->IsTest()) {
-        const TestContext* test = TestContext::cast(context());
-        // The labels are swapped for the recursive call.
-        VisitForControl(expr->expression(),
-                        test->false_label(),
-                        test->true_label(),
-                        test->fall_through());
-        context()->Plug(test->true_label(), test->false_label());
       } else {
-        // We handle value contexts explicitly rather than simply visiting
-        // for control and plugging the control flow into the context,
-        // because we need to prepare a pair of extra administrative AST ids
-        // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
-        Label materialize_true, materialize_false, done;
-        VisitForControl(expr->expression(),
-                        &materialize_false,
-                        &materialize_true,
-                        &materialize_true);
-        __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ mov(eax, isolate()->factory()->true_value());
-        } else {
-          __ Push(isolate()->factory()->true_value());
-        }
-        __ jmp(&done, Label::kNear);
-        __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ mov(eax, isolate()->factory()->false_value());
-        } else {
-          __ Push(isolate()->factory()->false_value());
-        }
-        __ bind(&done);
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
       }
       break;
     }
@@ -3937,6 +3679,7 @@
         VisitForTypeofValue(expr->expression());
       }
       __ CallRuntime(Runtime::kTypeof, 1);
+      decrement_stack_height();
       context()->Plug(eax);
       break;
     }
@@ -3978,7 +3721,7 @@
   // accumulator register eax.
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(eax);
 }
 
@@ -3990,7 +3733,10 @@
   // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
   // as the left-hand side.
   if (!expr->expression()->IsValidLeftHandSide()) {
-    VisitForEffect(expr->expression());
+    ASSERT(expr->expression()->AsThrow() != NULL);
+    VisitInCurrentContext(expr->expression());
+    // Visiting Throw does not plug the context.
+    context()->Plug(eax);
     return;
   }
 
@@ -4015,17 +3761,20 @@
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && !context()->IsEffect()) {
       __ push(Immediate(Smi::FromInt(0)));
+      increment_stack_height();
     }
     if (assign_type == NAMED_PROPERTY) {
       // Put the object both on the stack and in the accumulator.
       VisitForAccumulatorValue(prop->obj());
       __ push(eax);
+      increment_stack_height();
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
       __ mov(edx, Operand(esp, 0));
       __ push(eax);
+      increment_stack_height();
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4056,6 +3805,7 @@
       switch (assign_type) {
         case VARIABLE:
           __ push(eax);
+          increment_stack_height();
           break;
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
@@ -4073,9 +3823,9 @@
 
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
-      __ add(eax, Immediate(Smi::FromInt(1)));
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
     } else {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
     }
     __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
@@ -4085,9 +3835,9 @@
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
-      __ sub(eax, Immediate(Smi::FromInt(1)));
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
     } else {
-      __ add(eax, Immediate(Smi::FromInt(1)));
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
     }
   }
 
@@ -4098,7 +3848,7 @@
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
   BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  __ call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4129,10 +3879,11 @@
     case NAMED_PROPERTY: {
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      decrement_stack_height();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4146,10 +3897,12 @@
     case KEYED_PROPERTY: {
       __ pop(ecx);
       __ pop(edx);
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      decrement_stack_height();
+      decrement_stack_height();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         // Result is on the stack
@@ -4177,7 +3930,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    CallIC(ic);
+    __ call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(eax);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4197,25 +3950,20 @@
     context()->Plug(eax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInDuplicateContext(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Expression* sub_expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(sub_expr);
+    VisitForTypeofValue(expr);
   }
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(eax, if_true);
@@ -4250,11 +3998,8 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
-    __ j(equal, if_true);
-    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
-    Split(equal, if_true, if_false, fall_through);
+    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
+    Split(above_equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(eax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4272,7 +4017,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  Split(equal, if_true, if_false, fall_through);
 }
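// [Sketch, not part of the revert] EmitLiteralCompareTypeof and
// EmitLiteralCompareUndefined back the TryLiteralCompare fast path:
// comparisons whose right-hand side is a literal, e.g.
//
//   typeof x == "number"   // dispatch directly on the value's type
//   x === undefined        // a single pointer compare against the oddball
//
// skip the generic CompareIC entirely and branch straight to the
// if_true / if_false / fall_through labels of the enclosing test context.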
 
 
@@ -4280,12 +4036,9 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
-
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
+
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4293,13 +4046,21 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
-  switch (op) {
+  switch (expr->op()) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      decrement_stack_height(2);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4308,8 +4069,9 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-      __ test(eax, eax);
+      decrement_stack_height(2);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      __ test(eax, Operand(eax));
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
       break;
@@ -4322,34 +4084,43 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
+          __ pop(edx);
           break;
         case Token::LT:
           cc = less;
+          __ pop(edx);
           break;
         case Token::GT:
-          cc = greater;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = less;
+          __ mov(edx, result_register());
+          __ pop(eax);
          break;
         case Token::LTE:
-          cc = less_equal;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = greater_equal;
+          __ mov(edx, result_register());
+          __ pop(eax);
           break;
         case Token::GTE:
           cc = greater_equal;
+          __ pop(edx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      __ pop(edx);
+      decrement_stack_height();
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         Label slow_case;
-        __ mov(ecx, edx);
-        __ or_(ecx, eax);
+        __ mov(ecx, Operand(edx));
+        __ or_(ecx, Operand(eax));
         patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-        __ cmp(edx, eax);
+        __ cmp(edx, Operand(eax));
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
@@ -4357,11 +4128,11 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-      __ test(eax, eax);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+      __ test(eax, Operand(eax));
       Split(cc, if_true, if_false, fall_through);
     }
   }
@@ -4372,9 +4143,7 @@
 }
 
 
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4382,20 +4151,15 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Handle<Object> nil_value = nil == kNullValue ?
-      isolate()->factory()->null_value() :
-      isolate()->factory()->undefined_value();
-  __ cmp(eax, nil_value);
-  if (expr->op() == Token::EQ_STRICT) {
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ cmp(eax, isolate()->factory()->null_value());
+  if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Object> other_nil_value = nil == kNullValue ?
-        isolate()->factory()->undefined_value() :
-        isolate()->factory()->null_value();
     __ j(equal, if_true);
-    __ cmp(eax, other_nil_value);
+    __ cmp(eax, isolate()->factory()->undefined_value());
     __ j(equal, if_true);
     __ JumpIfSmi(eax, if_false);
     // It can be an undetectable object.
@@ -4462,7 +4226,7 @@
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
   __ pop(edx);
-  __ sub(edx, Immediate(masm_->CodeObject()));
+  __ sub(Operand(edx), Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(edx);
@@ -4478,8 +4242,8 @@
   // Uncook return address.
   __ pop(edx);
   __ SmiUntag(edx);
-  __ add(edx, Immediate(masm_->CodeObject()));
-  __ jmp(edx);
+  __ add(Operand(edx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(edx));
 }
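// [Sketch, not part of the revert] On the Token::GT / Token::LTE rewrites
// above: for these two operators V8 3.6 tests the reversed condition
// (`less` / `greater_equal`) with swapped operand registers instead of using
// `greater` / `less_equal` directly. The compare code converts its operands
// in a fixed register order, while ECMA-262 requires the left operand to be
// converted first, so swapping the registers preserves the conversion order.
// Roughly, for `a > b` (evaluated as: convert a, convert b, test b < a):
//
//   case Token::GT:
//     cc = less;                       // reversed condition
//     __ mov(edx, result_register());  // right operand (b) into edx
//     __ pop(eax);                     // left operand (a) back into eax
//     break;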
 
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 33f247a..9b5cc56 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -212,7 +212,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
+  __ RecordWrite(elements, r0, r1);
 }
 
 
@@ -326,7 +326,7 @@
   // Fast case: Do the load.
   STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
+  __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, out_of_range);
@@ -394,8 +394,8 @@
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(scratch2, Immediate(Smi::FromInt(2)));
-  __ cmp(key, scratch2);
+  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
+  __ cmp(key, Operand(scratch2));
   __ j(greater_equal, unmapped_case);
 
   // Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
   __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
   __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, scratch);
+  __ cmp(key, Operand(scratch));
   __ j(greater_equal, slow_case);
   return FieldOperand(backing_store,
                       key,
@@ -473,6 +473,7 @@
   Counters* counters = isolate->counters();
   __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
   __ ret(0);
+
   __ bind(&check_number_dictionary);
   __ mov(ebx, eax);
   __ SmiUntag(ebx);
@@ -533,38 +534,18 @@
   __ shr(ecx, KeyedLookupCache::kMapHashShift);
   __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, edi);
-  __ and_(ecx, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+  __ xor_(ecx, Operand(edi));
+  __ and_(ecx, KeyedLookupCache::kCapacityMask);
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ mov(edi, ecx);
-    __ shl(edi, kPointerSizeLog2 + 1);
-    if (i != 0) {
-      __ add(edi, Immediate(kPointerSize * i * 2));
-    }
-    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(not_equal, &try_next_entry);
-    __ add(edi, Immediate(kPointerSize));
-    __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(equal, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  __ lea(edi, Operand(ecx, 1));
+  __ mov(edi, ecx);
   __ shl(edi, kPointerSizeLog2 + 1);
-  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
-  __ add(edi, Immediate(kPointerSize));
+  __ add(Operand(edi), Immediate(kPointerSize));
   __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
@@ -575,27 +556,15 @@
   // ecx     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    if (i != 0) {
-      __ add(ecx, Immediate(i));
-    }
-    __ mov(edi,
-           Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
-    __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-    __ sub(edi, ecx);
-    __ j(above_equal, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
+  __ mov(edi,
+         Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+  __ sub(edi, Operand(ecx));
+  __ j(above_equal, &property_array_property);
 
   // Load in-object property.
-  __ bind(&load_in_object_property);
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, edi);
+  __ add(ecx, Operand(edi));
   __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
@@ -637,12 +606,14 @@
 
   Register receiver = edx;
   Register index = eax;
-  Register scratch = ecx;
+  Register scratch1 = ebx;
+  Register scratch2 = ecx;
   Register result = eax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch,
+                                          scratch1,
+                                          scratch2,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -680,8 +651,8 @@
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
-  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
+  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
+  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
@@ -739,7 +710,7 @@
   __ mov(mapped_location, eax);
   __ lea(ecx, mapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
+  __ RecordWrite(ebx, ecx, edx);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
@@ -748,7 +719,7 @@
   __ mov(unmapped_location, eax);
   __ lea(edi, unmapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
+  __ RecordWrite(ebx, edi, edx);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -763,10 +734,7 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label check_if_double_array, array, extra, transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
+  Label slow, fast, array, extra;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(edx, &slow);
@@ -782,18 +750,22 @@
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
   __ j(below, &slow);
+  __ CmpInstanceType(edi, JS_PROXY_TYPE);
+  __ j(equal, &slow);
+  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
   // edx: JSObject
   // ecx: key (a smi)
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object_with_map_check);
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+  __ j(below, &fast);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -806,28 +778,16 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
+  // edi: receiver->elements, a FixedArray
   // flags: compare (ecx, edx.length())
   // do not leave holes in the array:
   __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  // Add 1 to receiver->length, and go to common element store code for Objects.
+  // Add 1 to receiver->length, and go to fast array write.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
-  __ jmp(&fast_object_without_map_check);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  // Add 1 to receiver->length, and go to common element store code for doubles.
-  __ add(FieldOperand(edx, JSArray::kLengthOffset),
-         Immediate(Smi::FromInt(1)));
-  __ jmp(&fast_double_without_map_check);
+  __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -836,111 +796,34 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
 
-  // Check the key against the length in the array and fall through to the
-  // common store code.
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra);
 
-  // Fast case: Do the store, could either Object or double.
-  __ bind(&fast_object_with_map_check);
+  // Fast case: Do the store.
+  __ bind(&fast);
   // eax: value
   // ecx: key (a smi)
   // edx: receiver
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &fast_double_with_map_check);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(eax, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+  // edi: FixedArray receiver->elements
+  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
   // Update write barrier for the elements array address.
-  __ mov(edx, eax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ mov(edx, Operand(eax));
+  __ RecordWrite(edi, 0, edx, ecx);
   __ ret(0);
-
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ bind(&fast_double_without_map_check);
-  // If the value is a number, store it as a double in the FastDoubleElements
-  // array.
-  __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
-                                 &transition_double_elements, false);
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &non_double_value,
-              DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
 }
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                               int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState extra_state) {
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- edx                 : receiver
@@ -950,11 +833,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_state,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
-  Isolate* isolate = masm->isolate();
-  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, eax);
+  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+                                                  eax);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -980,9 +863,9 @@
 
   // Check for boolean.
   __ bind(&non_string);
-  __ cmp(edx, isolate->factory()->true_value());
+  __ cmp(edx, FACTORY->true_value());
   __ j(equal, &boolean);
-  __ cmp(edx, isolate->factory()->false_value());
+  __ cmp(edx, FACTORY->false_value());
   __ j(not_equal, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -990,7 +873,8 @@
 
   // Probe the stub cache for the value object.
   __ bind(&probe);
-  isolate->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);
+  Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
+                                                  no_reg);
   __ bind(&miss);
 }
 
@@ -1020,9 +904,8 @@
                     NullCallWrapper(), CALL_AS_METHOD);
 }
 
-
 // The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1046,10 +929,10 @@
 }
 
 
-void CallICBase::GenerateMiss(MacroAssembler* masm,
-                              int argc,
-                              IC::UtilityId id,
-                              Code::ExtraICState extra_state) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1068,22 +951,22 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Push the receiver and the name of the function.
-    __ push(edx);
-    __ push(ecx);
+  // Push the receiver and the name of the function.
+  __ push(edx);
+  __ push(ecx);
 
-    // Call the entry.
-    CEntryStub stub(1);
-    __ mov(eax, Immediate(2));
-    __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
-    __ CallStub(&stub);
+  // Call the entry.
+  CEntryStub stub(1);
+  __ mov(eax, Immediate(2));
+  __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+  __ CallStub(&stub);
 
-    // Move result to edi and exit the internal frame.
-    __ mov(edi, eax);
-  }
+  // Move result to edi and exit the internal frame.
+  __ mov(edi, eax);
+  __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -1106,7 +989,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -1120,7 +1003,7 @@
 
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_state) {
+                                 Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1131,10 +1014,38 @@
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC,
-                                            extra_state);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
 
-  GenerateMiss(masm, argc, extra_state);
+  GenerateMiss(masm, argc, extra_ic_state);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
 }
 
 
@@ -1200,17 +1111,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(ecx);  // save the key
-    __ push(edx);  // pass the receiver
-    __ push(ecx);  // pass the key
-    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-    __ pop(ecx);  // restore the key
-    // Leave the internal frame.
-  }
-
+  __ EnterInternalFrame();
+  __ push(ecx);  // save the key
+  __ push(edx);  // pass the receiver
+  __ push(ecx);  // pass the key
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(ecx);  // restore the key
+  __ LeaveInternalFrame();
   __ mov(edi, eax);
   __ jmp(&do_call);
 
@@ -1236,8 +1143,10 @@
 
   __ bind(&lookup_monomorphic_cache);
   __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1);
-  CallICBase::GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC,
-                                            Code::kNoExtraICState);
+  GenerateMonomorphicCacheProbe(masm,
+                                argc,
+                                Code::KEYED_CALL_IC,
+                                Code::kNoExtraICState);
   // Fall through on miss.
 
   __ bind(&slow_call);
@@ -1300,12 +1209,25 @@
   __ JumpIfSmi(ecx, &miss);
   Condition cond = masm->IsObjectStringType(ecx, eax, eax);
   __ j(NegateCondition(cond), &miss);
-  CallICBase::GenerateNormal(masm, argc);
+  GenerateCallNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
@@ -1453,10 +1375,10 @@
   //  -- esp[0] : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
   // (currently anything except for external arrays which means anything with
-  // elements of FixedArray type).  Value must be a number, but only smis are
-  // accepted as the most common case.
+  // elements of FixedArray type), but is currently restricted to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
 
   Label miss;
 
@@ -1478,13 +1400,6 @@
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss);
 
-  // Check that the array has fast properties, otherwise the length
-  // property might have been redefined.
-  __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
-  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(equal, &miss);
-
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1621,57 +1536,6 @@
 }
 
 
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ebx    : target map
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  // Must return the modified receiver in eax.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-    __ mov(eax, edx);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ebx);  // return address
-  // Leaving the code managed by the register allocator and return to the
-  // convention of using esi as context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ebx    : target map
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  // Must return the modified receiver in eax.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
-    __ mov(eax, edx);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ebx);  // return address
-  // Leaving the code managed by the register allocator and return to the
-  // convention of using esi as context register.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
 #undef __
 
 
@@ -1683,9 +1547,11 @@
     case Token::LT:
       return less;
     case Token::GT:
-      return greater;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
     case Token::LTE:
-      return less_equal;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
     case Token::GTE:
       return greater_equal;
     default:
@@ -1717,9 +1583,6 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
-    if (state == KNOWN_OBJECTS) {
-      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
-    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
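// [Sketch, not part of the revert] On the RecordWrite rewrites above: the
// revert swaps the incremental-marking write barrier back for the 3.6 one.
// As the two MacroAssembler APIs appear in this diff, they differ roughly
// like this (the annotations are editorial):
//
//   // post-3.6: takes a save-FP mode, since the incremental marker may run
//   // barrier code that clobbers XMM registers.
//   __ RecordWrite(object, address, value, kDontSaveFPRegs);
//
//   // 3.6: records old-to-new pointer stores for the scavenger; no FP mode
//   // is needed. An (object, offset, value, scratch) overload also appears
//   // in this file, e.g. __ RecordWrite(edi, 0, edx, ecx).
//   __ RecordWrite(object, address, value);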
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 8fb4c79..d5a4fe6 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,6 @@
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
-#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -67,18 +66,10 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope(SSE2);
-
-  CodeStub::GenerateFPStubs();
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -142,7 +133,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). ecx is zero for method calls and non-zero for
   // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
+  if (info_->is_strict_mode() || info_->is_native()) {
     Label ok;
     __ test(ecx, Operand(ecx));
     __ j(zero, &ok, Label::kNear);
@@ -213,12 +204,11 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers eax and ebx.
-        __ RecordWriteContextSlot(esi,
-                                  context_offset,
-                                  eax,
-                                  ebx,
-                                  kDontSaveFPRegs);
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering esi.
+        __ mov(ecx, esi);
+        __ RecordWrite(ecx, context_offset, eax, ebx);
       }
     }
     Comment(";;; End allocate local context");
@@ -262,9 +252,6 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -315,21 +302,18 @@
 }
 
 
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
-  Handle<Object> literal = chunk_->LookupLiteral(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
-  return literal;
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  Handle<Object> value = chunk_->LookupLiteral(op);
-  return value->Number();
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
+Immediate LCodeGen::ToImmediate(LOperand* op) {
+  LConstantOperand* const_op = LConstantOperand::cast(op);
+  Handle<Object> literal = chunk_->LookupLiteral(const_op);
+  Representation r = chunk_->LookupLiteralRepresentation(const_op);
+  if (r.IsInteger32()) {
+    ASSERT(literal->IsNumber());
+    return Immediate(static_cast<int32_t>(literal->Number()));
+  } else if (r.IsDouble()) {
+    Abort("unsupported double immediate");
+  }
+  ASSERT(r.IsTagged());
+  return Immediate(literal);
 }
 
 
@@ -368,19 +352,7 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -492,18 +464,14 @@
                                        int argc,
                                        LInstruction* instr,
                                        LOperand* context) {
+  ASSERT(context->IsRegister() || context->IsStackSlot());
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
-  } else if (context->IsStackSlot()) {
-    __ mov(esi, ToOperand(context));
-  } else if (context->IsConstantOperand()) {
-    Handle<Object> literal =
-        chunk_->LookupLiteral(LConstantOperand::cast(context));
-    __ LoadHeapObject(esi, Handle<Context>::cast(literal));
   } else {
-    UNREACHABLE();
+    // Context is stack slot.
+    __ mov(esi, ToOperand(context));
   }
 
   __ CallRuntimeSaveDoubles(id);
@@ -529,14 +497,10 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
-    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
-      if (e->frame_type() == JS_FUNCTION) {
-        ++jsframe_count;
-      }
     }
-    Translation translation(&translations_, frame_count, jsframe_count);
+    Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
@@ -553,6 +517,7 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -603,6 +568,7 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
+  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -677,7 +643,7 @@
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
-  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  const ZoneList<LOperand*>* operands = pointers->operands();
   Safepoint safepoint =
       safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
@@ -1145,7 +1111,7 @@
   ASSERT(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
-    __ sub(ToOperand(left), ToInteger32Immediate(right));
+    __ sub(ToOperand(left), ToImmediate(right));
   } else {
     __ sub(ToRegister(left), ToOperand(right));
   }
@@ -1201,13 +1167,8 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  Register reg = ToRegister(instr->result());
-  Handle<Object> handle = instr->value();
-  if (handle->IsHeapObject()) {
-    __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
-  } else {
-    __ Set(reg, Immediate(handle));
-  }
+  ASSERT(instr->result()->IsRegister());
+  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
 }
 
 
@@ -1246,7 +1207,6 @@
   Register result = ToRegister(instr->result());
   Register map = ToRegister(instr->TempAt(0));
   ASSERT(input.is(result));
-
   Label done;
   // If the object is a smi return the object.
   __ JumpIfSmi(input, &done, Label::kNear);
@@ -1260,43 +1220,6 @@
 }
 
 
-void LCodeGen::DoDateField(LDateField* instr) {
-  Register object = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Smi* index = instr->index();
-  Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(eax));
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  __ Assert(equal, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ mov(scratch, Operand::StaticVariable(stamp));
-      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
-                                          kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ mov(Operand(esp, 0), object);
-    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-}
-
-
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->Equals(instr->result()));
@@ -1322,7 +1245,7 @@
   ASSERT(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
-    __ add(ToOperand(left), ToInteger32Immediate(right));
+    __ add(ToOperand(left), ToImmediate(right));
   } else {
     __ add(ToRegister(left), ToOperand(right));
   }
@@ -1571,40 +1494,32 @@
 }
 
 
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  if (right->IsConstantOperand()) {
+    __ cmp(ToOperand(left), ToImmediate(right));
+  } else {
+    __ cmp(ToRegister(left), ToOperand(right));
+  }
+}
+
+
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
 
-  if (left->IsConstantOperand() && right->IsConstantOperand()) {
-    // We can statically evaluate the comparison.
-    double left_val = ToDouble(LConstantOperand::cast(left));
-    double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
-    EmitGoto(next_block);
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the false block.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
   } else {
-    if (instr->is_double()) {
-      // Don't base result on EFLAGS when a NaN is involved. Instead
-      // jump to the false block.
-      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-    } else {
-      if (right->IsConstantOperand()) {
-        __ cmp(ToRegister(left), ToInteger32Immediate(right));
-      } else if (left->IsConstantOperand()) {
-        __ cmp(ToOperand(right), ToInteger32Immediate(left));
-        // We transposed the operands. Reverse the condition.
-        cc = ReverseCondition(cc);
-      } else {
-        __ cmp(ToRegister(left), ToOperand(right));
-      }
-    }
-    EmitBranch(true_block, false_block, cc);
+    EmitCmpI(left, right);
   }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
 }
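
(The parity_even jump in DoCmpIDAndBranch above exists because ucomisd flags
a NaN operand as "unordered" via PF. A minimal standalone C++ sketch of the
same IEEE rule -- every ordered comparison involving NaN is false, which is
why unordered results must be routed to the false block:)

    #include <cmath>
    #include <cstdio>

    int main() {
      double nan = std::nan("");
      std::printf("nan <  1.0 : %d\n", nan < 1.0);   // 0
      std::printf("nan >  1.0 : %d\n", nan > 1.0);   // 0
      std::printf("nan == nan : %d\n", nan == nan);  // 0
      // std::isunordered is the C++ spelling of the parity-flag check.
      std::printf("unordered  : %d\n", std::isunordered(nan, 1.0));  // 1
      return 0;
    }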
 
 
@@ -1629,33 +1544,23 @@
 }
 
 
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be an undetectable object.
-  if (instr->hydrogen()->representation().IsSpecialization() ||
-      instr->hydrogen()->type().IsSmi()) {
-    EmitGoto(false_block);
-    return;
-  }
+  // TODO(fsc): If the expression is known to be a smi, then it's
+  // definitely not null. Jump to the false block.
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Handle<Object> nil_value = instr->nil() == kNullValue ?
-      factory()->null_value() :
-      factory()->undefined_value();
-  __ cmp(reg, nil_value);
-  if (instr->kind() == kStrictEquality) {
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  __ cmp(reg, factory()->null_value());
+  if (instr->is_strict()) {
     EmitBranch(true_block, false_block, equal);
   } else {
-    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
-        factory()->undefined_value() :
-        factory()->null_value();
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ cmp(reg, other_nil_value);
+    __ cmp(reg, factory()->undefined_value());
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1707,31 +1612,6 @@
 }
 
 
-Condition LCodeGen::EmitIsString(Register input,
-                                 Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
-
-  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
-
-  return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition true_cond = EmitIsString(reg, temp, false_label);
-
-  EmitBranch(true_block, false_block, true_cond);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->InputAt(0));
 
@@ -1759,41 +1639,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = ComputeCompareCondition(op);
-  __ test(eax, Operand(eax));
-
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1857,7 +1702,7 @@
 
 
 // Branches to a label or falls through with the answer in the z flag.  Trashes
-// the temp registers, but not the input.
+// the temp registers, but not the input.  Only input and temp2 may alias.
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
@@ -1865,38 +1710,30 @@
                                Register temp,
                                Register temp2) {
   ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
+  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+  __ j(below, is_false);
 
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-    __ j(below, is_false);
-    __ j(equal, is_true);
-    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
-    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ j(above, is_false);
+    __ j(above_equal, is_false);
   }
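  // Aside (sketch, not V8 code): the "faster code path" deleted above is the
  // classic unsigned range-check trick -- subtracting the lower bound lets a
  // single unsigned compare test membership in [lo, hi]:
  //
  //   bool InNonCallableRange(unsigned type) {
  //     return type - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <=
  //            LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
  //                FIRST_NONCALLABLE_SPEC_OBJECT_TYPE;
  //   }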
 
-  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1925,7 +1762,12 @@
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
   Register temp2 = ToRegister(instr->TempAt(1));
-
+  if (input.is(temp)) {
+    // Swap.
+    Register swapper = temp;
+    temp = temp2;
+    temp2 = swapper;
+  }
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1976,8 +1818,9 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() { return instr_; }
+
     Label* map_check() { return &map_check_; }
+
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -2000,9 +1843,7 @@
   Register map = ToRegister(instr->TempAt(0));
   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  Handle<JSGlobalPropertyCell> cache_cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ cmp(map, Operand::Cell(cache_cell));  // Patched to cached map.
+  __ cmp(map, factory()->the_hole_value());  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   __ jmp(&done);
@@ -2050,7 +1891,7 @@
   // the stub.
   Register temp = ToRegister(instr->TempAt(0));
   ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
-  __ LoadHeapObject(InstanceofStub::right(), instr->function());
+  __ mov(InstanceofStub::right(), Immediate(instr->function()));
   static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
   __ mov(temp, Immediate(delta));
@@ -2068,6 +1909,26 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -2075,6 +1936,9 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
   Label true_value, done;
   __ test(eax, Operand(eax));
   __ j(condition, &true_value, Label::kNear);
@@ -2105,7 +1969,7 @@
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
+  if (instr->hydrogen()->check_hole_value()) {
     __ cmp(result, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
@@ -2126,21 +1990,20 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->value());
-  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
+  Register value = ToRegister(instr->InputAt(0));
+  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(Operand::Cell(cell_handle), factory()->the_hole_value());
+  if (instr->hydrogen()->check_hole_value()) {
+    __ cmp(cell_operand, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
 
   // Store the value.
-  __ mov(Operand::Cell(cell_handle), value);
-  // Cells are always rescanned, so no write barrier here.
+  __ mov(cell_operand, value);
 }
 
 
@@ -2150,7 +2013,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2161,54 +2024,18 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ mov(result, ContextOperand(context, instr->slot_index()));
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(result, factory()->the_hole_value());
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
-    } else {
-      Label is_not_hole;
-      __ j(not_equal, &is_not_hole, Label::kNear);
-      __ mov(result, factory()->undefined_value());
-      __ bind(&is_not_hole);
-    }
-  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-
-  Label skip_assignment;
-
-  Operand target = ContextOperand(context, instr->slot_index());
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ cmp(target, factory()->the_hole_value());
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
-    } else {
-      __ j(not_equal, &skip_assignment, Label::kNear);
-    }
-  }
-
-  __ mov(target, value);
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  __ mov(ContextOperand(context, instr->slot_index()), value);
+  if (instr->needs_write_barrier()) {
     Register temp = ToRegister(instr->TempAt(0));
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWriteContextSlot(context,
-                              offset,
-                              value,
-                              temp,
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
+    __ RecordWrite(context, offset, value, temp);
   }
-
-  __ bind(&skip_assignment);
 }
 
 
@@ -2228,9 +2055,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsFound() &&
+  ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2246,24 +2073,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
-  }
-}
-
-
-void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  ASSERT(!operand->IsDoubleRegister());
-  if (operand->IsConstantOperand()) {
-    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
-    if (object->IsSmi()) {
-      __ Push(Handle<Smi>::cast(object));
-    } else {
-      __ PushHeapObject(Handle<HeapObject>::cast(object));
-    }
-  } else if (operand->IsRegister()) {
-    __ push(ToRegister(operand));
-  } else {
-    __ push(ToOperand(operand));
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2441,14 +2251,16 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result = ToDoubleRegister(instr->result());
 
-  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-      sizeof(kHoleNanLower32);
-  Operand hole_check_operand = BuildFastArrayOperand(
-      instr->elements(), instr->key(),
-      FAST_DOUBLE_ELEMENTS,
-      offset);
-  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+        sizeof(kHoleNanLower32);
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(), instr->key(),
+        FAST_DOUBLE_ELEMENTS,
+        offset);
+    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr->environment());
+  }
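
    // Aside (illustrative sketch, not V8 code; assumes little-endian IEEE
    // doubles): the hole sentinel in a FixedDoubleArray is a NaN with a
    // reserved bit pattern, so comparing only the upper 32 bits against
    // kHoleNanUpper32 identifies it -- hence the sizeof(kHoleNanLower32)
    // offset in the operand above:
    //
    //   bool IsHoleNan(double d) {
    //     uint64_t bits;
    //     std::memcpy(&bits, &d, sizeof bits);
    //     return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    //   }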
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2518,7 +2330,6 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -2587,10 +2398,15 @@
 }
 
 
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
   Register scratch = ToRegister(instr->TempAt(0));
+  ASSERT(receiver.is(eax));  // Used for parameter count.
+  ASSERT(function.is(edi));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(eax));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2632,17 +2448,6 @@
   __ mov(receiver,
          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(eax));  // Used for parameter count.
-  ASSERT(function.is(edi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(eax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2679,13 +2484,17 @@
 
 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   LOperand* argument = instr->InputAt(0);
-  EmitPushTaggedOperand(argument);
+  if (argument->IsConstantOperand()) {
+    __ push(ToImmediate(argument));
+  } else {
+    __ push(ToOperand(argument));
+  }
 }
 
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
@@ -2703,15 +2512,6 @@
 }
 
 
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(esi));
-  __ push(esi);  // The context is the first argument.
-  __ push(Immediate(instr->hydrogen()->pairs()));
-  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
-  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
@@ -2730,53 +2530,41 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  // Change context if needed.
+  bool change_context =
+      (info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+  } else {
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  }
+
+  // Set eax to arguments count if adaption is not needed. Assumes that eax
+  // is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ mov(eax, arity);
+  }
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (can_invoke_directly) {
-    __ LoadHeapObject(edi, function);
-
-    // Change context if needed.
-    bool change_context =
-        (info()->closure()->context() != function->context()) ||
-        scope()->contains_with() ||
-        (scope()->num_heap_slots() > 0);
-
-    if (change_context) {
-      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-    } else {
-      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    }
-
-    // Set eax to arguments count if adaption is not needed. Assumes that eax
-    // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
-      __ mov(eax, arity);
-    }
-
-    // Invoke function directly.
-    __ SetCallKind(ecx, call_kind);
-    if (*function == *info()->closure()) {
-      __ CallSelf();
-    } else {
-      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
-    }
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  // Invoke function.
+  __ SetCallKind(ecx, call_kind);
+  if (*function == *info()->closure()) {
+    __ CallSelf();
   } else {
-    // We need to adapt arguments.
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   }
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2859,7 +2647,6 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2991,146 +2778,72 @@
 }
 
 
-void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->value());
-  Register scratch = ToRegister(instr->temp());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
-  // Note that according to ECMA-262 15.8.2.13:
-  // Math.pow(-Infinity, 0.5) == Infinity
-  // Math.sqrt(-Infinity) == NaN
-  Label done, sqrt;
-  // Check base for -Infinity.  According to IEEE-754, single-precision
-  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
-  __ mov(scratch, 0xFF800000);
-  __ movd(xmm_scratch, scratch);
-  __ cvtss2sd(xmm_scratch, xmm_scratch);
-  __ ucomisd(input_reg, xmm_scratch);
-  // Comparing -Infinity with NaN results in "unordered", which sets the
-  // zero flag as if both were equal.  However, it also sets the carry flag.
-  __ j(not_equal, &sqrt, Label::kNear);
-  __ j(carry, &sqrt, Label::kNear);
-  // If input is -Infinity, return Infinity.
-  __ xorps(input_reg, input_reg);
-  __ subsd(input_reg, xmm_scratch);
-  __ jmp(&done, Label::kNear);
-
-  // Square root.
-  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
-  __ bind(&done);
 }
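
(With the -Infinity special case gone in this older version, DoMathPowHalf
reduces to "turn -0 into +0, then take the square root". A hedged C++ sketch
of the two facts the deleted comments cited; standard <cmath> semantics
assumed, this is not V8 code:)

    #include <cmath>
    #include <cstdio>

    int main() {
      // ECMA-262 15.8.2.13: Math.pow(-Infinity, 0.5) is Infinity, while
      // Math.sqrt(-Infinity) is NaN -- the case the newer code special-cased.
      std::printf("pow(-inf, 0.5) = %f\n", std::pow(-INFINITY, 0.5));  // inf
      std::printf("sqrt(-inf)     = %f\n", std::sqrt(-INFINITY));      // nan
      // Adding +0.0 maps -0.0 to +0.0: the xorps/addsd pair above.
      std::printf("signbit(-0+0)  = %d\n", std::signbit(-0.0 + 0.0));  // 0
      return 0;
    }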
 
 
 void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->InputAt(0);
+  LOperand* right = instr->InputAt(1);
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
-         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
-  ASSERT(!instr->InputAt(1)->IsRegister() ||
-         ToRegister(instr->InputAt(1)).is(eax));
-  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
 
-  if (exponent_type.IsTagged()) {
-    Label no_deopt;
-    __ JumpIfSmi(eax, &no_deopt);
-    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-    DeoptimizeIf(not_equal, instr->environment());
-    __ bind(&no_deopt);
-    MathPowStub stub(MathPowStub::TAGGED);
-    __ CallStub(&stub);
+  if (exponent_type.IsDouble()) {
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
   } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(MathPowStub::INTEGER);
-    __ CallStub(&stub);
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!ToRegister(right).is(ebx));
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
+    __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
+                     4);
   } else {
-    ASSERT(exponent_type.IsDouble());
-    MathPowStub stub(MathPowStub::DOUBLE);
-    __ CallStub(&stub);
+    ASSERT(exponent_type.IsTagged());
+    CpuFeatures::Scope scope(SSE2);
+    Register right_reg = ToRegister(right);
+
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+    __ SmiUntag(right_reg);
+    __ cvtsi2sd(result_reg, Operand(right_reg));
+    __ jmp(&call);
+
+    __ bind(&non_smi);
+    // It is safe to use ebx directly since the instruction is marked
+    // as a call.
+    ASSERT(!right_reg.is(ebx));
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+    __ bind(&call);
+    __ PrepareCallCFunction(4, ebx);
+    __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
+    __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
   }
-}
 
-
-void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom: public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LRandom* instr_;
-  };
-
-  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
-
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  ASSERT(ToRegister(instr->InputAt(0)).is(eax));
-  // Assert that the register size is indeed the size of each seed.
-  static const int kSeedSize = sizeof(uint32_t);
-  STATIC_ASSERT(kPointerSize == kSeedSize);
-
-  __ mov(eax, FieldOperand(eax, GlobalObject::kGlobalContextOffset));
-  static const int kRandomSeedOffset =
-      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
-  // ebx: FixedArray of the global context's random seeds
-
-  // Load state[0].
-  __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
-  // If state[0] == 0, call runtime to initialize seeds.
-  __ test(ecx, ecx);
-  __ j(zero, deferred->entry());
-  // Load state[1].
-  __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
-  // ecx: state[0]
-  // eax: state[1]
-
-  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  __ movzx_w(edx, ecx);
-  __ imul(edx, edx, 18273);
-  __ shr(ecx, 16);
-  __ add(ecx, edx);
-  // Save state[0].
-  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
-
-  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ movzx_w(edx, eax);
-  __ imul(edx, edx, 36969);
-  __ shr(eax, 16);
-  __ add(eax, edx);
-  // Save state[1].
-  __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
-
-  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ shl(ecx, 14);
-  __ and_(eax, Immediate(0x3FFFF));
-  __ add(eax, ecx);
-
-  __ bind(deferred->exit());
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
-  __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm2, ebx);
-  __ movd(xmm1, eax);
-  __ cvtss2sd(xmm2, xmm2);
-  __ xorps(xmm1, xmm2);
-  __ subsd(xmm1, xmm2);
-}
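
(For reference, the DoRandom block removed above inlines two 16-bit
multiply-with-carry streams and splices the combined 32 bits into a double's
mantissa. A C++ rendering of the commented recurrences -- it assumes
little-endian IEEE doubles and is a sketch, not V8's runtime code:)

    #include <cstdint>
    #include <cstring>

    // Returns a double in [0, 1) built from 32 fresh random bits.
    double NextRandom(uint32_t state[2]) {
      // state[i] = multiplier * (state[i] & 0xFFFF) + (state[i] >> 16)
      state[0] = 18273u * (state[0] & 0xFFFF) + (state[0] >> 16);
      state[1] = 36969u * (state[1] & 0xFFFF) + (state[1] >> 16);
      uint32_t bits = (state[0] << 14) + (state[1] & 0x3FFFF);
      // Overlay the bits on the mantissa of 1.0 x 2^20 (the xorps above),
      // then subtract 1.0 x 2^20 (the subsd): the result is bits * 2^-32.
      uint64_t pattern = (UINT64_C(0x41300000) << 32) | bits;
      double d;
      std::memcpy(&d, &pattern, sizeof d);
      return d - 1048576.0;  // 1048576.0 == 1.0 * 2^20
    }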
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1, ebx);
-  __ mov(Operand(esp, 0), eax);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-  // Return value is in eax.
+  // Return value is in st(0) on ia32.
+  // Store it into the (fixed) result register.
+  __ sub(Operand(esp), Immediate(kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ movdbl(result_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
 }
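
(All three branches above end in a C call whose double result comes back in
st(0), hence the fstp_d/movdbl dance that follows. For the integer-exponent
path, a plain C++ sketch of what a power_double_int-style helper computes --
square-and-multiply, shown to document the semantics, not V8's implementation:)

    // Computes base^exponent for a 32-bit integer exponent (sketch).
    double PowerDoubleInt(double base, int exponent) {
      unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                                : static_cast<unsigned>(exponent);
      double result = 1.0;
      for (double b = base; n != 0; n >>= 1) {
        if (n & 1) result *= b;  // fold in each set exponent bit
        b *= b;                  // square once per bit position
      }
      return exponent < 0 ? 1.0 / result : result;
    }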
 
 
@@ -3165,14 +2878,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3203,15 +2908,15 @@
     case kMathSqrt:
       DoMathSqrt(instr);
       break;
+    case kMathPowHalf:
+      DoMathPowHalf(instr);
+      break;
     case kMathCos:
       DoMathCos(instr);
       break;
     case kMathSin:
       DoMathSin(instr);
       break;
-    case kMathTan:
-      DoMathTan(instr);
-      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -3263,12 +2968,12 @@
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->function()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Drop(1);
 }
 
 
@@ -3287,6 +2992,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(eax));
+  __ mov(edi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3296,9 +3002,9 @@
   ASSERT(ToRegister(instr->constructor()).is(edi));
   ASSERT(ToRegister(instr->result()).is(eax));
 
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
   __ Set(eax, Immediate(instr->arity()));
-  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -3317,36 +3023,21 @@
   }
 
   // Do the store.
-  HType type = instr->hydrogen()->value()->type();
-  SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ mov(FieldOperand(object, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWriteField(object,
-                          offset,
-                          value,
-                          temp,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(object, offset, value, temp);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ mov(FieldOperand(temp, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWriteField(temp,
-                          offset,
-                          value,
-                          object,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(temp, offset, value, object);
     }
   }
 }
@@ -3358,7 +3049,7 @@
   ASSERT(ToRegister(instr->value()).is(eax));
 
   __ mov(ecx, instr->name());
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3368,7 +3059,7 @@
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   if (instr->index()->IsConstantOperand()) {
     __ cmp(ToOperand(instr->length()),
-           Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+           ToImmediate(LConstantOperand::cast(instr->index())));
     DeoptimizeIf(below_equal, instr->environment());
   } else {
     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
@@ -3405,7 +3096,6 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3438,21 +3128,13 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key,
            FieldOperand(elements,
                         key,
                         times_pointer_size,
                         FixedArray::kHeaderSize));
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   kSaveFPRegs,
-                   EMIT_REMEMBERED_SET,
-                   check_needed);
+    __ RecordWrite(elements, key, value);
   }
 }
 
@@ -3483,75 +3165,99 @@
   ASSERT(ToRegister(instr->key()).is(ecx));
   ASSERT(ToRegister(instr->value()).is(eax));
 
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
-  Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_reg());
-
-  Handle<Map> from_map = instr->original_map();
-  Handle<Map> to_map = instr->transitioned_map();
-  ElementsKind from_kind = from_map->elements_kind();
-  ElementsKind to_kind = to_map->elements_kind();
-
-  Label not_applicable;
-  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
-  __ j(not_equal, &not_applicable);
-  __ mov(new_map_reg, to_map);
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Register object_reg = ToRegister(instr->object());
-    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
-    // Write barrier.
-    ASSERT_NE(instr->temp_reg(), NULL);
-    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      to_kind == FAST_DOUBLE_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(edx));
-    ASSERT(new_map_reg.is(ebx));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(edx));
-    ASSERT(new_map_reg.is(ebx));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
-  } else {
-    UNREACHABLE();
-  }
-  __ bind(&not_applicable);
-}
-
-
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   class DeferredStringCharCodeAt: public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
+  Register string = ToRegister(instr->string());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  StringCharLoadGenerator::Generate(masm(),
-                                    factory(),
-                                    ToRegister(instr->string()),
-                                    ToRegister(instr->index()),
-                                    ToRegister(instr->result()),
-                                    deferred->entry());
+  // Fetch the instance type of the receiver into result register.
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, Operand(result));
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+         Immediate(factory()->empty_string()));
+  __ j(not_equal, deferred->entry());
+  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result, Immediate(kStringRepresentationMask));
+  __ j(not_zero, deferred->entry());
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
   __ bind(deferred->exit());
 }
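
(The fast path inlined above unwraps at most one level of string indirection
before indexing. A heap-free C++ sketch of that control flow -- the types and
tags are hypothetical; only the shape dispatch mirrors the assembly:)

    enum Shape { kSeqString, kConsString, kSlicedString, kExternalString };

    struct Str {
      Shape shape;
      Str* first;   // cons: first component; sliced: parent string
      Str* second;  // cons: second component (empty string once flattened)
      int offset;   // sliced: start offset into the parent
    };

    // Returns the sequential string to index into (adjusting *index), or
    // nullptr when the runtime must flatten first -- the deferred->entry()
    // path in the generated code.
    Str* UnwrapForCharAt(Str* s, int* index, const Str* empty_string) {
      if (s->shape == kSlicedString) {
        *index += s->offset;
        s = s->first;
      } else if (s->shape == kConsString) {
        if (s->second != empty_string) return nullptr;  // not flat yet
        s = s->first;
      }
      return s->shape == kSeqString ? s : nullptr;  // still indirect
    }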
 
@@ -3594,7 +3300,6 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3644,8 +3349,16 @@
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  EmitPushTaggedOperand(instr->left());
-  EmitPushTaggedOperand(instr->right());
+  if (instr->left()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->left()));
+  } else {
+    __ push(ToOperand(instr->left()));
+  }
+  if (instr->right()->IsConstantOperand()) {
+    __ push(ToImmediate(instr->right()));
+  } else {
+    __ push(ToOperand(instr->right()));
+  }
   StringAddStub stub(NO_STRING_CHECK_IN_STUB);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
@@ -3666,7 +3379,6 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3734,7 +3446,6 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3795,10 +3506,8 @@
 
 
 void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                Register temp_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
-                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Label load_smi, done;
 
@@ -3827,15 +3536,6 @@
   }
   // Heap number to XMM conversion.
   __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    XMMRegister xmm_scratch = xmm0;
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(result_reg, xmm_scratch);
-    __ j(not_zero, &done, Label::kNear);
-    __ movmskpd(temp_reg, result_reg);
-    __ test_b(temp_reg, 1);
-    DeoptimizeIf(not_zero, env);
-  }
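    // Aside (sketch, not V8 code): the minus-zero check deleted above is
    // needed because +0.0 == -0.0 compares equal, so after ucomisd reports
    // "zero" the sign must be read out separately; movmskpd bit 0 is exactly
    // that sign bit:
    //
    //   bool IsMinusZero(double d) { return d == 0.0 && std::signbit(d); }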
   __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
@@ -3847,6 +3547,16 @@
 }
 
 
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3879,7 +3589,8 @@
       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
       __ j(less, &convert, Label::kNear);
       // Pop FPU stack before deoptimizing.
-      __ fstp(0);
+      __ ffree(0);
+      __ fincstp();
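       // (Aside: ffree(0) + fincstp discards st(0) without a memory store;
       // the newer code being reverted spelled the same x87 pop as fstp(0).)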
       DeoptimizeIf(no_condition, instr->environment());
 
       // Reserve space for 64 bit answer.
@@ -3927,16 +3638,6 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI: public LDeferredCode {
-   public:
-    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LTaggedToI* instr_;
-  };
-
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3958,23 +3659,14 @@
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
-  LOperand* temp = instr->TempAt(0);
-  ASSERT(temp == NULL || temp->IsRegister());
   LOperand* result = instr->result();
   ASSERT(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
   XMMRegister result_reg = ToDoubleRegister(result);
 
-  bool deoptimize_on_minus_zero =
-      instr->hydrogen()->deoptimize_on_minus_zero();
-  Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
-  EmitNumberUntagD(input_reg,
-                   temp_reg,
-                   result_reg,
+  EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
-                   deoptimize_on_minus_zero,
                    instr->environment());
 }
 
@@ -4148,7 +3840,7 @@
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
-      __ cmp(temp, tag);
+      __ cmpb(Operand(temp), tag);
       DeoptimizeIf(not_equal, instr->environment());
     }
   }
@@ -4156,37 +3848,20 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  Handle<JSFunction> target = instr->hydrogen()->target();
-  if (isolate()->heap()->InNewSpace(*target)) {
-    Register reg = ToRegister(instr->value());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(target);
-    __ cmp(reg, Operand::Cell(cell));
-  } else {
-    Operand operand = ToOperand(instr->value());
-    __ cmp(operand, target);
-  }
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Operand operand = ToOperand(instr->InputAt(0));
+  __ cmp(operand, instr->hydrogen()->target());
   DeoptimizeIf(not_equal, instr->environment());
 }
 
 
-void LCodeGen::DoCheckMapCommon(Register reg,
-                                Handle<Map> map,
-                                CompareMapMode mode,
-                                LEnvironment* env) {
-  Label success;
-  __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
-  __ bind(&success);
-}
-
-
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
@@ -4238,6 +3913,17 @@
 }
 
 
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(object);
+    __ mov(result, Operand::Cell(cell));
+  } else {
+    __ mov(result, object);
+  }
+}
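
(The restored LoadHeapObject embeds new-space objects indirectly: a scavenge
may move them, so the code stores the address of a stationary
JSGlobalPropertyCell and loads through it, while old-space objects are
embedded directly. A minimal sketch of the indirection -- hypothetical types;
only the extra load is the point:)

    struct Cell { void* value; };  // assumed stationary for this sketch

    void* LoadHeapObjectSketch(void* obj, bool in_new_space, Cell* cell) {
      // A new-space pointer may be stale after GC, so generated code goes
      // through the cell; otherwise the raw pointer can be used directly.
      return in_new_space ? cell->value : obj;
    }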
+
+
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -4245,141 +3931,33 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  __ LoadHeapObject(reg, current_prototype);
+  LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-
+    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    __ LoadHeapObject(reg, current_prototype);
+    LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
-  DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
-  class DeferredAllocateObject: public LDeferredCode {
-   public:
-    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LAllocateObject* instr_;
-  };
-
-  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
-  int instance_size = initial_map->instance_size();
-  ASSERT(initial_map->pre_allocated_property_fields() +
-         initial_map->unused_property_fields() -
-         initial_map->inobject_properties() == 0);
-
-  // Allocate memory for the object.  The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
-  __ AllocateInNewSpace(instance_size,
-                        result,
-                        no_reg,
-                        scratch,
-                        deferred->entry(),
-                        TAG_OBJECT);
-
-  // Load the initial map.
-  Register map = scratch;
-  __ LoadHeapObject(scratch, constructor);
-  __ mov(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(map);
-    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
-            instance_size >> kPointerSizeLog2);
-    __ Assert(equal, "Unexpected instance size");
-    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
-            initial_map->pre_allocated_property_fields());
-    __ Assert(equal, "Unexpected pre-allocated property fields count");
-    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
-            initial_map->unused_property_fields());
-    __ Assert(equal, "Unexpected unused property fields count");
-    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
-            initial_map->inobject_properties());
-    __ Assert(equal, "Unexpected in-object property fields count");
-  }
-
-  // Initialize map and fields of the newly allocated object.
-  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-  __ mov(FieldOperand(result, JSObject::kMapOffset), map);
-  __ mov(scratch, factory()->empty_fixed_array());
-  __ mov(FieldOperand(result, JSObject::kElementsOffset), scratch);
-  __ mov(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
-  if (initial_map->inobject_properties() != 0) {
-    __ mov(scratch, factory()->undefined_value());
-    for (int i = 0; i < initial_map->inobject_properties(); i++) {
-      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-      __ mov(FieldOperand(result, property_offset), scratch);
-    }
-  }
-
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
-  Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Set(result, Immediate(0));
-
-  PushSafepointRegistersScope scope(this);
-  __ PushHeapObject(constructor);
-  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr, instr->context());
-  __ StoreToSafepointRegisterSlot(result, eax);
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
 }
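
The hunk above folds the prototype check into a direct map compare: if the receiver's map differs from the one the optimizer assumed, the code deoptimizes. A minimal sketch of that guard, using illustrative stand-in types rather than V8's assembler and Map class:

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for V8's Map/HeapObject machinery.
struct Map { uint32_t id; };
struct HeapObject { const Map* map; };

// Optimized code was specialized for one object shape (map). If the map no
// longer matches, fall back to unoptimized code (DeoptimizeIf in the patch).
bool CheckMapOrDeopt(const HeapObject* obj, const Map* expected) {
  if (obj->map != expected) {
    std::puts("map mismatch: deoptimize");
    return false;
  }
  return true;  // fast path: shape is what the compiler assumed
}

int main() {
  Map m1{1}, m2{2};
  HeapObject o{&m1};
  CheckMapOrDeopt(&o, &m1);  // matches: stay in optimized code
  o.map = &m2;               // shape changed, e.g. prototype mutated
  CheckMapOrDeopt(&o, &m1);  // mismatch: triggers the deopt path
}
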
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  Heap* heap = isolate()->heap();
-  ElementsKind boilerplate_elements_kind =
-      instr->hydrogen()->boilerplate_elements_kind();
-
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
-    __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
-    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-    // Load the map's "bit field 2". We only need the first byte,
-    // but the following masking takes care of that anyway.
-    __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ and_(ebx, Map::kElementsKindMask);
-    __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
-    DeoptimizeIf(not_equal, instr->environment());
-  }
-
-  // Set up the parameters to the stub/runtime call.
+  // Set up the parameters to the stub/runtime call.
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  // Boilerplate already exists, constant elements are never accessed.
-  // Pass an empty fixed array.
-  __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
+  __ push(Immediate(instr->hydrogen()->constant_elements()));
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -4395,150 +3973,20 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
-            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
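
DoArrayLiteral picks its code path from the literal's nesting depth and length; the depth test itself is cut off above this hunk, so the threshold below is an assumed stand-in for FastCloneShallowArrayStub's real maximum cloned length:

#include <cstdio>

// Assumed bound; the real constant is FastCloneShallowArrayStub's maximum
// cloned length, whose value is not visible in this hunk.
constexpr int kMaxInlineCloneLength = 8;

// Which path an array literal takes in the reverted DoArrayLiteral.
const char* PickArrayLiteralPath(int depth, int length) {
  if (depth > 1) return "Runtime::kCreateArrayLiteral";      // nested literal
  if (length > kMaxInlineCloneLength)
    return "Runtime::kCreateArrayLiteralShallow";            // too long
  return "FastCloneShallowArrayStub(CLONE_ELEMENTS)";        // stub clone
}

int main() { std::puts(PickArrayLiteralPath(1, 3)); }
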
 
 
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
-                            Register result,
-                            Register source,
-                            int* offset) {
-  ASSERT(!source.is(ecx));
-  ASSERT(!result.is(ecx));
-
-  if (FLAG_debug_code) {
-    __ LoadHeapObject(ecx, object);
-    __ cmp(source, ecx);
-    __ Assert(equal, "Unexpected object literal boilerplate");
-  }
-
-  // Only elements backing stores for non-COW arrays need to be copied.
-  Handle<FixedArrayBase> elements(object->elements());
-  bool has_elements = elements->length() > 0 &&
-      elements->map() != isolate()->heap()->fixed_cow_array_map();
-
-  // Increase the offset so that subsequent objects end up right after
-  // this object and its backing store.
-  int object_offset = *offset;
-  int object_size = object->map()->instance_size();
-  int elements_offset = *offset + object_size;
-  int elements_size = has_elements ? elements->Size() : 0;
-  *offset += object_size + elements_size;
-
-  // Copy object header.
-  ASSERT(object->properties()->length() == 0);
-  int inobject_properties = object->map()->inobject_properties();
-  int header_size = object_size - inobject_properties * kPointerSize;
-  for (int i = 0; i < header_size; i += kPointerSize) {
-    if (has_elements && i == JSObject::kElementsOffset) {
-      __ lea(ecx, Operand(result, elements_offset));
-    } else {
-      __ mov(ecx, FieldOperand(source, i));
-    }
-    __ mov(FieldOperand(result, object_offset + i), ecx);
-  }
-
-  // Copy in-object properties.
-  for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
-    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
-    if (value->IsJSObject()) {
-      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-      __ lea(ecx, Operand(result, *offset));
-      __ mov(FieldOperand(result, total_offset), ecx);
-      __ LoadHeapObject(source, value_object);
-      EmitDeepCopy(value_object, result, source, offset);
-    } else if (value->IsHeapObject()) {
-      __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
-      __ mov(FieldOperand(result, total_offset), ecx);
-    } else {
-      __ mov(FieldOperand(result, total_offset), Immediate(value));
-    }
-  }
-
-  if (has_elements) {
-    // Copy elements backing store header.
-    __ LoadHeapObject(source, elements);
-    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
-      __ mov(ecx, FieldOperand(source, i));
-      __ mov(FieldOperand(result, elements_offset + i), ecx);
-    }
-
-    // Copy elements backing store content.
-    int elements_length = elements->length();
-    if (elements->IsFixedDoubleArray()) {
-      Handle<FixedDoubleArray> double_array =
-          Handle<FixedDoubleArray>::cast(elements);
-      for (int i = 0; i < elements_length; i++) {
-        int64_t value = double_array->get_representation(i);
-        int32_t value_low = value & 0xFFFFFFFF;
-        int32_t value_high = value >> 32;
-        int total_offset =
-            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
-        __ mov(FieldOperand(result, total_offset), Immediate(value_low));
-        __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
-      }
-    } else if (elements->IsFixedArray()) {
-      for (int i = 0; i < elements_length; i++) {
-        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
-        if (value->IsJSObject()) {
-          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-          __ lea(ecx, Operand(result, *offset));
-          __ mov(FieldOperand(result, total_offset), ecx);
-          __ LoadHeapObject(source, value_object);
-          EmitDeepCopy(value_object, result, source, offset);
-        } else if (value->IsHeapObject()) {
-          __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
-          __ mov(FieldOperand(result, total_offset), ecx);
-        } else {
-          __ mov(FieldOperand(result, total_offset), Immediate(value));
-        }
-      }
-    } else {
-      UNREACHABLE();
-    }
-  }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  int size = instr->hydrogen()->total_size();
-
-  // Allocate all objects that are part of the literal in one big
-  // allocation. This avoids multiple limit checks.
-  Label allocated, runtime_allocate;
-  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
-  __ bind(&allocated);
-  int offset = 0;
-  __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
-  EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
-  ASSERT_EQ(size, offset);
-}
-
-
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
-  Handle<FixedArray> constant_properties =
-      instr->hydrogen()->constant_properties();
-
-  // Set up the parameters to the stub/runtime call.
-  __ PushHeapObject(literals);
+  // Set up the parameters to the stub/runtime call.
+  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ push(Immediate(constant_properties));
+  __ push(Immediate(instr->hydrogen()->constant_properties()));
   int flags = instr->hydrogen()->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -4547,16 +3995,11 @@
       : ObjectLiteral::kNoFlags;
   __ push(Immediate(Smi::FromInt(flags)));
 
-  // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   }
 }
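
The flags pushed for the object-literal runtime call form a small bitmask packed into a Smi. A sketch of that packing with assumed bit values (the real ObjectLiteral constants are defined elsewhere in V8):

#include <cstdio>

// Stand-ins for the ObjectLiteral flag bits; values assumed for illustration.
enum ObjectLiteralFlags {
  kNoFlags      = 0,
  kFastElements = 1 << 0,
  kHasFunction  = 1 << 1,
};

int main() {
  // The codegen ORs together the properties that hold for this literal and
  // passes the packed value to the runtime as a small integer (Smi).
  bool fast_elements = true, has_function = false;
  int flags = kNoFlags;
  if (fast_elements) flags |= kFastElements;
  if (has_function)  flags |= kHasFunction;
  std::printf("flags pushed for the runtime call: %d\n", flags);
}
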
 
@@ -4629,11 +4072,12 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode());
+    FastNewClosureStub stub(
+        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ push(Immediate(shared_info));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
-    __ push(esi);
+    __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(Immediate(shared_info));
     __ push(Immediate(pretenure
                       ? factory()->true_value()
@@ -4645,7 +4089,11 @@
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
   LOperand* input = instr->InputAt(1);
-  EmitPushTaggedOperand(input);
+  if (input->IsConstantOperand()) {
+    __ push(ToImmediate(input));
+  } else {
+    __ push(ToOperand(input));
+  }
   CallRuntime(Runtime::kTypeof, 1, instr);
 }
 
@@ -4657,11 +4105,12 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition =
-      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
-  if (final_branch_condition != no_condition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
-  }
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
 }
 
 
@@ -4705,12 +4154,10 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label);
-    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
-    final_branch_condition = equal;
+    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+    final_branch_condition = above_equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4728,8 +4175,11 @@
     final_branch_condition = zero;
 
   } else {
+    final_branch_condition = not_equal;
     __ jmp(false_label);
+    // A dead branch instruction will be generated after this point.
   }
+
   return final_branch_condition;
 }
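
EmitTypeofIs deliberately returns the branch condition instead of branching itself, so the caller can fuse the test with its own jump and exploit fallthrough. In the reverted version it always returns a usable condition (not_equal for unknown literals), which is why DoTypeofIsAndBranch above no longer guards against no_condition. A loose sketch of that contract, not the real instruction selection:

#include <cstdio>
#include <string>

enum Condition { kEqual, kAboveEqual, kNotEqual };

// Emits the compare for `typeof x == "name"` and returns the condition the
// caller should branch on, rather than branching itself; the caller fuses
// the test with its own jump and can fall through to the next block.
Condition EmitTypeofIs(const std::string& type_name) {
  if (type_name == "number")   return kEqual;       // after a HeapNumber cmp
  if (type_name == "function") return kAboveEqual;  // after instance-type cmp
  return kNotEqual;  // unknown literal: jump to false, branch is dead code
}

int main() {
  // Caller side: EmitBranch(true_block, false_block, condition).
  std::printf("branch on condition %d\n", EmitTypeofIs("function"));
}
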
 
@@ -4769,7 +4219,9 @@
   int patch_size = Deoptimizer::patch_size();
   if (current_pc < last_lazy_deopt_pc_ + patch_size) {
     int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    __ Nop(padding_size);
+    while (padding_size-- > 0) {
+      __ nop();
+    }
   }
   last_lazy_deopt_pc_ = masm()->pc_offset();
 }
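
EnsureSpaceForLazyDeopt pads the instruction stream so a later lazy-deopt patch cannot overwrite the instruction that follows; the revert swaps one multi-byte Nop for a loop of single-byte nops. A sketch of the same logic over a plain byte buffer:

#include <cstdint>
#include <vector>

// Pad the instruction stream so a later lazy-deopt patch (a call of
// patch_size bytes) cannot overwrite the instruction that follows.
void EnsureSpaceForLazyDeopt(std::vector<uint8_t>* code,
                             size_t last_lazy_deopt_pc, size_t patch_size) {
  size_t current_pc = code->size();
  if (current_pc < last_lazy_deopt_pc + patch_size) {
    size_t padding_size = last_lazy_deopt_pc + patch_size - current_pc;
    // Reverted form: single-byte nops (0x90 on ia32) in a loop, instead of
    // the one multi-byte __ Nop(padding_size) it replaces.
    while (padding_size-- > 0) code->push_back(0x90);
  }
}

int main() {
  std::vector<uint8_t> code;             // empty buffer, pc == 0
  EnsureSpaceForLazyDeopt(&code, 0, 5);  // needs 5 bytes of padding
  return code.size() == 5 ? 0 : 1;
}
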
@@ -4793,7 +4245,11 @@
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
   __ push(ToOperand(obj));
-  EmitPushTaggedOperand(key);
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -4825,7 +4281,6 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -4890,8 +4345,16 @@
 void LCodeGen::DoIn(LIn* instr) {
   LOperand* obj = instr->object();
   LOperand* key = instr->key();
-  EmitPushTaggedOperand(key);
-  EmitPushTaggedOperand(obj);
+  if (key->IsConstantOperand()) {
+    __ push(ToImmediate(key));
+  } else {
+    __ push(ToOperand(key));
+  }
+  if (obj->IsConstantOperand()) {
+    __ push(ToImmediate(obj));
+  } else {
+    __ push(ToOperand(obj));
+  }
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
@@ -4901,84 +4364,6 @@
 }
 
 
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  DeoptimizeIf(equal, instr->environment());
-
-  __ cmp(eax, isolate()->factory()->null_value());
-  DeoptimizeIf(equal, instr->environment());
-
-  __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
-
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr->environment());
-
-  Label use_cache, call_runtime;
-  __ CheckEnumCache(&call_runtime);
-
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ jmp(&use_cache, Label::kNear);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(eax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr->environment());
-  __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
-  Register map = ToRegister(instr->map());
-  Register result = ToRegister(instr->result());
-  __ LoadInstanceDescriptors(map, result);
-  __ mov(result,
-         FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
-  __ mov(result,
-         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
-  __ test(result, result);
-  DeoptimizeIf(equal, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  __ cmp(ToRegister(instr->map()),
-         FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  Register object = ToRegister(instr->object());
-  Register index = ToRegister(instr->index());
-
-  Label out_of_object, done;
-  __ cmp(index, Immediate(0));
-  __ j(less, &out_of_object);
-  __ mov(object, FieldOperand(object,
-                              index,
-                              times_half_pointer_size,
-                              JSObject::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&out_of_object);
-  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
-  __ neg(index);
-  // Index is now equal to out of object property index plus 1.
-  __ mov(object, FieldOperand(object,
-                              index,
-                              times_half_pointer_size,
-                              FixedArray::kHeaderSize - kPointerSize));
-  __ bind(&done);
-}
-
-
 #undef __
 
 } }  // namespace v8::internal
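
Several hunks above drop the instr() accessor from LDeferredCode subclasses, but the underlying deferred-code pattern survives the revert: slow paths are registered while the fast path is emitted, then generated out of line at the end. A simplified sketch using std::function in place of the virtual Generate():

#include <functional>
#include <vector>

// Slow paths are recorded during main emission and generated out of line
// afterwards, keeping the hot path compact. std::function stands in for
// the virtual Generate() of LDeferredCode.
struct DeferredCode {
  std::function<void()> generate;
};

struct CodeGen {
  std::vector<DeferredCode> deferred;
  void EmitFastPath() { /* jumps to a deferred entry label on failure */ }
  void GenerateDeferredCode() {
    for (auto& d : deferred) d.generate();  // all slow paths, back to back
  }
};

int main() {
  CodeGen cg;
  cg.deferred.push_back({[] { /* e.g. call Runtime::kNewObject */ }});
  cg.EmitFastPath();
  cg.GenerateDeferredCode();
}
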
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 52befc6..d955450 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -77,13 +77,7 @@
   Operand ToOperand(LOperand* op) const;
   Register ToRegister(LOperand* op) const;
   XMMRegister ToDoubleRegister(LOperand* op) const;
-
-  bool IsInteger32(LConstantOperand* op) const;
-  Immediate ToInteger32Immediate(LOperand* op) const {
-    return Immediate(ToInteger32(LConstantOperand::cast(op)));
-  }
-
-  Handle<Object> ToHandle(LConstantOperand* op) const;
+  Immediate ToImmediate(LOperand* op);
 
   // The operand denoting the second word (the one with a higher address) of
   // a double stack slot.
@@ -104,16 +98,11 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
-  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
-  void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
-
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
@@ -141,8 +130,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  StrictModeFlag strict_mode_flag() const {
-    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  int strict_mode_flag() const {
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -213,6 +202,8 @@
                          LInstruction* instr,
                          CallKind call_kind);
 
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
+
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
 
@@ -231,8 +222,6 @@
   Register ToRegister(int index) const;
   XMMRegister ToDoubleRegister(int index) const;
   int ToInteger32(LConstantOperand* op) const;
-
-  double ToDouble(LConstantOperand* op) const;
   Operand BuildFastArrayOperand(LOperand* elements_pointer,
                                 LOperand* key,
                                 ElementsKind elements_kind,
@@ -244,8 +233,8 @@
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
   void DoMathSqrt(LUnaryMathOperation* instr);
+  void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
-  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -264,20 +253,17 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
-                        Register temp,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
-                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -287,13 +273,6 @@
                          Label* is_not_object,
                          Label* is_object);
 
-  // Emits optimized code for %_IsString(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsString(Register input,
-                         Register temp1,
-                         Label* is_not_string);
-
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -302,20 +281,8 @@
                                        Register object,
                                        Handle<Map> type,
                                        Handle<String> name);
-
-  // Emits optimized code to deep-copy the contents of statically known
-  // object graphs (e.g. object literal boilerplate).
-  void EmitDeepCopy(Handle<JSObject> object,
-                    Register result,
-                    Register source,
-                    int* offset);
-
   void EnsureSpaceForLazyDeopt();
 
-  // Emits code for pushing either a tagged constant, a (non-double)
-  // register, or a stack slot operand.
-  void EmitPushTaggedOperand(LOperand* operand);
-
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -371,20 +338,16 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
+      : codegen_(codegen), external_exit_(NULL) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
 
-  void SetExit(Label* exit) { external_exit_ = exit; }
+  void SetExit(Label* exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -395,7 +358,6 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
-  int instruction_index_;
 };
 
 } }  // namespace v8::internal
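
The header revert collapses constant materialization back to a single ToImmediate(): every constant operand becomes an x86 immediate, whereas the removed interface split this into ToInteger32Immediate vs ToHandle so tagged constants could be loaded via LoadObject. A sketch of the reverted, simpler contract with stand-in types:

#include <cassert>
#include <cstdint>

// Stand-ins: an operand either names a register or wraps a constant.
struct LOperand { bool is_constant; int32_t constant_value; };
struct Immediate { int32_t value; };

// Reverted contract: any constant operand becomes an x86 immediate.
Immediate ToImmediate(const LOperand* op) {
  assert(op->is_constant);
  return Immediate{op->constant_value};
}

int main() {
  LOperand c{true, 42};
  return ToImmediate(&c).value == 42 ? 0 : 1;
}
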
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 510d9f1..fcf1f91 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -303,24 +303,14 @@
     }
 
   } else if (source->IsConstantOperand()) {
-    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    Immediate src = cgen_->ToImmediate(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
-      if (cgen_->IsInteger32(constant_source)) {
-        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
-      } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
-      }
+      __ Set(dst, src);
     } else {
-      ASSERT(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
-      if (cgen_->IsInteger32(constant_source)) {
-        __ Set(dst, cgen_->ToInteger32Immediate(constant_source));
-      } else {
-        Register tmp = EnsureTempRegister();
-        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
-        __ mov(dst, tmp);
-      }
+      __ Set(dst, src);
     }
 
   } else if (source->IsDoubleRegister()) {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 2bfbb67..3dc220d 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -110,17 +110,22 @@
 }
 
 
-void LInstruction::PrintDataTo(StringStream* stream) {
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
   stream->Add("= ");
-  for (int i = 0; i < InputCount(); i++) {
+  for (int i = 0; i < inputs_.length(); i++) {
     if (i > 0) stream->Add(" ");
-    InputAt(i)->PrintTo(stream);
+    inputs_[i]->PrintTo(stream);
   }
 }
 
 
-void LInstruction::PrintOutputOperandTo(StringStream* stream) {
-  if (HasResult()) result()->PrintTo(stream);
+template<int R, int I, int T>
+void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
+  for (int i = 0; i < results_.length(); i++) {
+    if (i > 0) stream->Add(" ");
+    results_[i]->PrintTo(stream);
+  }
 }
 
 
@@ -209,11 +214,10 @@
 }
 
 
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(is_strict() ? " === null" : " == null");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -225,13 +229,6 @@
 }
 
 
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_string(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -246,14 +243,6 @@
 }
 
 
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if string_compare(");
-  InputAt(1)->PrintTo(stream);
-  InputAt(2)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -298,12 +287,6 @@
 }
 
 
-void LMathPowHalf::PrintDataTo(StringStream* stream) {
-  stream->Add("/pow_half ");
-  InputAt(0)->PrintTo(stream);
-}
-
-
 void LLoadContextSlot::PrintDataTo(StringStream* stream) {
   InputAt(0)->PrintTo(stream);
   stream->Add("[%d]", slot_index());
@@ -384,7 +367,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("L_Mark empty blocks", this);
+  HPhase phase("Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -464,14 +447,8 @@
 }
 
 
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -546,8 +523,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new(zone()) LChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  chunk_ = new LChunk(info(), graph());
+  HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -576,15 +553,20 @@
 }
 
 
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
-                                  Register::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                                  XMMRegister::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
 }
 
 
@@ -599,30 +581,30 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                                      LUnallocated::USED_AT_START));
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+  return Use(value, new LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
-                                             LUnallocated::USED_AT_START));
+  return Use(value, new LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
 }
 
 
@@ -657,7 +639,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+      :  Use(value, new LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -666,7 +648,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  operand->set_virtual_register(value->id());
+  allocator_->RecordUse(value, operand);
   return operand;
 }
 
@@ -674,17 +656,22 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  result->set_virtual_register(current_instruction_->id());
+  allocator_->RecordDefinition(current_instruction_, result);
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -692,16 +679,14 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -722,9 +707,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator));
+  instr->set_environment(CreateEnvironment(hydrogen_env));
   return instr;
 }
 
@@ -754,7 +737,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasObservableSideEffects()) {
+  if (hinstr->HasSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -766,8 +749,7 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
-      !hinstr->HasObservableSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -784,48 +766,67 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_));
+  instr->set_pointer_map(new LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand =
-      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  operand->set_virtual_register(allocator_->GetVirtualRegister());
-  if (!allocator_->AllocationOk()) {
-    Abort("Not enough virtual registers (temps).");
-  }
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new(zone()) LLabel(instr->block());
+  return new LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* context = UseFixed(instr->context(), esi);
+    LOperand* left = UseFixed(instr->left(), edx);
+    LOperand* right = UseFixed(instr->right(), eax);
+    LArithmeticT* result = new LArithmeticT(op, context, left, right);
+    return MarkAsCall(DefineFixed(result, eax), instr);
+  }
 }
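
The restored DoBit helper dispatches on representation, replacing the per-operator DoBitwise entry point removed further down. A sketch of that dispatch with illustrative names:

#include <cstdio>

enum class Rep { kInteger32, kTagged };

// Untagged int32 inputs lower to one inline ALU instruction whose result
// reuses the left operand's register; tagged inputs go through the generic
// binary-op stub in fixed registers and are marked as a call.
const char* SelectBitwiseLowering(Rep rep) {
  return rep == Rep::kInteger32
      ? "LBitI: inline op, DefineSameAsFirst(left)"
      : "LArithmeticT: left in edx, right in eax, MarkAsCall";
}

int main() { std::puts(SelectBitwiseLowering(Rep::kTagged)); }
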
 
 
@@ -838,7 +839,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* left = UseFixed(instr->left(), edx);
     LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right);
+    LArithmeticT* result = new LArithmeticT(op, context, left, right);
     return MarkAsCall(DefineFixed(result, eax), instr);
   }
 
@@ -872,7 +873,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -885,7 +886,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+  LArithmeticD* result = new LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -905,7 +906,7 @@
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
   LArithmeticT* result =
-      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+      new LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -993,26 +994,20 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(
-    HEnvironment* hydrogen_env,
-    int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer =
-      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber ||
-         hydrogen_env->frame_type() != JS_FUNCTION);
+  ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
-  LEnvironment* result =
-      new(zone()) LEnvironment(hydrogen_env->closure(),
-                               hydrogen_env->frame_type(),
-                               ast_id,
-                               hydrogen_env->parameter_count(),
-                               argument_count_,
-                               value_count,
-                               outer);
-  int argument_index = *argument_index_accumulator;
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1021,69 +1016,56 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new(zone()) LArgument(argument_index++);
+      op = new LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
-  if (hydrogen_env->frame_type() == JS_FUNCTION) {
-    *argument_index_accumulator = argument_index;
-  }
-
   return result;
 }
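
CreateEnvironment loses its argument-index accumulator in the revert, but its recursive shape is unchanged: the environment for an inlined frame chains to the environment of its outer frame, which is built first. A stand-in sketch of that recursion:

#include <memory>
#include <vector>

// Deopt environments mirror the chain of (possibly inlined) frames, so the
// translation recurses on the outer frame first, then records this frame's
// values. Types are simplified stand-ins for HEnvironment/LEnvironment.
struct HEnv { const HEnv* outer; std::vector<int> values; };
struct LEnv { std::unique_ptr<LEnv> outer; std::vector<int> values; };

std::unique_ptr<LEnv> CreateEnvironment(const HEnv* h) {
  if (h == nullptr) return nullptr;               // outermost frame
  auto result = std::make_unique<LEnv>();
  result->outer = CreateEnvironment(h->outer);    // enclosing frame first
  result->values = h->values;                     // then this frame's slots
  return result;
}

int main() {
  HEnv outer{nullptr, {1, 2}};
  HEnv inner{&outer, {3}};
  auto env = CreateEnvironment(&inner);
  return env->outer->values.size() == 2 ? 0 : 1;
}
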
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  if (value->EmitAtUses()) {
-    ASSERT(value->IsConstant());
-    ASSERT(!value->representation().IsDouble());
-    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    ASSERT(v->IsConstant());
+    ASSERT(!v->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new(zone()) LGoto(successor->block_id());
+    return new LGoto(successor->block_id());
   }
-
-  // Untagged integers or doubles, smis and booleans don't require a
-  // deoptimization environment nor a temp register.
-  Representation rep = value->representation();
-  HType type = value->type();
-  if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) {
-    return new(zone()) LBranch(UseRegister(value), NULL);
-  }
-
   ToBooleanStub::Types expected = instr->expected_input_types();
   // We need a temporary register when we have to access the map *or* we have
   // no type info yet, in which case we handle all cases (including the ones
   // involving maps).
   bool needs_temp = expected.NeedsMap() || expected.IsEmpty();
   LOperand* temp = needs_temp ? TempRegister() : NULL;
-  return AssignEnvironment(new(zone()) LBranch(UseRegister(value), temp));
+  return AssignEnvironment(new LBranch(UseRegister(v), temp));
 }
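
DoBranch keeps its constant fast path on both sides of the revert: a branch whose input is a constant emitted at its use folds into an unconditional LGoto, and only the general case emits LBranch with a ToBoolean dispatch (plus a temp register when the map must be inspected). A sketch with illustrative types:

#include <cstdio>

struct BranchInput { bool is_constant; bool constant_value; };

// Constant inputs need no compare at all: the branch collapses into a goto
// to the taken successor. Only the general case pays for ToBoolean dispatch.
const char* LowerBranch(const BranchInput& in) {
  if (in.is_constant)
    return in.constant_value ? "LGoto -> FirstSuccessor"
                             : "LGoto -> SecondSuccessor";
  return "LBranch(UseRegister(value), maybe_temp)";
}

int main() { std::puts(LowerBranch({true, false})); }
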
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpMapAndBranch(value);
+  return new LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new(zone()) LArgumentsElements);
+  return DefineAsRegister(new LArgumentsElements);
 }
 
 
@@ -1091,7 +1073,7 @@
   LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
   LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
   LOperand* context = UseFixed(instr->context(), esi);
-  LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+  LInstanceOf* result = new LInstanceOf(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1099,7 +1081,7 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(
+      new LInstanceOfKnownGlobal(
           UseFixed(instr->context(), esi),
           UseFixed(instr->left(), InstanceofStub::left()),
           FixedTemp(edi));
@@ -1107,25 +1089,17 @@
 }
 
 
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
-  LOperand* receiver = UseRegister(instr->receiver());
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LOperand* temp = TempRegister();
-  LWrapReceiver* result =
-      new(zone()) LWrapReceiver(receiver, function, temp);
-  return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), edi);
   LOperand* receiver = UseFixed(instr->receiver(), eax);
   LOperand* length = UseFixed(instr->length(), ebx);
   LOperand* elements = UseFixed(instr->elements(), ecx);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
-                                                        receiver,
-                                                        length,
-                                                        elements);
+  LOperand* temp = FixedTemp(edx);
+  LApplyArguments* result = new LApplyArguments(function,
+                                                receiver,
+                                                length,
+                                                elements,
+                                                temp);
   return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1133,50 +1107,42 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseAny(instr->argument());
-  return new(zone()) LPushArgument(argument);
+  return new LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+  return DefineAsRegister(new LOuterContext(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context));
+  return DefineAsRegister(new LGlobalObject(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr);
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, eax), instr);
 }
 
 
@@ -1184,7 +1150,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  LInvokeFunction* result = new LInvokeFunction(context, function);
   return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1196,25 +1162,17 @@
     ASSERT(instr->value()->representation().IsDouble());
     LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
-                                                                  input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     return DefineSameAsFirst(result);
-  } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
+  } else if (op == kMathSin || op == kMathCos) {
     LOperand* context = UseFixed(instr->context(), esi);
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
-                                                                  input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
     LOperand* context = UseAny(instr->context());  // Deferred use by MathAbs.
-    if (op == kMathPowHalf) {
-      LOperand* temp = TempRegister();
-      LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
-      return DefineSameAsFirst(result);
-    }
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
-                                                                  input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(context, input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1224,6 +1182,8 @@
         return AssignEnvironment(DefineAsRegister(result));
       case kMathSqrt:
         return DefineSameAsFirst(result);
+      case kMathPowHalf:
+        return DefineSameAsFirst(result);
       default:
         UNREACHABLE();
         return NULL;
@@ -1237,7 +1197,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseFixed(instr->key(), ecx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new(zone()) LCallKeyed(context, key);
+  LCallKeyed* result = new LCallKeyed(context, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1245,7 +1205,7 @@
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallNamed* result = new(zone()) LCallNamed(context);
+  LCallNamed* result = new LCallNamed(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1253,14 +1213,14 @@
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallGlobal* result = new(zone()) LCallGlobal(context);
+  LCallGlobal* result = new LCallGlobal(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr);
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, eax), instr);
 }
 
 
@@ -1268,16 +1228,15 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* constructor = UseFixed(instr->constructor(), edi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new(zone()) LCallNew(context, constructor);
+  LCallNew* result = new LCallNew(context, constructor);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* function = UseFixed(instr->function(), edi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new(zone()) LCallFunction(context, function);
+  LCallFunction* result = new LCallFunction(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1285,7 +1244,7 @@
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
+  return MarkAsCall(DefineFixed(new LCallRuntime(context), eax), instr);
 }
 
 
@@ -1304,26 +1263,8 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new(zone()) LBitI(left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* context = UseFixed(instr->context(), esi);
-    LOperand* left = UseFixed(instr->left(), edx);
-    LOperand* right = UseFixed(instr->right(), eax);
-    LArithmeticT* result =
-        new(zone()) LArithmeticT(instr->op(), context, left, right);
-    return MarkAsCall(DefineFixed(result, eax), instr);
-  }
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
 }
 
 
@@ -1331,11 +1272,21 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new(zone()) LBitNotI(input);
+  LBitNotI* result = new LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1345,7 +1296,7 @@
     LOperand* temp = FixedTemp(edx);
     LOperand* dividend = UseFixed(instr->left(), eax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
+    LDivI* result = new LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1363,8 +1314,7 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod =
-          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is
@@ -1372,7 +1322,7 @@
       LOperand* temp = FixedTemp(edx);
       LOperand* value = UseFixed(instr->left(), eax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new(zone()) LModI(value, divisor, temp);
+      LModI* mod = new LModI(value, divisor, temp);
       result = DefineFixed(mod, edx);
     }
 
@@ -1389,7 +1339,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1405,12 +1355,8 @@
     if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
       temp = TempRegister();
     }
-    LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      AssignEnvironment(mul);
-    }
-    return DefineSameAsFirst(mul);
+    LMulI* mul = new LMulI(left, right, temp);
+    return AssignEnvironment(DefineSameAsFirst(mul));
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
   } else {
@@ -1426,7 +1372,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new(zone()) LSubI(left, right);
+    LSubI* sub = new LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1447,7 +1393,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new(zone()) LAddI(left, right);
+    LAddI* add = new LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1468,32 +1414,25 @@
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
   ASSERT(instr->left()->representation().IsDouble());
-  LOperand* left = UseFixedDouble(instr->left(), xmm2);
+  LOperand* left = UseFixedDouble(instr->left(), xmm1);
   LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), xmm1) :
+      UseFixedDouble(instr->right(), xmm2) :
       UseFixed(instr->right(), eax);
-  LPower* result = new(zone()) LPower(left, right);
+  LPower* result = new LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->global_object()->representation().IsTagged());
-  LOperand* global_object = UseFixed(instr->global_object(), eax);
-  LRandom* result = new(zone()) LRandom(global_object);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
+  bool reversed = (op == Token::GT || op == Token::LTE);
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), edx);
-  LOperand* right = UseFixed(instr->right(), eax);
-  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  LOperand* left = UseFixed(instr->left(), reversed ? eax : edx);
+  LOperand* right = UseFixed(instr->right(), reversed ? edx : eax);
+  LCmpT* result = new LCmpT(context, left, right);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1504,23 +1443,16 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    return new LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left;
-    LOperand* right;
-    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
-      left = UseRegisterOrConstantAtStart(instr->left());
-      right = UseRegisterOrConstantAtStart(instr->right());
-    } else {
-      left = UseRegisterAtStart(instr->left());
-      right = UseRegisterAtStart(instr->right());
-    }
-    return new(zone()) LCmpIDAndBranch(left, right);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1529,73 +1461,49 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseAtStart(instr->right());
-  return new(zone()) LCmpObjectEqAndBranch(left, right);
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
   HCompareConstantEqAndBranch* instr) {
-  return new(zone()) LCmpConstantEqAndBranch(
-      UseRegisterAtStart(instr->value()));
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
   // We only need a temp register for non-strict compare.
-  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
-  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
-  return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = TempRegister();
-  return new LIsStringAndBranch(UseRegister(instr->value()), temp);
+  return new LIsObjectAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+  return new LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsUndetectableAndBranch(
-      UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
-    HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* left = UseFixed(instr->left(), edx);
-  LOperand* right = UseFixed(instr->right(), eax);
-
-  LStringCompareAndBranch* result = new
-      LStringCompareAndBranch(context, left, right);
-
-  return MarkAsCall(result, instr);
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LHasInstanceTypeAndBranch(
-      UseRegisterAtStart(instr->value()),
-      TempRegister());
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()),
+                                       TempRegister());
 }
 
 
@@ -1604,14 +1512,14 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+  return DefineAsRegister(new LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
+  return new LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
 
@@ -1619,48 +1527,40 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
-                                           TempRegister(),
-                                           TempRegister());
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister(),
+                                   TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
+  return DefineAsRegister(new LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LElementsKind(object));
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
-  return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
-  LOperand* date = UseFixed(instr->value(), eax);
-  LDateField* result =
-      new(zone()) LDateField(date, FixedTemp(ecx), instr->index());
-  return MarkAsCall(DefineFixed(result, eax), instr);
+  LValueOf* result = new LValueOf(object, TempRegister());
+  return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  return AssignEnvironment(new(zone()) LBoundsCheck(
+  return AssignEnvironment(new LBoundsCheck(
       UseRegisterOrConstantAtStart(instr->index()),
       UseAtStart(instr->length())));
 }
@@ -1676,7 +1576,7 @@
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseFixed(instr->value(), eax);
-  return MarkAsCall(new(zone()) LThrow(context, value), instr);
+  return MarkAsCall(new LThrow(context, value), instr);
 }
 
 
@@ -1699,11 +1599,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      // Temp register only necessary for minus zero check.
-      LOperand* temp = instr->deoptimize_on_minus_zero()
-                       ? TempRegister()
-                       : NULL;
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
+      LNumberUntagD* res = new LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1715,10 +1611,10 @@
             (truncating && CpuFeatures::IsSupported(SSE3))
             ? NULL
             : FixedTemp(xmm1);
-        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1728,7 +1624,7 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+      LNumberTagD* result = new LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
@@ -1737,23 +1633,21 @@
       LOperand* value = needs_temp ?
           UseTempRegister(instr->value()) : UseRegister(instr->value());
       LOperand* temp = needs_temp ? TempRegister() : NULL;
-      return AssignEnvironment(
-          DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new(zone()) LSmiTag(value));
+        return DefineSameAsFirst(new LSmiTag(value));
       } else {
-        LNumberTagI* result = new(zone()) LNumberTagI(value);
+        LNumberTagI* result = new LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      return DefineAsRegister(
-          new(zone()) LInteger32ToDouble(Use(instr->value())));
+      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
     }
   }
   UNREACHABLE();
@@ -1763,46 +1657,40 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
-  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
+  LCheckInstanceType* result = new LCheckInstanceType(value, temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckSmi(value));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  // If the target is in new space, we'll emit a global cell compare and so
-  // want the value in a register.  If the target gets promoted before we
-  // emit code, we will still get the register but will do an immediate
-  // compare instead of the cell compare.  This is safe.
-  LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
-      ? UseRegisterAtStart(instr->value())
-      : UseAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckFunction(value));
+  LOperand* value = UseAtStart(instr->value());
+  return AssignEnvironment(new LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new(zone()) LCheckMap(value);
+  LCheckMap* result = new LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1812,39 +1700,67 @@
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
     LOperand* reg = UseRegister(value);
-    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+    return DefineAsRegister(new LClampDToUint8(reg));
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
-    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
+    return DefineFixed(new LClampIToUint8(reg), eax);
   } else {
     ASSERT(input_rep.IsTagged());
     LOperand* reg = UseFixed(value, eax);
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
     LOperand* temp = FixedTemp(xmm1);
-    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
+    LClampTToUint8* result = new LClampTToUint8(reg, temp);
     return AssignEnvironment(DefineFixed(result, eax));
   }
 }
 
 
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+
+  LInstruction* result;
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    LOperand* temp_reg =
+        CpuFeatures::IsSupported(SSE3) ? NULL : TempRegister();
+    result = DefineAsRegister(new LDoubleToI(reg, temp_reg));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseRegister(value);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* xmm_temp =
+        CpuFeatures::IsSupported(SSE3) ? NULL : FixedTemp(xmm1);
+    result = DefineSameAsFirst(new LTaggedToI(reg, xmm_temp));
+  }
+  return AssignEnvironment(result);
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new(zone()) LReturn(UseFixed(instr->value(), eax));
+  return new LReturn(UseFixed(instr->value(), eax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LConstantI);
+    return DefineAsRegister(new LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
     LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
         ? TempRegister()
         : NULL;
-    return DefineAsRegister(new(zone()) LConstantD(temp));
+    return DefineAsRegister(new LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new(zone()) LConstantT);
+    return DefineAsRegister(new LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1853,8 +1769,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
+  LLoadGlobalCell* result = new LLoadGlobalCell;
+  return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1863,16 +1779,15 @@
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* global_object = UseFixed(instr->global_object(), eax);
-  LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+      new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
+  return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1881,56 +1796,54 @@
   LOperand* global_object = UseFixed(instr->global_object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
   LStoreGlobalGeneric* result =
-      new(zone()) LStoreGlobalGeneric(context, global_object, value);
+      new LStoreGlobalGeneric(context, global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return DefineAsRegister(new LLoadContextSlot(context));
 }
 
 
 LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
   LOperand* value;
   LOperand* temp;
-  LOperand* context = UseRegister(instr->context());
   if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
     value = UseTempRegister(instr->value());
     temp = TempRegister();
   } else {
+    context = UseRegister(instr->context());
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return new LStoreContextSlot(context, value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+  return DefineAsRegister(new LLoadNamedField(obj));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
     HLoadNamedFieldPolymorphic* instr) {
   ASSERT(instr->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
   if (instr->need_generic()) {
-    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* obj = UseFixed(instr->object(), eax);
     LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
+        new LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
-    LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* obj = UseRegisterAtStart(instr->object());
     LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(context, obj);
+        new LLoadNamedFieldPolymorphic(context, obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1939,7 +1852,7 @@
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseFixed(instr->object(), eax);
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+  LLoadNamedGeneric* result = new LLoadNamedGeneric(context, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -1947,21 +1860,21 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
-                                         TempRegister())));
+      new LLoadFunctionPrototype(UseRegister(instr->function()),
+                                 TempRegister())));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadElements(input));
+  return DefineAsRegister(new LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new LLoadExternalArrayPointer(input));
 }
 
 
@@ -1971,9 +1884,8 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
-  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
-  return DefineAsRegister(result);
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
@@ -1984,7 +1896,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+      new LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1992,18 +1904,19 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
   ASSERT(
-      (instr->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer,
+      new LLoadKeyedSpecializedArrayElement(external_pointer,
                                             key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
@@ -2019,8 +1932,7 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* key = UseFixed(instr->key(), eax);
 
-  LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key);
+  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2032,14 +1944,15 @@
   ASSERT(instr->object()->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
 
-  LOperand* obj = UseRegister(instr->object());
+  LOperand* obj = UseTempRegister(instr->object());
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
       : UseRegisterAtStart(instr->value());
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastElement(obj, key, val);
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
 }
 
 
@@ -2053,18 +1966,19 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
-      (instr->value()->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->value()->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2082,9 +1996,9 @@
     val = UseRegister(instr->value());
   }
 
-  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                        key,
-                                                        val);
+  return new LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                key,
+                                                val);
 }
 
 
@@ -2099,45 +2013,17 @@
   ASSERT(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
-      new(zone()) LStoreKeyedGeneric(context, object, key, value);
+      new LStoreKeyedGeneric(context, object, key, value);
   return MarkAsCall(result, instr);
 }
 
 
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
-    HTransitionElementsKind* instr) {
-  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
-    LOperand* object = UseRegister(instr->object());
-    LOperand* new_map_reg = TempRegister();
-    LOperand* temp_reg = TempRegister();
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
-    return DefineSameAsFirst(result);
-  } else {
-    LOperand* object = UseFixed(instr->object(), eax);
-    LOperand* fixed_object_reg = FixedTemp(edx);
-    LOperand* new_map_reg = FixedTemp(ebx);
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object,
-                                            new_map_reg,
-                                            fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, eax), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
-  LOperand* obj;
-  if (needs_write_barrier) {
-    obj = instr->is_in_object()
-        ? UseRegister(instr->object())
-        : UseTempRegister(instr->object());
-  } else {
-    obj = UseRegisterAtStart(instr->object());
-  }
+  LOperand* obj = needs_write_barrier
+      ? UseTempRegister(instr->object())
+      : UseRegisterAtStart(instr->object());
 
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
@@ -2149,7 +2035,7 @@
       ? TempRegister()
       : NULL;
 
-  return new(zone()) LStoreNamedField(obj, val, temp);
+  return new LStoreNamedField(obj, val, temp);
 }
 
 
@@ -2158,8 +2044,7 @@
   LOperand* object = UseFixed(instr->object(), edx);
   LOperand* value = UseFixed(instr->value(), eax);
 
-  LStoreNamedGeneric* result =
-      new(zone()) LStoreNamedGeneric(context, object, value);
+  LStoreNamedGeneric* result = new LStoreNamedGeneric(context, object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2168,7 +2053,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
+  LStringAdd* string_add = new LStringAdd(context, left, right);
   return MarkAsCall(DefineFixed(string_add, eax), instr);
 }
 
@@ -2177,8 +2062,7 @@
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
   LOperand* context = UseAny(instr->context());
-  LStringCharCodeAt* result =
-      new(zone()) LStringCharCodeAt(context, string, index);
+  LStringCharCodeAt* result = new LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
@@ -2186,58 +2070,38 @@
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
   LOperand* context = UseAny(instr->context());
-  LStringCharFromCode* result =
-      new(zone()) LStringCharFromCode(context, char_code);
+  LStringCharFromCode* result = new LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* temp = TempRegister();
-  LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
+  return DefineAsRegister(new LStringLength(string));
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LArrayLiteral(context), eax), instr);
+  return MarkAsCall(DefineFixed(new LArrayLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LObjectLiteral(context), eax), instr);
+  return MarkAsCall(DefineFixed(new LObjectLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LRegExpLiteral(context), eax), instr);
+  return MarkAsCall(DefineFixed(new LRegExpLiteral(context), eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  return MarkAsCall(
-      DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
+  return MarkAsCall(DefineFixed(new LFunctionLiteral(context), eax), instr);
 }
 
 
@@ -2245,7 +2109,7 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* object = UseAtStart(instr->object());
   LOperand* key = UseOrConstantAtStart(instr->key());
-  LDeleteProperty* result = new(zone()) LDeleteProperty(context, object, key);
+  LDeleteProperty* result = new LDeleteProperty(context, object, key);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2253,13 +2117,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new(zone()) LOsrEntry);
+  return AssignEnvironment(new LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  return DefineAsSpilled(new LParameter, spill_index);
 }
 
 
@@ -2269,14 +2133,14 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   argument_count_ -= instr->argument_count();
-  LCallStub* result = new(zone()) LCallStub(context);
+  LCallStub* result = new LCallStub(context);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2294,15 +2158,14 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result =
-      new(zone()) LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), eax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  LToFastProperties* result = new LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2310,19 +2173,19 @@
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* value = UseAtStart(instr->value());
-  LTypeof* result = new(zone()) LTypeof(context, value);
+  LTypeof* result = new LTypeof(context, value);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new(zone()) LIsConstructCallAndBranch(TempRegister());
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2346,7 +2209,7 @@
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
     ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
-    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
+    LLazyBailout* lazy_bailout = new LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2361,12 +2224,11 @@
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
     LOperand* context = UseFixed(instr->context(), esi);
-    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+    return MarkAsCall(new LStackCheck(context), instr);
   } else {
     ASSERT(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
-    return AssignEnvironment(
-        AssignPointerMap(new(zone()) LStackCheck(context)));
+    return AssignEnvironment(AssignPointerMap(new LStackCheck(context)));
   }
 }
 
@@ -2375,11 +2237,9 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind(),
-                                               instr->is_construct());
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2387,8 +2247,7 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->
-      DiscardInlined(false);
+  HEnvironment* outer = current_block_->last_environment()->outer();
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
@@ -2398,40 +2257,11 @@
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new(zone()) LIn(context, key, object);
+  LIn* result = new LIn(context, key, object);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
-  LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->enumerable(), eax);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
-  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
-  LOperand* map = UseRegister(instr->map());
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* map = UseRegisterAtStart(instr->map());
-  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 4ecce96..b0ab6b4 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,7 +43,6 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
-  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -82,13 +81,11 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
-  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
-  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -104,12 +101,10 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNilAndBranch)                             \
+  V(IsNullAndBranch)                            \
   V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
-  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -126,7 +121,6 @@
   V(LoadNamedField)                             \
   V(LoadNamedFieldPolymorphic)                  \
   V(LoadNamedGeneric)                           \
-  V(MathPowHalf)                                \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -137,7 +131,6 @@
   V(OuterContext)                               \
   V(Parameter)                                  \
   V(Power)                                      \
-  V(Random)                                     \
   V(PushArgument)                               \
   V(RegExpLiteral)                              \
   V(Return)                                     \
@@ -163,18 +156,11 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)
+  V(ValueOf)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -205,8 +191,8 @@
   virtual void CompileToNative(LCodeGen* generator) = 0;
   virtual const char* Mnemonic() const = 0;
   virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream) = 0;
+  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
 
   enum Opcode {
     // Declare a unique enum value for each instruction.
@@ -302,6 +288,9 @@
   int TempCount() { return T; }
   LOperand* TempAt(int i) { return temps_[i]; }
 
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
  protected:
   EmbeddedContainer<LOperand*, R> results_;
   EmbeddedContainer<LOperand*, I> inputs_;
@@ -457,33 +446,18 @@
 };
 
 
-class LWrapReceiver: public LTemplateInstruction<1, 2, 1> {
- public:
-  LWrapReceiver(LOperand* receiver,
-                LOperand* function,
-                LOperand* temp) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments: public LTemplateInstruction<1, 4, 1> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
                   LOperand* length,
-                  LOperand* elements) {
+                  LOperand* elements,
+                  LOperand* temp) {
     inputs_[0] = function;
     inputs_[1] = receiver;
     inputs_[2] = length;
     inputs_[3] = elements;
+    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
@@ -607,24 +581,6 @@
 };
 
 
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
- public:
-  LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
-    inputs_[1] = context;
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* context() { return inputs_[1]; }
-  LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -649,18 +605,17 @@
 };
 
 
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNilAndBranch(LOperand* value, LOperand* temp) {
+  LIsNullAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
-  EqualityKind kind() const { return hydrogen()->kind(); }
-  NilValue nil() const { return hydrogen()->nil(); }
+  bool is_strict() const { return hydrogen()->is_strict(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -679,19 +634,6 @@
 };
 
 
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
-  LIsStringAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -719,24 +661,6 @@
 };
 
 
-class LStringCompareAndBranch: public LControlInstruction<3, 0> {
- public:
-  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
-    inputs_[0] = context;
-    inputs_[1] = left;
-    inputs_[2] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
-                               "string-compare-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
@@ -863,15 +787,18 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return hydrogen()->op(); }
+  Token::Value op() const { return op_; }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ private:
+  Token::Value op_;
 };
 
 
@@ -1019,24 +946,6 @@
 };
 
 
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
-  LDateField(LOperand* date, LOperand* temp, Smi* index)
-      : index_(index) {
-    inputs_[0] = date;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(DateField)
-
-  Smi* index() const { return index_; }
-
- private:
-  Smi* index_;
-};
-
-
 class LThrow: public LTemplateInstruction<0, 2, 0> {
  public:
   LThrow(LOperand* context, LOperand* value) {
@@ -1085,17 +994,6 @@
 };
 
 
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LRandom(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
-  DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1330,8 +1228,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
-  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1352,7 +1248,7 @@
   LOperand* global_object() { return InputAt(1); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(2); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1386,6 +1282,7 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
+  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1402,9 +1299,7 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1426,17 +1321,6 @@
 };
 
 
-class LDeclareGlobals: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LDeclareGlobals(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
 class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGlobalObject(LOperand* context) {
@@ -1528,19 +1412,17 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 2, 0> {
+class LCallFunction: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LCallFunction(LOperand* context, LOperand* function) {
+  explicit LCallFunction(LOperand* context) {
     inputs_[0] = context;
-    inputs_[1] = function;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
   LOperand* context() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-  int arity() const { return hydrogen()->argument_count() - 1; }
+  int arity() const { return hydrogen()->argument_count() - 2; }
 };
 
 
@@ -1676,11 +1558,10 @@
 };
 
 
-class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
  public:
-  explicit LNumberUntagD(LOperand* value, LOperand* temp) {
+  explicit LNumberUntagD(LOperand* value) {
     inputs_[0] = value;
-    temps_[0] = temp;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
@@ -1723,6 +1604,7 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1744,7 +1626,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1834,31 +1716,7 @@
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
   LOperand* value() { return inputs_[3]; }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* new_map_temp,
-                          LOperand* temp_reg) {
-    inputs_[0] = object;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp_reg;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_reg() { return temps_[0]; }
-  LOperand* temp_reg() { return temps_[1]; }
-  Handle<Map> original_map() { return hydrogen()->original_map(); }
-  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1930,8 +1788,6 @@
     inputs_[0] = value;
   }
 
-  LOperand* value() { return inputs_[0]; }
-
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -2031,33 +1887,6 @@
 };
 
 
-class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
- public:
-  LAllocateObject(LOperand* context, LOperand* temp) {
-    inputs_[0] = context;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-
-  LOperand* context() { return inputs_[0]; }
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LFastLiteral(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
 class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArrayLiteral(LOperand* context) {
@@ -2222,64 +2051,6 @@
 };
 
 
-class LForInPrepareMap: public LTemplateInstruction<1, 2, 0> {
- public:
-  LForInPrepareMap(LOperand* context, LOperand* object) {
-    inputs_[0] = context;
-    inputs_[1] = object;
-  }
-
-  LOperand* context() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
-  }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadFieldByIndex(LOperand* object, LOperand* index) {
-    inputs_[0] = object;
-    inputs_[1] = index;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2353,7 +2124,6 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
-        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2383,7 +2153,6 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
-  Zone* zone() { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2393,6 +2162,7 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
 
@@ -2443,6 +2213,8 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2477,12 +2249,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2492,7 +2264,6 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
-  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 60e38a6..ce6d6a6 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,8 +44,7 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      has_frame_(false) {
+      allow_stub_calls_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -53,75 +52,33 @@
 }
 
 
-void MacroAssembler::InNewSpace(
-    Register object,
-    Register scratch,
-    Condition cc,
-    Label* condition_met,
-    Label::Distance condition_met_distance) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
-  } else {
-    mov(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register addr,
+                                       Register scratch) {
+  if (emit_debug_code()) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
   }
-  // Check that we can use a test_b.
-  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
-  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
-  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
-           | (1 << MemoryChunk::IN_TO_SPACE);
-  // If non-zero, the page belongs to new-space.
-  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-         static_cast<uint8_t>(mask));
-  j(cc, condition_met, condition_met_distance);
-}
 
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  and_(object, ~Page::kPageAlignmentMask);
 
-void MacroAssembler::RememberedSetHelper(
-    Register object,  // Only used for debug checks.
-    Register addr,
-    Register scratch,
-    SaveFPRegsMode save_fp,
-    MacroAssembler::RememberedSetFinalAction and_then) {
-  Label done;
-  if (FLAG_debug_code) {
-    Label ok;
-    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-  }
-  // Load store buffer top.
-  ExternalReference store_buffer =
-      ExternalReference::store_buffer_top(isolate());
-  mov(scratch, Operand::StaticVariable(store_buffer));
-  // Store pointer to buffer.
-  mov(Operand(scratch, 0), addr);
-  // Increment buffer top.
-  add(scratch, Immediate(kPointerSize));
-  // Write back new top of buffer.
-  mov(Operand::StaticVariable(store_buffer), scratch);
-  // Call stub on end of buffer.
-  // Check for end of buffer.
-  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
-  if (and_then == kReturnAtEnd) {
-    Label buffer_overflowed;
-    j(not_equal, &buffer_overflowed, Label::kNear);
-    ret(0);
-    bind(&buffer_overflowed);
-  } else {
-    ASSERT(and_then == kFallThroughAtEnd);
-    j(equal, &done, Label::kNear);
-  }
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(save_fp);
-  CallStub(&store_buffer_overflow);
-  if (and_then == kReturnAtEnd) {
-    ret(0);
-  } else {
-    ASSERT(and_then == kFallThroughAtEnd);
-    bind(&done);
-  }
+  // Compute the number of the region covering addr. See the
+  // Page::GetRegionNumberForAddress method for more details.
+  shr(addr, Page::kRegionSizeLog2);
+  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
+
+  // Set dirty mark for region.
+  // Bit tests with a memory operand should be avoided on Intel processors,
+  // as they usually have long latency and multiple uops. We therefore load
+  // the word containing the bit into a register first and store it back
+  // after setting the bit.
+  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
+  bts(Operand(scratch), addr);
+  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
 }
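This hunk swaps the write barrier implementations. The deleted RememberedSetHelper belongs to the newer store-buffer scheme: each recorded slot address is appended to a buffer, and an overflow stub runs when the buffer fills. The restored RecordWriteHelper is 3.6's dirty-region (card-marking) barrier: it sets one bit in a per-page dirty word for the region containing the updated slot. A C sketch of that marking step, with the layout constants assumed rather than copied from spaces.h:

    #include <cstdint>

    // Assumed 3.6-era layout: 8 KB pages and 256-byte regions, so the 32
    // regions of a page fit one 32-bit dirty word at kDirtyFlagOffset.
    const uintptr_t kPageAlignmentMask = (1u << 13) - 1;
    const int kRegionSizeLog2 = 8;
    const int kDirtyFlagOffset = 2 * sizeof(void*);  // placeholder offset

    void MarkRegionDirty(uintptr_t object, uintptr_t slot) {
      uintptr_t page = object & ~kPageAlignmentMask;           // page start
      uint32_t region =
          (slot & kPageAlignmentMask) >> kRegionSizeLog2;      // 0..31
      uint32_t* dirty = reinterpret_cast<uint32_t*>(page + kDirtyFlagOffset);
      *dirty |= 1u << region;                                  // the bts above
    }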
 
 
@@ -155,144 +112,100 @@
 }
 
 
-void MacroAssembler::RecordWriteArray(Register object,
-                                      Register value,
-                                      Register index,
-                                      SaveFPRegsMode save_fp,
-                                      RememberedSetAction remembered_set_action,
-                                      SmiCheck smi_check) {
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
-  Label done;
-
-  // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
-    test(value, Immediate(kSmiTagMask));
-    j(zero, &done);
-  }
-
-  // Array access: calculate the destination address in the same manner as
-  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
-  // into an array of words.
-  Register dst = index;
-  lea(dst, Operand(object, index, times_half_pointer_size,
-                   FixedArray::kHeaderSize - kHeapObjectTag));
-
-  RecordWrite(
-      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
-  bind(&done);
-
-  // Clobber clobbered input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance branch_near) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    mov(scratch, Operand(object));
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    and_(Operand(scratch),
+         Immediate(ExternalReference::new_space_mask(isolate())));
+    cmp(Operand(scratch),
+        Immediate(ExternalReference::new_space_start(isolate())));
+    j(cc, branch, branch_near);
+  } else {
+    int32_t new_space_start = reinterpret_cast<int32_t>(
+        ExternalReference::new_space_start(isolate()).address());
+    lea(scratch, Operand(object, -new_space_start));
+    and_(scratch, isolate()->heap()->NewSpaceMask());
+    j(cc, branch, branch_near);
   }
 }
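Both branches of the restored InNewSpace compute the same predicate: new space is a single block aligned to its reserved size, so a pointer lies inside it exactly when masking away the offset bits leaves new_space_start. In C terms (a sketch; the mask width depends on the reserved semispace size):

    #include <cstdint>

    // j(equal, ...) is taken exactly when the object is in new space.
    bool InNewSpace(uintptr_t addr, uintptr_t start, uintptr_t mask) {
      // Serializer path:      (addr & mask) == start
      // Non-serializer path:  ((addr - start) & mask) == 0
      // These agree because start is aligned with respect to the mask.
      return ((addr - start) & mask) == 0;
    }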
 
 
-void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    SaveFPRegsMode save_fp,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(Register object,
+                                 int offset,
+                                 Register value,
+                                 Register scratch) {
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
+  // catch stores of Smis and stores into young gen.
   Label done;
 
   // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    JumpIfSmi(value, &done, Label::kNear);
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(value, &done, Label::kNear);
+
+  InNewSpace(object, value, equal, &done, Label::kNear);
+
+  // The offset is relative to a tagged or untagged HeapObject pointer,
+  // so either offset or offset + kHeapObjectTag must be a
+  // multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize) ||
+         IsAligned(offset + kHeapObjectTag, kPointerSize));
+
+  Register dst = scratch;
+  if (offset != 0) {
+    lea(dst, Operand(object, offset));
+  } else {
+    // Array access: calculate the destination address in the same manner as
+    // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+    // into an array of words.
+    STATIC_ASSERT(kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTag == 0);
+    lea(dst, Operand(object, dst, times_half_pointer_size,
+                     FixedArray::kHeaderSize - kHeapObjectTag));
   }
-
-  // Although the object register is tagged, the offset is relative to the start
-  // of the object, so so offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
-
-  lea(dst, FieldOperand(object, offset));
-  if (emit_debug_code()) {
-    Label ok;
-    test_b(dst, (1 << kPointerSizeLog2) - 1);
-    j(zero, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-  }
-
-  RecordWrite(
-      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+  RecordWriteHelper(object, dst, value);
 
   bind(&done);
 
-  // Clobber clobbered input registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
+    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
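Skipping the barrier for smi stores (the removed INLINE_SMI_CHECK path and the restored unconditional JumpIfSmi alike) is sound because smis are immediates, not heap pointers: with kSmiTag == 0 the low bit of a smi word is always clear, so storing a smi can never create a pointer the collector needs to trace. The test JumpIfSmi compiles down to is just:

    // kSmiTag == 0 on ia32; the 31-bit value lives in the upper bits.
    inline bool IsSmi(intptr_t word) { return (word & 1) == 0; }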
 
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value,
-                                 SaveFPRegsMode fp_mode,
-                                 RememberedSetAction remembered_set_action,
-                                 SmiCheck smi_check) {
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
-  if (emit_debug_code()) {
-    AbortIfSmi(object);
-  }
-
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
-    return;
-  }
-
-  if (FLAG_debug_code) {
-    Label ok;
-    cmp(value, Operand(address, 0));
-    j(equal, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-  }
-
+                                 Register value) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
 
-  if (smi_check == INLINE_SMI_CHECK) {
-    // Skip barrier if writing a smi.
-    JumpIfSmi(value, &done, Label::kNear);
-  }
+  // Skip barrier if writing a smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(value, &done, Label::kNear);
 
-  CheckPageFlag(value,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersToHereAreInterestingMask,
-                zero,
-                &done,
-                Label::kNear);
-  CheckPageFlag(object,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersFromHereAreInterestingMask,
-                zero,
-                &done,
-                Label::kNear);
+  InNewSpace(object, value, equal, &done);
 
-  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
-  CallStub(&stub);
+  RecordWriteHelper(object, address, value);
 
   bind(&done);
 
-  // Clobber clobbered registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
+    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
   }
@@ -311,7 +224,7 @@
 
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
-    xor_(dst, dst);  // Shorter than mov.
+    xor_(dst, Operand(dst));  // Shorter than mov.
   } else {
     mov(dst, x);
   }
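The "Shorter than mov" comment is about instruction encoding: zeroing a register with xor saves three bytes per occurrence, at the cost of clobbering EFLAGS, which no caller of Set depends on here. The standard ia32 encodings, for comparison:

    // 31 C0             xor eax, eax   ; 2 bytes, also writes EFLAGS
    // B8 00 00 00 00    mov eax, 0     ; 5 bytes, leaves EFLAGS intact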
@@ -352,15 +265,7 @@
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   // see ROOT_ACCESSOR macro in factory.h
-  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
-  cmp(with, value);
-}
-
-
-void MacroAssembler::CompareRoot(const Operand& with,
-                                 Heap::RootListIndex index) {
-  // see ROOT_ACCESSOR macro in factory.h
-  Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
+  Handle<Object> value(&isolate()->heap()->roots_address()[index]);
   cmp(with, value);
 }
 
@@ -382,153 +287,22 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Label* fail,
-                                             Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastSmiOnlyElementValue);
-  j(below_equal, fail, distance);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastElementValue);
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
-                                              Label* fail,
-                                              Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Map::kMaximumBitField2FastSmiOnlyElementValue);
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register maybe_number,
-    Register elements,
-    Register key,
-    Register scratch1,
-    XMMRegister scratch2,
-    Label* fail,
-    bool specialize_for_processor) {
-  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
-  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
-  CheckMap(maybe_number,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  cmp(FieldOperand(maybe_number, offset),
-      Immediate(kNaNOrInfinityLowerBoundUpper32));
-  j(greater_equal, &maybe_nan, Label::kNear);
-
-  bind(&not_nan);
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
-    bind(&have_double_value);
-    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
-           scratch2);
-  } else {
-    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
-    bind(&have_double_value);
-    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
-  }
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  j(greater, &is_nan, Label::kNear);
-  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
-  j(zero, &not_nan);
-  bind(&is_nan);
-  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
-  } else {
-    fld_d(Operand::StaticVariable(canonical_nan_reference));
-  }
-  jmp(&have_double_value, Label::kNear);
-
-  bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
-  // Preserve original value.
-  mov(scratch1, maybe_number);
-  SmiUntag(scratch1);
-  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
-    CpuFeatures::Scope fscope(SSE2);
-    cvtsi2sd(scratch2, scratch1);
-    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
-           scratch2);
-  } else {
-    push(scratch1);
-    fild_s(Operand(esp, 0));
-    pop(scratch1);
-    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
-  }
-  bind(&done);
-}
-
-
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success,
-                                CompareMapMode mode) {
-  cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
-  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    Map* transitioned_fast_element_map(
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
-    ASSERT(transitioned_fast_element_map == NULL ||
-           map->elements_kind() != FAST_ELEMENTS);
-    if (transitioned_fast_element_map != NULL) {
-      j(equal, early_success, Label::kNear);
-      cmp(FieldOperand(obj, HeapObject::kMapOffset),
-          Handle<Map>(transitioned_fast_element_map));
-    }
-
-    Map* transitioned_double_map(
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
-    ASSERT(transitioned_double_map == NULL ||
-           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-    if (transitioned_double_map != NULL) {
-      j(equal, early_success, Label::kNear);
-      cmp(FieldOperand(obj, HeapObject::kMapOffset),
-          Handle<Map>(transitioned_double_map));
-    }
-  }
-}
-
-
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type,
-                              CompareMapMode mode) {
+                              SmiCheckType smi_check_type) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-
-  Label success;
-  CompareMap(obj, map, &success, mode);
+  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
   j(not_equal, fail);
-  bind(&success);
 }
 
 
@@ -571,7 +345,7 @@
                                             Register scratch,
                                             Label* fail) {
   movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   cmp(scratch,
       LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   j(above, fail);
@@ -581,7 +355,8 @@
 void MacroAssembler::FCmp() {
   if (CpuFeatures::IsSupported(CMOV)) {
     fucomip();
-    fstp(0);
+    ffree(0);
+    fincstp();
   } else {
     fucompp();
     push(eax);
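The ffree/fincstp pair restored in FCmp is the classic two-instruction expansion of the fstp(0) it replaces: ffree(0) marks the top x87 register empty and fincstp pops the register stack pointer, so after fucomip has set EFLAGS the FPU stack ends up clean on either version. Schematically:

    // Two equivalent ways to discard st(0) once fucomip has set EFLAGS:
    //   fstp st(0)              ; one instruction (code being removed)
    //   ffree st(0) ; fincstp   ; two instructions (3.6 code, same effect)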
@@ -627,7 +402,7 @@
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
-  mov(ebp, esp);
+  mov(ebp, Operand(esp));
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
@@ -649,12 +424,12 @@
 
 
 void MacroAssembler::EnterExitFramePrologue() {
-  // Set up the frame structure on the stack.
+  // Setup the frame structure on the stack.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
-  mov(ebp, esp);
+  mov(ebp, Operand(esp));
 
   // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
@@ -676,14 +451,14 @@
   if (save_doubles) {
     CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
-    sub(esp, Immediate(space));
+    sub(Operand(esp), Immediate(space));
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
-    sub(esp, Immediate(argc * kPointerSize));
+    sub(Operand(esp), Immediate(argc * kPointerSize));
   }
 
   // Get the required frame alignment for the OS.
@@ -701,9 +476,9 @@
 void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
-  // Set up argc and argv in callee-saved registers.
+  // Setup argc and argv in callee-saved registers.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, eax);
+  mov(edi, Operand(eax));
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
@@ -757,68 +532,55 @@
 
 
 void MacroAssembler::LeaveApiExitFrame() {
-  mov(esp, ebp);
+  mov(esp, Operand(ebp));
   pop(ebp);
 
   LeaveExitFrameEpilogue();
 }
 
 
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
-                                    int handler_index) {
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // We will build up the handler from the bottom by pushing on the stack.
-  // First push the frame pointer and context.
-  if (kind == StackHandler::JS_ENTRY) {
-    // The frame pointer does not point to a JS frame so we save NULL for
-    // ebp. We expect the code throwing an exception to check ebp before
-    // dereferencing it to restore the context.
-    push(Immediate(0));  // NULL frame pointer.
-    push(Immediate(Smi::FromInt(0)));  // No context.
-  } else {
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // The pc (return address) is already on TOS.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
     push(ebp);
     push(esi);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for ebp. We expect the code throwing an exception to check ebp
+    // before dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(0));  // NULL frame pointer.
+    push(Immediate(Smi::FromInt(0)));  // No context.
   }
-  // Push the state and the code object.
-  unsigned state =
-      StackHandler::IndexField::encode(handler_index) |
-      StackHandler::KindField::encode(kind);
-  push(Immediate(state));
-  Push(CodeObject());
-
-  // Link the current handler as the next handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  push(Operand::StaticVariable(handler_address));
-  // Set this new handler as the current one.
-  mov(Operand::StaticVariable(handler_address), esp);
+  // Save the current handler as the next handler.
+  push(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
+                                                 isolate())));
+  // Link this handler as the new current one.
+  mov(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
+                                                isolate())),
+      esp);
 }
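The STATIC_ASSERTs pin down the five-word handler record this function builds: state, fp, and context are pushed in that order, the next-handler link goes on last, and the return address is already on top of stack before the pushes begin. Viewed as a C struct growing up from esp (a sketch of the asserted offsets):

    struct StackHandlerRecord {                // kSize == 5 * kPointerSize
      StackHandlerRecord* next;  // [esp +  0] previous handler in the chain
      intptr_t context;          // [esp +  4] esi, or Smi 0 for a JS entry
      intptr_t fp;               // [esp +  8] ebp, or NULL for a JS entry
      intptr_t state;            // [esp + 12] TRY_CATCH / TRY_FINALLY / ENTRY
      intptr_t pc;               // [esp + 16] return address already on TOS
    };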
 
 
 void MacroAssembler::PopTryHandler() {
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  pop(Operand::StaticVariable(handler_address));
-  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
-}
-
-
-void MacroAssembler::JumpToHandlerEntry() {
-  // Compute the handler entry address and jump to it.  The handler table is
-  // a fixed array of (smi-tagged) code offsets.
-  // eax = exception, edi = code object, edx = state.
-  mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
-  shr(edx, StackHandler::kKindWidth);
-  mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
-  SmiUntag(edx);
-  lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
-  jmp(edi);
+  pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
+                                                isolate())));
+  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
 
 
@@ -826,83 +588,99 @@
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in eax.
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // eax must hold the exception.
   if (!value.is(eax)) {
     mov(eax, value);
   }
-  // Drop the stack pointer to the top of the top handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+
+  // Drop the sp to the top of the handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress,
+                                    isolate());
   mov(esp, Operand::StaticVariable(handler_address));
-  // Restore the next handler.
+
+  // Restore next handler, context, and frame pointer; discard handler state.
   pop(Operand::StaticVariable(handler_address));
-
-  // Remove the code object and state, compute the handler address in edi.
-  pop(edi);  // Code object.
-  pop(edx);  // Index and state.
-
-  // Restore the context and frame pointer.
   pop(esi);  // Context.
   pop(ebp);  // Frame pointer.
+  pop(edx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
-  // ebp or esi.
+  // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
+  // of them.
   Label skip;
-  test(esi, esi);
-  j(zero, &skip, Label::kNear);
+  cmp(Operand(edx), Immediate(StackHandler::ENTRY));
+  j(equal, &skip, Label::kNear);
   mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   bind(&skip);
 
-  JumpToHandlerEntry();
+  ret(0);
 }
 
 
-void MacroAssembler::ThrowUncatchable(Register value) {
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
-  // The exception is expected in eax.
+  // eax must hold the exception.
   if (!value.is(eax)) {
     mov(eax, value);
   }
-  // Drop the stack pointer to the top of the top stack handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
+
+  // Drop sp to the top stack handler.
+  ExternalReference handler_address(Isolate::kHandlerAddress,
+                                    isolate());
   mov(esp, Operand::StaticVariable(handler_address));
 
-  // Unwind the handlers until the top ENTRY handler is found.
-  Label fetch_next, check_kind;
-  jmp(&check_kind, Label::kNear);
-  bind(&fetch_next);
-  mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
+  j(equal, &done, Label::kNear);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  mov(esp, Operand(esp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
 
-  bind(&check_kind);
-  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
-  test(Operand(esp, StackHandlerConstants::kStateOffset),
-       Immediate(StackHandler::KindField::kMask));
-  j(not_zero, &fetch_next);
-
-  // Set the top handler address to next handler past the top ENTRY handler.
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
   pop(Operand::StaticVariable(handler_address));
 
-  // Remove the code object and state, compute the handler address in edi.
-  pop(edi);  // Code object.
-  pop(edx);  // Index and state.
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+        Isolate::kExternalCaughtExceptionAddress,
+        isolate());
+    mov(eax, false);
+    mov(Operand::StaticVariable(external_caught), eax);
 
-  // Clear the context pointer and frame pointer (0 was saved in the handler).
-  pop(esi);
+    // Set pending exception and eax to out of memory exception.
+    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+                                        isolate());
+    mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+    mov(Operand::StaticVariable(pending_exception), eax);
+  }
+
+  // Discard the context saved in the handler and clear the context pointer.
+  pop(edx);
+  Set(esi, Immediate(0));
+
+  // Restore fp from handler and discard handler state.
   pop(ebp);
+  pop(edx);  // State.
 
-  JumpToHandlerEntry();
+  ret(0);
 }
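In terms of the handler record sketched after PushTryHandler, the loop above walks the chain by repeatedly loading the next link into esp until it finds the innermost ENTRY handler, then unlinks that handler too; the OUT_OF_MEMORY variant additionally primes the pending-exception slot before returning. The unwind, in C form:

    StackHandlerRecord* h = top_handler;       // Isolate::kHandlerAddress
    while (h->state != StackHandler::ENTRY) {
      h = h->next;                             // drop TRY_CATCH/TRY_FINALLY
    }
    top_handler = h->next;                     // pop the ENTRY handler too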
 
 
@@ -918,7 +696,7 @@
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
-    cmp(scratch, Immediate(0));
+    cmp(Operand(scratch), Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
   // Load the global context of the current context.
@@ -981,39 +759,40 @@
 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   // Xor original key with a seed.
   if (Serializer::enabled()) {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(isolate());
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
     mov(scratch, Immediate(Heap::kHashSeedRootIndex));
-    mov(scratch,
-        Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
+    mov(scratch, Operand::StaticArray(scratch,
+                                      times_pointer_size,
+                                      roots_address));
     SmiUntag(scratch);
-    xor_(r0, scratch);
+    xor_(r0, Operand(scratch));
   } else {
     int32_t seed = isolate()->heap()->HashSeed();
-    xor_(r0, Immediate(seed));
+    xor_(r0, seed);
   }
 
   // hash = ~hash + (hash << 15);
   mov(scratch, r0);
   not_(r0);
   shl(scratch, 15);
-  add(r0, scratch);
+  add(r0, Operand(scratch));
   // hash = hash ^ (hash >> 12);
   mov(scratch, r0);
   shr(scratch, 12);
-  xor_(r0, scratch);
+  xor_(r0, Operand(scratch));
   // hash = hash + (hash << 2);
   lea(r0, Operand(r0, r0, times_4, 0));
   // hash = hash ^ (hash >> 4);
   mov(scratch, r0);
   shr(scratch, 4);
-  xor_(r0, scratch);
+  xor_(r0, Operand(scratch));
   // hash = hash * 2057;
   imul(r0, r0, 2057);
   // hash = hash ^ (hash >> 16);
   mov(scratch, r0);
   shr(scratch, 16);
-  xor_(r0, scratch);
+  xor_(r0, Operand(scratch));
 }
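GetNumberHash emits, operation for operation, the well-known 32-bit integer mixing function (the Jenkins/Wang hash), with an optional seed xor when the serializer is active. Written out in C:

    #include <stdint.h>

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // the lea(r0, [r0 + r0*4]) above
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // == hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }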
 
 
@@ -1057,9 +836,9 @@
     mov(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      add(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
+      add(Operand(r2), Immediate(SeededNumberDictionary::GetProbeOffset(i)));
     }
-    and_(r2, r1);
+    and_(r2, Operand(r1));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(SeededNumberDictionary::kEntrySize == 3);
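The surrounding loop probes a power-of-two dictionary with quadratic steps; the in-line comment writes the schedule loosely as (hash + i + i*i) & mask, with GetProbeOffset(i) supplying the cumulative offset. A sketch of the index computation, assuming the triangular-number form used by V8's hash tables:

    // Probe i of a table with (power-of-two - 1) mask. Triangular offsets
    // i*(i+1)/2 visit every slot of such a table (assumed offset form).
    uint32_t ProbeIndex(uint32_t hash, uint32_t mask, uint32_t i) {
      return (hash + (i + i * i) / 2) & mask;
    }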
@@ -1115,7 +894,7 @@
   if (scratch.is(no_reg)) {
     mov(result, Operand::StaticVariable(new_space_allocation_top));
   } else {
-    mov(scratch, Immediate(new_space_allocation_top));
+    mov(Operand(scratch), Immediate(new_space_allocation_top));
     mov(result, Operand(scratch, 0));
   }
 }
@@ -1174,7 +953,7 @@
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
-  add(top_reg, Immediate(object_size));
+  add(Operand(top_reg), Immediate(object_size));
   j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1185,12 +964,12 @@
   // Tag result if requested.
   if (top_reg.is(result)) {
     if ((flags & TAG_OBJECT) != 0) {
-      sub(result, Immediate(object_size - kHeapObjectTag));
+      sub(Operand(result), Immediate(object_size - kHeapObjectTag));
     } else {
-      sub(result, Immediate(object_size));
+      sub(Operand(result), Immediate(object_size));
     }
   } else if ((flags & TAG_OBJECT) != 0) {
-    add(result, Immediate(kHeapObjectTag));
+    add(Operand(result), Immediate(kHeapObjectTag));
   }
 }
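AllocateInNewSpace is bump-pointer allocation: load the current top, add the object size, branch to gc_required on carry (address overflow) or when the limit is exceeded, publish the new top, and tag the result when TAG_OBJECT is set. The whole sequence in C (returning 0 in place of the gc_required label):

    #include <stdint.h>
    #include <stddef.h>

    const uintptr_t kHeapObjectTag = 1;  // low tag bit for heap pointers

    uintptr_t AllocateInNewSpace(size_t size, uintptr_t* top,
                                 uintptr_t limit) {
      uintptr_t result = *top;
      uintptr_t new_top = result + size;
      if (new_top < result || new_top > limit) return 0;  // needs GC
      *top = new_top;                                     // publish new top
      return result + kHeapObjectTag;                     // TAG_OBJECT case
    }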
 
@@ -1228,7 +1007,7 @@
   // We assume that element_count*element_size + header_size does not
   // overflow.
   lea(result_end, Operand(element_count, element_size, header_size));
-  add(result_end, result);
+  add(result_end, Operand(result));
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1273,7 +1052,7 @@
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
-  add(result_end, result);
+  add(result_end, Operand(result));
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1293,7 +1072,7 @@
       ExternalReference::new_space_allocation_top_address(isolate());
 
   // Make sure the object has no tag before resetting top.
-  and_(object, Immediate(~kHeapObjectTagMask));
+  and_(Operand(object), Immediate(~kHeapObjectTagMask));
 #ifdef DEBUG
   cmp(object, Operand::StaticVariable(new_space_allocation_top));
   Check(below, "Undo allocation of non allocated memory");
@@ -1332,7 +1111,7 @@
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
+  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1366,10 +1145,10 @@
   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
   ASSERT(kCharSize == 1);
-  add(scratch1, Immediate(kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
+  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
+  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
 
-  // Allocate ASCII string in new space.
+  // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
@@ -1397,7 +1176,7 @@
                                          Label* gc_required) {
   ASSERT(length > 0);
 
-  // Allocate ASCII string in new space.
+  // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::SizeFor(length),
                      result,
                      scratch1,
@@ -1501,7 +1280,7 @@
                                Register scratch) {
   Label loop, done, short_string, short_loop;
   // Experimentation shows that the short string loop is faster if length < 10.
-  cmp(length, Immediate(10));
+  cmp(Operand(length), Immediate(10));
   j(less_equal, &short_string);
 
   ASSERT(source.is(esi));
@@ -1516,12 +1295,12 @@
   mov(scratch, ecx);
   shr(ecx, 2);
   rep_movs();
-  and_(scratch, Immediate(0x3));
-  add(destination, scratch);
+  and_(Operand(scratch), Immediate(0x3));
+  add(destination, Operand(scratch));
   jmp(&done);
 
   bind(&short_string);
-  test(length, length);
+  test(length, Operand(length));
   j(zero, &done);
 
   bind(&short_loop);
@@ -1536,40 +1315,13 @@
 }
 
 
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
-                                                Register end_offset,
-                                                Register filler) {
-  Label loop, entry;
-  jmp(&entry);
-  bind(&loop);
-  mov(Operand(start_offset, 0), filler);
-  add(start_offset, Immediate(kPointerSize));
-  bind(&entry);
-  cmp(start_offset, end_offset);
-  j(less, &loop);
-}
-
-
-void MacroAssembler::BooleanBitTest(Register object,
-                                    int field_offset,
-                                    int bit_index) {
-  bit_index += kSmiTagSize + kSmiShiftSize;
-  ASSERT(IsPowerOf2(kBitsPerByte));
-  int byte_index = bit_index / kBitsPerByte;
-  int byte_bit_index = bit_index & (kBitsPerByte - 1);
-  test_b(FieldOperand(object, field_offset + byte_index),
-         static_cast<byte>(1 << byte_bit_index));
-}
-
-
-
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
   Label ok;
-  test(result, result);
+  test(result, Operand(result));
   j(not_zero, &ok);
-  test(op, op);
+  test(op, Operand(op));
   j(sign, then_label);
   bind(&ok);
 }
@@ -1581,10 +1333,10 @@
                                       Register scratch,
                                       Label* then_label) {
   Label ok;
-  test(result, result);
+  test(result, Operand(result));
   j(not_zero, &ok);
-  mov(scratch, op1);
-  or_(scratch, op2);
+  mov(scratch, Operand(op1));
+  or_(scratch, Operand(op2));
   j(sign, then_label);
   bind(&ok);
 }
@@ -1593,8 +1345,7 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss,
-                                             bool miss_on_bound_function) {
+                                             Label* miss) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -1602,15 +1353,6 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
-  if (miss_on_bound_function) {
-    // If a bound function, go to miss label.
-    mov(scratch,
-        FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
-                   SharedFunctionInfo::kBoundFunction);
-    j(not_zero, miss);
-  }
-
   // Make sure that the function has an instance prototype.
   Label non_instance;
   movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
@@ -1624,7 +1366,7 @@
   // If the prototype or initial map is the hole, don't return it and
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
-  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
+  cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
@@ -1647,32 +1389,48 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  return result;
+}
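The Try* variants restored in this hunk exist because materializing a stub's code object can itself require allocation: TryGetCode() may return a retry-after-GC failure instead of code, and every caller has to hand that failure up rather than dereference it. The idiom repeated through these functions:

    // Failure propagation pattern (as in TryCallStub above):
    MaybeObject* maybe_result = stub->TryGetCode();
    Object* result;
    if (!maybe_result->ToObject(&result)) return maybe_result;  // Failure*
    // On success, 'result' holds the freshly materialized Code object.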
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  jmp(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET);
+  return result;
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
-  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
-  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
-    add(esp, Immediate(num_arguments * kPointerSize));
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
   mov(eax, Immediate(isolate()->factory()->undefined_value()));
 }
@@ -1706,11 +1464,18 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1, kSaveFPRegs);
+  CEntryStub ces(1);
+  ces.SaveDoubles();
   CallStub(&ces);
 }
 
 
+MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
+                                            int num_arguments) {
+  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -1732,6 +1497,26 @@
 }
 
 
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
+                                            int num_arguments) {
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    // Since we did not call the stub, there was no allocation failure.
+    // Return some non-failure object.
+    return isolate()->heap()->undefined_value();
+  }
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(eax, Immediate(num_arguments));
+  mov(ebx, Immediate(ExternalReference(f, isolate())));
+  CEntryStub ces(1);
+  return TryCallStub(&ces);
+}
+
+
 void MacroAssembler::CallExternalReference(ExternalReference ref,
                                            int num_arguments) {
   mov(eax, Immediate(num_arguments));
@@ -1754,6 +1539,17 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(eax, Immediate(num_arguments));
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -1763,6 +1559,14 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
+                                                int num_arguments,
+                                                int result_size) {
+  return TryTailCallExternalReference(
+      ExternalReference(fid, isolate()), num_arguments, result_size);
+}
+
+
 // If true, a Handle<T> returned by value from a function with cdecl calling
 // convention will be returned directly as a value of location_ field in a
 // register eax.
@@ -1811,8 +1615,8 @@
 }
 
 
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
-                                              int stack_space) {
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function,
+                                                         int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   ExternalReference limit_address =
@@ -1825,8 +1629,8 @@
   mov(edi, Operand::StaticVariable(limit_address));
   add(Operand::StaticVariable(level_address), Immediate(1));
 
-  // Call the api function.
-  call(function_address, RelocInfo::RUNTIME_ENTRY);
+  // Call the api function!
+  call(function->address(), RelocInfo::RUNTIME_ENTRY);
 
   if (!kReturnHandlesDirectly) {
     // PrepareCallApiFunction saved pointer to the output slot into
@@ -1841,7 +1645,7 @@
   Label leave_exit_frame;
 
   // Check if the result handle holds 0.
-  test(eax, eax);
+  test(eax, Operand(eax));
   j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
@@ -1864,8 +1668,11 @@
   LeaveApiExitFrame();
   ret(stack_space * kPointerSize);
   bind(&promote_scheduled_exception);
-  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
-
+  MaybeObject* result =
+      TryTailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
   bind(&empty_handle);
   // It was zero; the result is undefined.
   mov(eax, isolate()->factory()->undefined_value());
@@ -1879,9 +1686,11 @@
   mov(edi, eax);
   mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   mov(eax, Immediate(delete_extensions));
-  call(eax);
+  call(Operand(eax));
   mov(eax, edi);
   jmp(&leave_exit_frame);
+
+  return result;
 }
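The prologue and epilogue around the API call maintain the isolate's handle-scope bookkeeping: save next and limit and bump level before the call; restore next and drop level after it; and if the callee grew the scope past the saved limit, run the delete-extensions hook seen at the end of this function. A sketch of the record behind the next/limit/level external references (field names assumed from those references):

    struct HandleScopeData {
      void** next;   // next free handle slot
      void** limit;  // end of the current handle block
      int level;     // scope nesting depth
    };
    // Entry: save next/limit, ++level.
    // Exit:  restore next, --level; if limit moved, DeleteExtensions().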
 
 
@@ -1893,6 +1702,15 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& ext) {
+  // Set the entry point and jump to the C entry runtime stub.
+  mov(ebx, Immediate(ext));
+  CEntryStub ces(1);
+  return TryTailCallStub(&ces);
+}
+
+
 void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
   // This macro takes the dst register to make the code more readable
   // at the call sites. However, the dst register has to be ecx to
@@ -1902,10 +1720,10 @@
   if (call_kind == CALL_AS_FUNCTION) {
     // Set to some non-zero smi by updating the least significant
     // byte.
-    mov_b(dst, 1 << kSmiTagSize);
+    mov_b(Operand(dst), 1 << kSmiTagSize);
   } else {
     // Set to smi zero by clearing the register.
-    xor_(dst, dst);
+    xor_(dst, Operand(dst));
   }
 }
 
@@ -1915,13 +1733,11 @@
                                     Handle<Code> code_constant,
                                     const Operand& code_operand,
                                     Label* done,
-                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance done_near,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
-  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -1937,7 +1753,6 @@
         // arguments.
         definitely_matches = true;
       } else {
-        *definitely_mismatches = true;
         mov(ebx, expected.immediate());
       }
     }
@@ -1953,7 +1768,7 @@
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
-      cmp(expected.reg(), actual.reg());
+      cmp(expected.reg(), Operand(actual.reg()));
       j(equal, &invoke);
       ASSERT(actual.reg().is(eax));
       ASSERT(expected.reg().is(ebx));
@@ -1965,7 +1780,7 @@
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (!code_constant.is_null()) {
       mov(edx, Immediate(code_constant));
-      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
     } else if (!code_operand.is_reg(edx)) {
       mov(edx, code_operand);
     }
@@ -1975,9 +1790,7 @@
       SetCallKind(ecx, call_kind);
       call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      if (!*definitely_mismatches) {
-        jmp(done, done_near);
-      }
+      jmp(done, done_near);
     } else {
       SetCallKind(ecx, call_kind);
       jmp(adaptor, RelocInfo::CODE_TARGET);
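InvokePrologue falls through to the callee only when the argument count provably matches; every other case routes through the ArgumentsAdaptorTrampoline, which builds an adaptor frame that pads or trims the actual arguments to the expected count. The compile-time decision, in C terms (sentinel name and value assumed from SharedFunctionInfo):

    const int kDontAdaptArgumentsSentinel = -1;  // assumed sentinel value

    bool CanSkipAdaptor(int expected, int actual) {
      return expected == actual ||
             expected == kDontAdaptArgumentsSentinel;  // accepts any count
    }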
@@ -1993,27 +1806,21 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  bool definitely_mismatches = false;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, &definitely_mismatches, flag, Label::kNear,
-                 call_wrapper, call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code));
-      SetCallKind(ecx, call_kind);
-      call(code);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(ecx, call_kind);
-      jmp(code);
-    }
-    bind(&done);
+                 &done, flag, Label::kNear, call_wrapper,
+                 call_kind);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(ecx, call_kind);
+    call(code);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
+    jmp(code);
   }
+  bind(&done);
 }
 
 
@@ -2024,27 +1831,21 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  Operand dummy(eax, 0);
-  bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, code, dummy, &done, &definitely_mismatches,
-                 flag, Label::kNear, call_wrapper, call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code, rmode));
-      SetCallKind(ecx, call_kind);
-      call(code, rmode);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(ecx, call_kind);
-      jmp(code, rmode);
-    }
-    bind(&done);
+  Operand dummy(eax);
+  InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
+                 call_wrapper, call_kind);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code, rmode));
+    SetCallKind(ecx, call_kind);
+    call(code, rmode);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(ecx, call_kind);
+    jmp(code, rmode);
   }
+  bind(&done);
 }
 
 
@@ -2053,9 +1854,6 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -2068,32 +1866,36 @@
 }
 
 
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
+  ASSERT(function->is_compiled());
   // Get the function and setup the context.
-  LoadHeapObject(edi, function);
+  mov(edi, Immediate(Handle<JSFunction>(function)));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   ParameterCount expected(function->shared()->formal_parameter_count());
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-             expected, actual, flag, call_wrapper, call_kind);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+               expected, actual, flag, call_wrapper, call_kind);
+  } else {
+    Handle<Code> code(function->code());
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET,
+               flag, call_wrapper, call_kind);
+  }
 }
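The restored InvokeFunction keeps 3.6's dual dispatch: with Crankshaft enabled it calls indirectly through the function's code-entry field, so recompilation and deoptimization take effect without patching call sites, while the fallback path binds the code object directly at code-generation time. Schematically:

    //   Crankshaft:  call [edi + JSFunction::kCodeEntryOffset] ; always current
    //   otherwise:   call <Handle<Code>(function->code())>     ; bound early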
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -2104,7 +1906,6 @@
              expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
-
 void MacroAssembler::GetBuiltinFunction(Register target,
                                         Builtins::JavaScript id) {
   // Load the JavaScript builtin function from the builtins object.
@@ -2114,7 +1915,6 @@
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
-
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   ASSERT(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
@@ -2150,46 +1950,6 @@
 }
 
 
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
-
-  // Check that the function's map is the same as the expected cached map.
-  int expected_index =
-      Context::GetContextMapIndexFromElementsKind(expected_kind);
-  cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
-  j(not_equal, no_map_match);
-
-  // Use the transitioned cached map.
-  int trans_index =
-      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
-  mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch, Register map_out) {
-  ASSERT(!function_in.is(map_out));
-  Label done;
-  mov(map_out, FieldOperand(function_in,
-                            JSFunction::kPrototypeOrInitialMapOffset));
-  if (!FLAG_smi_only_arrays) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                        FAST_ELEMENTS,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2246,29 +2006,6 @@
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    mov(result, Operand::Cell(cell));
-  } else {
-    mov(result, object);
-  }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    push(Operand::Cell(cell));
-  } else {
-    Push(object);
-  }
-}
-
-
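
The removed LoadHeapObject/PushHeapObject helpers avoided embedding new-space pointers directly in generated code: the object was referenced through a JSGlobalPropertyCell at a stable address, and the GC rewrote the cell's slot whenever the object moved. A simplified sketch of that indirection (hypothetical Object/Cell types, not V8's real heap):

    #include <cstdio>

    struct Object { int payload; };

    // Stands in for a JSGlobalPropertyCell: its own address is stable,
    // only the slot inside it is rewritten by the GC.
    struct Cell { Object* value; };

    int main() {
      Object a{1};
      Cell cell{&a};            // generated code embeds &cell, never &a

      Object b = a;             // "GC moved" the object by copying it...
      cell.value = &b;          // ...and re-targeted the cell

      std::printf("%d\n", cell.value->payload);  // load through the cell: 1
      return 0;
    }
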
 void MacroAssembler::Ret() {
   ret(0);
 }
@@ -2279,7 +2016,7 @@
     ret(bytes_dropped);
   } else {
     pop(scratch);
-    add(esp, Immediate(bytes_dropped));
+    add(Operand(esp), Immediate(bytes_dropped));
     push(scratch);
     ret(0);
   }
@@ -2288,7 +2025,7 @@
 
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
-    add(esp, Immediate(stack_elements * kPointerSize));
+    add(Operand(esp), Immediate(stack_elements * kPointerSize));
   }
 }
 
@@ -2300,6 +2037,11 @@
 }
 
 
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  mov(dst, value);
+}
+
+
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
@@ -2426,19 +2168,13 @@
     RecordComment(msg);
   }
 #endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
 
   push(eax);
   push(Immediate(p0));
   push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
-  // Disable stub call restrictions to always allow calls to abort.
-  if (!has_frame_) {
-    // We don't actually want to generate a pile of code for this, so just
-    // claim there is a stack frame, without generating one.
-    FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 2);
-  } else {
-    CallRuntime(Runtime::kAbort, 2);
-  }
+  CallRuntime(Runtime::kAbort, 2);
   // will not return here
   int3();
 }
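
The restored preamble reopens stub calls with AllowStubCallsScope instead of the removed has_frame()/FrameScope machinery. A sketch of the RAII pattern such a scope follows (stand-in Assembler type; the real scope wraps the macro assembler):

    #include <cstdio>

    class Assembler {
     public:
      bool allow_stub_calls() const { return allow_stub_calls_; }
      void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
     private:
      bool allow_stub_calls_ = false;
    };

    // Flips the flag for its lifetime and restores the old value on exit.
    class AllowStubCallsScope {
     public:
      AllowStubCallsScope(Assembler* masm, bool allow)
          : masm_(masm), old_(masm->allow_stub_calls()) {
        masm_->set_allow_stub_calls(allow);
      }
      ~AllowStubCallsScope() { masm_->set_allow_stub_calls(old_); }
     private:
      Assembler* masm_;
      bool old_;
    };

    int main() {
      Assembler masm;
      {
        AllowStubCallsScope scope(&masm, true);
        std::printf("inside scope: %d\n", masm.allow_stub_calls());   // 1
      }
      std::printf("outside scope: %d\n", masm.allow_stub_calls());    // 0
      return 0;
    }
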
@@ -2461,7 +2197,7 @@
   ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                   HeapNumber::kExponentBits));
   mov(scratch, Immediate(power + HeapNumber::kExponentBias));
-  movd(dst, scratch);
+  movd(dst, Operand(scratch));
   psllq(dst, HeapNumber::kMantissaBits);
 }
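
This helper materializes 2^power by writing the biased exponent into a register and shifting it up into the IEEE-754 exponent field; a power of two has an all-zero mantissa, so no other bits are needed. The same computation standalone (assuming IEEE-754 doubles):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double PowerOfTwo(int power) {
      const uint64_t kExponentBias = 1023;  // HeapNumber::kExponentBias
      const int kMantissaBits = 52;         // HeapNumber::kMantissaBits
      // mov(scratch, power + bias); psllq(dst, kMantissaBits)
      uint64_t bits = (kExponentBias + power) << kMantissaBits;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }

    int main() {
      std::printf("%g %g %g\n", PowerOfTwo(0), PowerOfTwo(10), PowerOfTwo(-1));
      // 1 1024 0.5
      return 0;
    }
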
 
@@ -2487,8 +2223,8 @@
                                                          Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
-  mov(scratch1, object1);
-  and_(scratch1, object2);
+  mov(scratch1, Operand(object1));
+  and_(scratch1, Operand(object2));
   JumpIfSmi(scratch1, failure);
 
   // Load instance type for both strings.
@@ -2497,7 +2233,7 @@
   movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ASCII strings.
+  // Check that both are flat ascii strings.
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
   const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
@@ -2517,12 +2253,12 @@
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
-    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
+    sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
     ASSERT(IsPowerOf2(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
-    sub(esp, Immediate(num_arguments * kPointerSize));
+    sub(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
 }
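
PrepareCallCFunction reserves num_arguments words plus one extra slot, rounds esp down to the OS frame alignment, and stores the original esp in the extra slot so the call sequence can restore it afterwards. The arithmetic, modeled with plain integers (stack addresses as ints, pointer size 4):

    #include <cassert>
    #include <cstdio>

    const int kPointerSize = 4;

    int PrepareCall(int esp, int num_arguments, int frame_alignment) {
      assert((frame_alignment & (frame_alignment - 1)) == 0);  // power of two
      int saved = esp;                               // mov(scratch, esp)
      esp -= (num_arguments + 1) * kPointerSize;     // sub(esp, ...)
      esp &= -frame_alignment;                       // and_(esp, -alignment)
      // mov(Operand(esp, num_arguments * kPointerSize), scratch) would store
      // `saved` into the extra slot; here we just show the values.
      std::printf("aligned esp = %d, saved esp = %d\n", esp, saved);
      return esp;
    }

    int main() {
      PrepareCall(1003, 2, 16);  // aligned esp = 976, saved esp = 1003
      return 0;
    }
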
 
@@ -2530,39 +2266,27 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
-  mov(eax, Immediate(function));
+  mov(Operand(eax), Immediate(function));
   CallCFunction(eax, num_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
-  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
-  call(function);
+  call(Operand(function));
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(esp, Immediate(num_arguments * kPointerSize));
+    add(Operand(esp), Immediate(num_arguments * kPointerSize));
   }
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
-}
-
-
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -2584,238 +2308,6 @@
 }
 
 
-void MacroAssembler::CheckPageFlag(
-    Register object,
-    Register scratch,
-    int mask,
-    Condition cc,
-    Label* condition_met,
-    Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
-  if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
-  } else {
-    mov(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
-  }
-  if (mask < (1 << kBitsPerByte)) {
-    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
-           static_cast<uint8_t>(mask));
-  } else {
-    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
-  }
-  j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
-                                 Register scratch0,
-                                 Register scratch1,
-                                 Label* on_black,
-                                 Label::Distance on_black_near) {
-  HasColor(object, scratch0, scratch1,
-           on_black, on_black_near,
-           1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
-                              Register bitmap_scratch,
-                              Register mask_scratch,
-                              Label* has_color,
-                              Label::Distance has_color_distance,
-                              int first_bit,
-                              int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
-
-  GetMarkBits(object, bitmap_scratch, mask_scratch);
-
-  Label other_color, word_boundary;
-  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
-  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
-  j(zero, &word_boundary, Label::kNear);
-  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
-  jmp(&other_color, Label::kNear);
-
-  bind(&word_boundary);
-  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
-
-  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
-  bind(&other_color);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
-                                 Register bitmap_reg,
-                                 Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
-  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
-  and_(bitmap_reg, addr_reg);
-  mov(ecx, addr_reg);
-  int shift =
-      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
-  shr(ecx, shift);
-  and_(ecx,
-       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
-
-  add(bitmap_reg, ecx);
-  mov(ecx, addr_reg);
-  shr(ecx, kPointerSizeLog2);
-  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
-  mov(mask_reg, Immediate(1));
-  shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
-    Register value,
-    Register bitmap_scratch,
-    Register mask_scratch,
-    Label* value_is_white_and_not_data,
-    Label::Distance distance) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
-  GetMarkBits(value, bitmap_scratch, mask_scratch);
-
-  // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  Label done;
-
-  // Since both black and grey have a 1 in the first position and white does
-  // not have a 1 there we only need to check one bit.
-  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  j(not_zero, &done, Label::kNear);
-
-  if (FLAG_debug_code) {
-    // Check for impossible bit pattern.
-    Label ok;
-    push(mask_scratch);
-    // shl.  May overflow making the check conservative.
-    add(mask_scratch, mask_scratch);
-    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-    j(zero, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-    pop(mask_scratch);
-  }
-
-  // Value is white.  We check whether it is data that doesn't need scanning.
-  // Currently only checks for HeapNumber and non-cons strings.
-  Register map = ecx;  // Holds map while checking type.
-  Register length = ecx;  // Holds length of object after checking type.
-  Label not_heap_number;
-  Label is_data_object;
-
-  // Check for heap-number
-  mov(map, FieldOperand(value, HeapObject::kMapOffset));
-  cmp(map, FACTORY->heap_number_map());
-  j(not_equal, &not_heap_number, Label::kNear);
-  mov(length, Immediate(HeapNumber::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_heap_number);
-  // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  Register instance_type = ecx;
-  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
-  j(not_zero, value_is_white_and_not_data);
-  // It's a non-indirect (non-cons and non-slice) string.
-  // If it's external, the length is just ExternalString::kSize.
-  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
-  Label not_external;
-  // External strings are the only ones with the kExternalStringTag bit
-  // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
-  test_b(instance_type, kExternalStringTag);
-  j(zero, &not_external, Label::kNear);
-  mov(length, Immediate(ExternalString::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_external);
-  // Sequential string, either ASCII or UC16.
-  ASSERT(kAsciiStringTag == 0x04);
-  and_(length, Immediate(kStringEncodingMask));
-  xor_(length, Immediate(kStringEncodingMask));
-  add(length, Immediate(0x04));
-  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
-  // by 2. If we multiply the string length as smi by this, it still
-  // won't overflow a 32-bit value.
-  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
-  ASSERT(SeqAsciiString::kMaxSize <=
-         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
-  imul(length, FieldOperand(value, String::kLengthOffset));
-  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
-  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
-  and_(length, Immediate(~kObjectAlignmentMask));
-
-  bind(&is_data_object);
-  // Value is a data object, and it is white.  Mark it black.  Since we know
-  // that the object is white we can make it black by flipping one bit.
-  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
-  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
-  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
-      length);
-  if (FLAG_debug_code) {
-    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
-    Check(less_equal, "Live Bytes Count overflow chunk size");
-  }
-
-  bind(&done);
-}
-
-
-void MacroAssembler::CheckEnumCache(Label* call_runtime) {
-  Label next;
-  mov(ecx, eax);
-  bind(&next);
-
-  // Check that there are no elements.  Register ecx contains the
-  // current JS object we've reached through the prototype chain.
-  cmp(FieldOperand(ecx, JSObject::kElementsOffset),
-      isolate()->factory()->empty_fixed_array());
-  j(not_equal, call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in ebx for the subsequent
-  // prototype load.
-  mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
-  mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(edx, call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (edx).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
-  JumpIfSmi(edx, call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  cmp(ecx, eax);
-  j(equal, &check_prototype, Label::kNear);
-  mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  cmp(edx, isolate()->factory()->empty_fixed_array());
-  j(not_equal, call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  bind(&check_prototype);
-  mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
-  cmp(ecx, isolate()->factory()->null_value());
-  j(not_equal, &next);
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 66d1ce7..8c5f5e9 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
-#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -51,13 +50,6 @@
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -69,130 +61,42 @@
 
   // ---------------------------------------------------------------------------
   // GC Support
-  enum RememberedSetFinalAction {
-    kReturnAtEnd,
-    kFallThroughAtEnd
-  };
 
-  // Record in the remembered set the fact that we have a pointer to new space
-  // at the address pointed to by the addr register.  Only works if addr is not
-  // in new space.
-  void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
-                           SaveFPRegsMode save_fp,
-                           RememberedSetFinalAction and_then);
+  // For page containing |object| mark region covering |addr| dirty.
+  // RecordWriteHelper only works if the object is not in new
+  // space.
+  void RecordWriteHelper(Register object,
+                         Register addr,
+                         Register scratch);
 
-  void CheckPageFlag(Register object,
-                     Register scratch,
-                     int mask,
-                     Condition cc,
-                     Label* condition_met,
-                     Label::Distance condition_met_distance = Label::kFar);
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,  // equal for new space, not_equal otherwise.
+                  Label* branch,
+                  Label::Distance branch_near = Label::kFar);
 
-  // Check if object is in new space.  Jumps if the object is not in new space.
-  // The register scratch can be object itself, but scratch will be clobbered.
-  void JumpIfNotInNewSpace(Register object,
-                           Register scratch,
-                           Label* branch,
-                           Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, zero, branch, distance);
-  }
-
-  // Check if object is in new space.  Jumps if the object is in new space.
-  // The register scratch can be object itself, but it will be clobbered.
-  void JumpIfInNewSpace(Register object,
-                        Register scratch,
-                        Label* branch,
-                        Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, not_zero, branch, distance);
-  }
-
-  // Check if an object has a given incremental marking color.  Also uses ecx!
-  void HasColor(Register object,
-                Register scratch0,
-                Register scratch1,
-                Label* has_color,
-                Label::Distance has_color_distance,
-                int first_bit,
-                int second_bit);
-
-  void JumpIfBlack(Register object,
-                   Register scratch0,
-                   Register scratch1,
-                   Label* on_black,
-                   Label::Distance on_black_distance = Label::kFar);
-
-  // Checks the color of an object.  If the object is already grey or black
-  // then we just fall through, since it is already live.  If it is white and
-  // we can determine that it doesn't need to be scanned, then we just mark it
-  // black and fall through.  For the rest we jump to the label so the
-  // incremental marker can fix its assumptions.
-  void EnsureNotWhite(Register object,
-                      Register scratch1,
-                      Register scratch2,
-                      Label* object_is_white_and_not_data,
-                      Label::Distance distance);
-
-  // Notify the garbage collector that we wrote a pointer into an object.
-  // |object| is the object being stored into, |value| is the object being
-  // stored.  value and scratch registers are clobbered by the operation.
-  // The offset is the offset from the start of the object, not the offset from
-  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
-  void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
-
-  // As above, but the offset has the tag presubtracted.  For use with
-  // Operand(reg, off).
-  void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     save_fp,
-                     remembered_set_action,
-                     smi_check);
-  }
-
-  // Notify the garbage collector that we wrote a pointer into a fixed array.
-  // |array| is the array being stored into, |value| is the
-  // object being stored.  |index| is the array index represented as a
-  // Smi. All registers are clobbered by the operation RecordWriteArray
+  // For page containing |object| mark region covering [object+offset]
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. If offset is zero, then the scratch register
+  // contains the array index into the elements array represented as a
+  // Smi. All registers are clobbered by the operation. RecordWrite
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
-  void RecordWriteArray(
-      Register array,
-      Register value,
-      Register index,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
 
   // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. The address and value registers are clobbered by the
+  // object being stored. All registers are clobbered by the
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
-  void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
+  void RecordWrite(Register object,
+                   Register address,
+                   Register value);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -201,6 +105,15 @@
   void DebugBreak();
 #endif
 
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
   // Enter specific kind of exit frame. Expects the number of
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
@@ -221,22 +134,6 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the global context if the map in register
-  // map_in_out is the cached Array map in the global context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
-  // Load the initial map for new Arrays from a JSFunction.
-  void LoadInitialArrayMap(Register function_in,
-                           Register scratch,
-                           Register map_out);
-
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -253,35 +150,15 @@
   void StoreToSafepointRegisterSlot(Register dst, Immediate src);
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-  void PushHeapObject(Handle<HeapObject> object);
-
-  void LoadObject(Register result, Handle<Object> object) {
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      Set(result, Immediate(object));
-    }
-  }
-
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Set up call kind marking in ecx. The method takes ecx as an
+  // Setup call kind marking in ecx. The method takes ecx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
-  void InvokeCode(Register code,
-                  const ParameterCount& expected,
-                  const ParameterCount& actual,
-                  InvokeFlag flag,
-                  const CallWrapper& call_wrapper,
-                  CallKind call_kind) {
-    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
-  }
-
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
@@ -305,7 +182,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(Handle<JSFunction> function,
+  void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -332,9 +209,8 @@
   void SafeSet(Register dst, const Immediate& x);
   void SafePush(const Immediate& x);
 
-  // Compare against a known root, e.g. undefined, null, true, ...
+  // Compare a register against a known root, e.g. undefined, null, true, ...
   void CompareRoot(Register with, Heap::RootListIndex index);
-  void CompareRoot(const Operand& with, Heap::RootListIndex index);
 
   // Compare object type for heap object.
   // Incoming register is heap_object and outgoing register is map.
@@ -349,47 +225,13 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Label* fail,
-                               Label::Distance distance = Label::kFar);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiOnlyElements(Register map,
-                                Label* fail,
-                                Label::Distance distance = Label::kFar);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements, otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register maybe_number,
-                                   Register elements,
-                                   Register key,
-                                   Register scratch1,
-                                   XMMRegister scratch2,
-                                   Label* fail,
-                                   bool specialize_for_processor);
-
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success,
-                  CompareMapMode mode = REQUIRE_EXACT_MAP);
-
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
-  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specified map.
+  // heap object)
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type,
-                CompareMapMode mode = REQUIRE_EXACT_MAP);
+                SmiCheckType smi_check_type);
 
   // Check if the map of an object is equal to a specified map and branch to a
   // specified target if equal. Skip the smi check if not required (object is
@@ -435,7 +277,7 @@
   void SmiTag(Register reg) {
     STATIC_ASSERT(kSmiTag == 0);
     STATIC_ASSERT(kSmiTagSize == 1);
-    add(reg, reg);
+    add(reg, Operand(reg));
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
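
SmiTag/SmiUntag above encode the ia32 smi scheme: kSmiTag == 0 and kSmiTagSize == 1, so tagging is a one-bit left shift (which add(reg, reg) implements) and untagging is an arithmetic right shift that preserves the sign. In standalone form:

    #include <cstdint>
    #include <cstdio>

    const int kSmiTagSize = 1;

    int32_t SmiTag(int32_t value) { return value + value; }  // value << 1, tag bit 0
    int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }  // sar

    int main() {
      std::printf("%d %d\n", SmiTag(21), SmiUntag(SmiTag(-42)));  // 42 -42
      return 0;
    }
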
@@ -490,17 +332,17 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link it into try handler chain.
-  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
 
-  // Throw to the top handler in the try handler chain.
+  // Activate the top handler in the try handler chain.
   void Throw(Register value);
 
-  // Throw past all JS frames to the top JS entry frame.
-  void ThrowUncatchable(Register value);
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -624,19 +466,9 @@
                  Register length,
                  Register scratch);
 
-  // Initialize fields with filler values.  Fields starting at |start_offset|
-  // not including end_offset are overwritten with the value in |filler|.  At
-  // the end the loop, |start_offset| takes the value of |end_offset|.
-  void InitializeFieldsWithFiller(Register start_offset,
-                                  Register end_offset,
-                                  Register filler);
-
   // ---------------------------------------------------------------------------
   // Support functions.
 
-  // Check a boolean-bit of a Smi field.
-  void BooleanBitTest(Register object, int field_offset, int bit_index);
-
   // Check if result is zero and op is negative.
   void NegativeZeroTest(Register result, Register op, Label* then_label);
 
@@ -653,8 +485,7 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss,
-                               bool miss_on_bound_function = false);
+                               Label* miss);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -672,9 +503,19 @@
   // Call a code stub.  Generate the code if necessary.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
+
   // Tail call a code stub (jump).  Generate the code if necessary.
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -682,9 +523,19 @@
   void CallRuntime(const Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
+  // Call a runtime function, returning the CodeStub object called.
+  // Try to generate the stub code if necessary.  Do not perform a GC
+  // but instead return a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
+                                              int num_arguments);
+
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
+  // Convenience function: Same as above, but takes the fid instead.
+  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
+                                              int num_arguments);
+
   // Convenience function: call an external reference.
   void CallExternalReference(ExternalReference ref, int num_arguments);
 
@@ -695,11 +546,23 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  // Convenience function: tail call a runtime routine (jump). Try to generate
+  // the code if necessary. Do not perform a GC but instead return a retry after
+  // GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
+                                                  int num_arguments,
+                                                  int result_size);
+
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
   // etc., not pushed. The argument count assumes all arguments are word sized.
@@ -724,15 +587,19 @@
   // stores the pointer to the reserved slot into esi.
   void PrepareCallApiFunction(int argc);
 
-  // Calls an API function.  Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions.  Clobbers ebx, edi and
-  // caller-save registers.  Restores context.  On return removes
-  // stack_space * kPointerSize (GCed).
-  void CallApiFunctionAndReturn(Address function_address, int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts
+  // returned value from handle and propagates exceptions.
+  // Clobbers ebx, edi and caller-save registers. Restores context.
+  // On return removes stack_space * kPointerSize (GCed).
+  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+                                           int stack_space);
 
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext);
 
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
+
+
   // ---------------------------------------------------------------------------
   // Utilities
 
@@ -757,8 +624,10 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
+  void Move(Register target, Handle<Object> value);
+
   // Push a handle value.
-  void Push(Handle<Object> handle) { push(Immediate(handle)); }
+  void Push(Handle<Object> handle) { push(handle); }
 
   Handle<Object> CodeObject() {
     ASSERT(!code_object_.is_null());
@@ -799,14 +668,11 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
-  void set_has_frame(bool value) { has_frame_ = value; }
-  bool has_frame() { return has_frame_; }
-  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Check whether the instance type represents a flat ASCII string. Jump to the
+  // Check whether the instance type represents a flat ascii string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
@@ -825,18 +691,9 @@
     return SafepointRegisterStackIndex(reg.code());
   }
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
-  // Expects object in eax and returns map with validated enum cache
-  // in eax.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Label* call_runtime);
-
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
-  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -846,12 +703,15 @@
                       Handle<Code> code_constant,
                       const Operand& code_operand,
                       Label* done,
-                      bool* definitely_mismatches,
                       InvokeFlag flag,
-                      Label::Distance done_distance,
+                      Label::Distance done_near = Label::kFar,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
   void EnterExitFramePrologue();
   void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
@@ -870,24 +730,6 @@
                                                     Register scratch,
                                                     bool gc_allowed);
 
-  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
-                  Label* condition_met,
-                  Label::Distance condition_met_distance = Label::kFar);
-
-  // Helper for finding the mark bits for an address.  Afterwards, the
-  // bitmap register points at the word with the mark bits and the mask
-  // the position of the first bit.  Uses ecx as scratch and leaves addr_reg
-  // unchanged.
-  inline void GetMarkBits(Register addr_reg,
-                          Register bitmap_reg,
-                          Register mask_reg);
-
-  // Helper for throwing exceptions.  Compute a handler address and jump to
-  // it.  See the implementation for register usage.
-  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -923,26 +765,26 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
+static inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
-                            Register index,
-                            ScaleFactor scale,
-                            int offset) {
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-inline Operand ContextOperand(Register context, int index) {
+static inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-inline Operand GlobalObjectOperand() {
+static inline Operand GlobalObjectOperand() {
   return ContextOperand(esi, Context::GLOBAL_INDEX);
 }
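
FieldOperand folds the heap-object tag into the displacement: tagged pointers carry a 1 in the low bit, so a field at byte offset `offset` lives at pointer + offset - kHeapObjectTag, and the subtraction disappears into the addressing mode for free. Sketch:

    #include <cstdint>
    #include <cstdio>

    const intptr_t kHeapObjectTag = 1;

    intptr_t FieldAddress(intptr_t tagged_pointer, int offset) {
      // Operand(object, offset - kHeapObjectTag)
      return tagged_pointer + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t object = 0x1000;                   // untagged address
      intptr_t tagged = object | kHeapObjectTag;  // 0x1001
      std::printf("%#lx\n",
                  static_cast<unsigned long>(FieldAddress(tagged, 8)));  // 0x1008
      return 0;
    }
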
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 04d6b62..d175d9e 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,7 +134,7 @@
 
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    __ add(edi, Immediate(by * char_size()));
+    __ add(Operand(edi), Immediate(by * char_size()));
   }
 }
 
@@ -152,8 +152,8 @@
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(ebx);
-  __ add(ebx, Immediate(masm_->CodeObject()));
-  __ jmp(ebx);
+  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(ebx));
 }
 
 
@@ -210,7 +210,7 @@
                                                bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ASCII character.
+  // match contains a non-ascii character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
@@ -219,7 +219,7 @@
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
     // Check that there are at least str.length() characters left in the input.
-    __ cmp(edi, Immediate(-(byte_offset + byte_length)));
+    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
     BranchOrBacktrack(greater, on_failure);
   }
 
@@ -288,7 +288,7 @@
   Label fallthrough;
   __ cmp(edi, Operand(backtrack_stackpointer(), 0));
   __ j(not_equal, &fallthrough);
-  __ add(backtrack_stackpointer(), Immediate(kPointerSize));  // Pop.
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
   BranchOrBacktrack(no_condition, on_equal);
   __ bind(&fallthrough);
 }
@@ -300,7 +300,7 @@
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
-  __ sub(ebx, edx);  // Length of capture.
+  __ sub(ebx, Operand(edx));  // Length of capture.
 
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@
     __ push(backtrack_stackpointer());
     // After this, the eax, ecx, and edi registers are available.
 
-    __ add(edx, esi);  // Start of capture
-    __ add(edi, esi);  // Start of text to match against capture.
-    __ add(ebx, edi);  // End of text to match against capture.
+    __ add(edx, Operand(esi));  // Start of capture
+    __ add(edi, Operand(esi));  // Start of text to match against capture.
+    __ add(ebx, Operand(edi));  // End of text to match against capture.
 
     Label loop;
     __ bind(&loop);
@@ -339,15 +339,15 @@
     __ movzx_b(ecx, Operand(edx, 0));
     __ or_(ecx, 0x20);
 
-    __ cmp(eax, ecx);
+    __ cmp(eax, Operand(ecx));
     __ j(not_equal, &fail);
 
     __ bind(&loop_increment);
     // Increment pointers into match and capture strings.
-    __ add(edx, Immediate(1));
-    __ add(edi, Immediate(1));
+    __ add(Operand(edx), Immediate(1));
+    __ add(Operand(edi), Immediate(1));
     // Compare to end of match, and loop if not done.
-    __ cmp(edi, ebx);
+    __ cmp(edi, Operand(ebx));
     __ j(below, &loop);
     __ jmp(&success);
 
@@ -361,9 +361,9 @@
     // Restore original value before continuing.
     __ pop(backtrack_stackpointer());
     // Drop original value of character position.
-    __ add(esp, Immediate(kPointerSize));
+    __ add(Operand(esp), Immediate(kPointerSize));
     // Compute new value of character position after the matched part.
-    __ sub(edi, esi);
+    __ sub(edi, Operand(esi));
   } else {
     ASSERT(mode_ == UC16);
     // Save registers before calling C function.
@@ -389,19 +389,16 @@
     // Set byte_offset2.
     // Found by adding negative string-end offset of current position (edi)
     // to end of string.
-    __ add(edi, esi);
+    __ add(edi, Operand(esi));
     __ mov(Operand(esp, 1 * kPointerSize), edi);
     // Set byte_offset1.
     // Start of capture, where edx already holds string-end negative offset.
-    __ add(edx, esi);
+    __ add(edx, Operand(esi));
     __ mov(Operand(esp, 0 * kPointerSize), edx);
 
-    {
-      AllowExternalCallThatCantCauseGC scope(masm_);
-      ExternalReference compare =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-      __ CallCFunction(compare, argument_count);
-    }
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(compare, argument_count);
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -409,10 +406,10 @@
     __ pop(esi);
 
     // Check if function returned non-zero for success or zero for failure.
-    __ or_(eax, eax);
+    __ or_(eax, Operand(eax));
     BranchOrBacktrack(zero, on_no_match);
     // On success, increment position by length of capture.
-    __ add(edi, ebx);
+    __ add(edi, Operand(ebx));
   }
   __ bind(&fallthrough);
 }
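
The ASCII half of this back-reference check leans on the 0x20 case bit: upper- and lower-case ASCII letters differ only in that bit, so OR-ing both characters with 0x20 before comparing yields a case-insensitive match (the full routine also range-checks that the folded value is actually a letter). Standalone:

    #include <cstdio>

    bool EqualIgnoreCaseAscii(unsigned char a, unsigned char b) {
      a |= 0x20;  // or_(eax, 0x20)
      b |= 0x20;  // or_(ecx, 0x20)
      return a == b && a >= 'a' && a <= 'z';  // fold, compare, letter check
    }

    int main() {
      std::printf("%d %d %d\n",
                  EqualIgnoreCaseAscii('A', 'a'),   // 1
                  EqualIgnoreCaseAscii('x', 'X'),   // 1
                  EqualIgnoreCaseAscii('A', 'b'));  // 0
      return 0;
    }
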
@@ -428,7 +425,7 @@
   // Find length of back-referenced capture.
   __ mov(edx, register_location(start_reg));
   __ mov(eax, register_location(start_reg + 1));
-  __ sub(eax, edx);  // Length to check.
+  __ sub(eax, Operand(edx));  // Length to check.
   // Fail on partial or illegal capture (start of capture after end of capture).
   BranchOrBacktrack(less, on_no_match);
   // Succeed on empty capture (including no capture)
@@ -436,7 +433,7 @@
 
   // Check that there are sufficient characters left in the input.
   __ mov(ebx, edi);
-  __ add(ebx, eax);
+  __ add(ebx, Operand(eax));
   BranchOrBacktrack(greater, on_no_match);
 
   // Save register to make it available below.
@@ -444,7 +441,7 @@
 
   // Compute pointers to match string and capture string
   __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
-  __ add(edx, esi);  // Start of capture.
+  __ add(edx, Operand(esi));  // Start of capture.
   __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
 
   Label loop;
@@ -459,10 +456,10 @@
   }
   __ j(not_equal, &fail);
   // Increment pointers into capture and match string.
-  __ add(edx, Immediate(char_size()));
-  __ add(ebx, Immediate(char_size()));
+  __ add(Operand(edx), Immediate(char_size()));
+  __ add(Operand(ebx), Immediate(char_size()));
   // Check if we have reached end of match area.
-  __ cmp(ebx, ecx);
+  __ cmp(ebx, Operand(ecx));
   __ j(below, &loop);
   __ jmp(&success);
 
@@ -474,7 +471,7 @@
   __ bind(&success);
   // Move current character position to position after match.
   __ mov(edi, ecx);
-  __ sub(edi, esi);
+  __ sub(Operand(edi), esi);
   // Restore backtrack stackpointer.
   __ pop(backtrack_stackpointer());
 
@@ -523,7 +520,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  ASSERT(minus < String::kMaxUC16CharCode);
   __ lea(eax, Operand(current_character(), -minus));
   __ and_(eax, mask);
   __ cmp(eax, c);
@@ -577,17 +574,17 @@
     return true;
   case '.': {
     // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
-    __ mov(eax, current_character());
-    __ xor_(eax, Immediate(0x01));
+    __ mov(Operand(eax), current_character());
+    __ xor_(Operand(eax), Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(eax, Immediate(0x0b));
+    __ sub(Operand(eax), Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     BranchOrBacktrack(below_equal, on_no_match);
     if (mode_ == UC16) {
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(eax, Immediate(0x2028 - 0x0b));
+      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
       __ cmp(eax, 0x2029 - 0x2028);
       BranchOrBacktrack(below_equal, on_no_match);
     }
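
The '.'-class test above XORs the character with 0x01, which maps '\n' (0x0a) to 0x0b and '\r' (0x0d) to 0x0c, so a single subtract-and-compare rejects both; in UC16 mode a second subtract reuses the same value for U+2028/U+2029. The computation in plain C++:

    #include <cstdint>
    #include <cstdio>

    bool IsLineTerminator(uint16_t c) {
      uint16_t x = (c ^ 0x01) - 0x0b;  // '\n' -> 0, '\r' -> 1
      if (x <= 0x0c - 0x0b) return true;
      x -= 0x2028 - 0x0b;              // U+2028 -> 1, U+2029 -> 0
      return x <= 1;
    }

    int main() {
      std::printf("%d %d %d %d %d\n",
                  IsLineTerminator('\n'), IsLineTerminator('\r'),
                  IsLineTerminator(0x2028), IsLineTerminator(0x2029),
                  IsLineTerminator('a'));  // 1 1 1 1 0
      return 0;
    }
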
@@ -596,7 +593,7 @@
   case 'w': {
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(current_character(), Immediate('z'));
+      __ cmp(Operand(current_character()), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -610,7 +607,7 @@
     Label done;
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(current_character(), Immediate('z'));
+      __ cmp(Operand(current_character()), Immediate('z'));
       __ j(above, &done);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -630,10 +627,10 @@
   case 'n': {
     // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
     // The opposite of '.'.
-    __ mov(eax, current_character());
-    __ xor_(eax, Immediate(0x01));
+    __ mov(Operand(eax), current_character());
+    __ xor_(Operand(eax), Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(eax, Immediate(0x0b));
+    __ sub(Operand(eax), Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     if (mode_ == ASCII) {
       BranchOrBacktrack(above, on_no_match);
@@ -644,7 +641,7 @@
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(eax, Immediate(0x2028 - 0x0b));
+      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
       __ cmp(eax, 1);
       BranchOrBacktrack(above, on_no_match);
       __ bind(&done);
@@ -671,12 +668,7 @@
 
   // Entry code:
   __ bind(&entry_label_);
-
-  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
-  // code is generated.
-  FrameScope scope(masm_, StackFrame::MANUAL);
-
-  // Actually emit code to start a new stack frame.
+  // Start new stack frame.
   __ push(ebp);
   __ mov(ebp, esp);
   // Save callee-save registers. Order here should correspond to order of
@@ -707,7 +699,7 @@
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(ebx);
-  __ or_(eax, eax);
+  __ or_(eax, Operand(eax));
   // If returned value is non-zero, we exit with the returned value as result.
   __ j(not_zero, &exit_label_);
 
@@ -716,13 +708,13 @@
   __ mov(ebx, Operand(ebp, kStartIndex));
 
   // Allocate space on stack for registers.
-  __ sub(esp, Immediate(num_registers_ * kPointerSize));
+  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
   // Load string length.
   __ mov(esi, Operand(ebp, kInputEnd));
   // Load input position.
   __ mov(edi, Operand(ebp, kInputStart));
   // Set up edi to be negative offset from string end.
-  __ sub(edi, esi);
+  __ sub(edi, Operand(esi));
 
   // Set eax to address of char before start of the string.
   // (effectively string position -1).
@@ -744,7 +736,7 @@
     Label init_loop;
     __ bind(&init_loop);
     __ mov(Operand(ebp, ecx, times_1, +0), eax);
-    __ sub(ecx, Immediate(kPointerSize));
+    __ sub(Operand(ecx), Immediate(kPointerSize));
     __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
     __ j(greater, &init_loop);
   }
@@ -785,12 +777,12 @@
       if (mode_ == UC16) {
         __ lea(ecx, Operand(ecx, edx, times_2, 0));
       } else {
-        __ add(ecx, edx);
+        __ add(ecx, Operand(edx));
       }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ mov(eax, register_location(i));
         // Convert to index from start of string, not end.
-        __ add(eax, ecx);
+        __ add(eax, Operand(ecx));
         if (mode_ == UC16) {
           __ sar(eax, 1);  // Convert byte index to character index.
         }
@@ -827,7 +819,7 @@
     __ push(edi);
 
     CallCheckStackGuardState(ebx);
-    __ or_(eax, eax);
+    __ or_(eax, Operand(eax));
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ j(not_zero, &exit_label_);
@@ -862,7 +854,7 @@
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
-    __ or_(eax, eax);
+    __ or_(eax, Operand(eax));
     __ j(equal, &exit_with_exception);
     // Otherwise use return value as new stack pointer.
     __ mov(backtrack_stackpointer(), eax);
@@ -1085,7 +1077,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
     int delta = code_handle->address() - re_code->address();
@@ -1141,11 +1133,6 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
-  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
-    // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address but
-    // will change pointer inside the subject handle.
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1196,8 +1183,8 @@
 
 void RegExpMacroAssemblerIA32::SafeReturn() {
   __ pop(ebx);
-  __ add(ebx, Immediate(masm_->CodeObject()));
-  __ jmp(ebx);
+  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
+  __ jmp(Operand(ebx));
 }
 
 
@@ -1209,14 +1196,14 @@
 void RegExpMacroAssemblerIA32::Push(Register source) {
   ASSERT(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
-  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
 }
 
 
 void RegExpMacroAssemblerIA32::Push(Immediate value) {
   // Notice: This updates flags, unlike normal Push.
-  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
+  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), value);
 }
 
@@ -1225,7 +1212,7 @@
   ASSERT(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
-  __ add(backtrack_stackpointer(), Immediate(kPointerSize));
+  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
 }
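
Push and Pop above maintain the downward-growing backtrack stack: Push pre-decrements backtrack_stackpointer() and stores, Pop loads and post-increments, and unlike the hardware push/pop both update the flags via add/sub. Modeled with a plain array:

    #include <cstdio>

    struct BacktrackStack {
      int data[16];
      int* sp = data + 16;  // backtrack_stackpointer(), starts at the top

      void Push(int value) { *--sp = value; }  // sub(sp, kPointerSize); mov
      int Pop() { return *sp++; }              // mov; add(sp, kPointerSize)
    };

    int main() {
      BacktrackStack stack;
      stack.Push(100);  // e.g. Code*-relative backtrack offsets
      stack.Push(200);
      std::printf("%d %d\n", stack.Pop(), stack.Pop());  // 200 100
      return 0;
    }
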
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index fd26779..ab62764 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,30 +44,19 @@
                        Code::Flags flags,
                        StubCache::Table table,
                        Register name,
-                       Register receiver,
-                       // Number of the cache entry pointer-size scaled.
                        Register offset,
                        Register extra) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   Label miss;
 
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
   if (extra.is_valid()) {
     // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+    __ mov(extra, Operand::StaticArray(offset, times_2, value_offset));
 
     // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
     __ j(not_equal, &miss);
 
     // Check that the flags match what we're looking for.
@@ -76,17 +65,9 @@
     __ cmp(offset, flags);
     __ j(not_equal, &miss);
 
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
     // Jump to the first instruction in the code stub.
-    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(extra);
+    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(extra));
 
     __ bind(&miss);
   } else {
@@ -94,19 +75,11 @@
     __ push(offset);
 
     // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ cmp(name, Operand::StaticArray(offset, times_2, key_offset));
     __ j(not_equal, &miss);
 
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
     // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
 
     // Check that the flags match what we're looking for.
     __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
@@ -114,21 +87,13 @@
     __ cmp(offset, flags);
     __ j(not_equal, &miss);
 
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
     // Restore offset and re-load code entry from cache.
     __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+    __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
 
     // Jump to the first instruction in the code stub.
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
+    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(Operand(offset));
 
     // Pop at miss.
     __ bind(&miss);
@@ -142,12 +107,12 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             Handle<String> name,
-                                             Register r0,
-                                             Register r1) {
+static MaybeObject* GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                                     Label* miss_label,
+                                                     Register receiver,
+                                                     String* name,
+                                                     Register r0,
+                                                     Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -177,14 +142,19 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                     miss_label,
-                                                     &done,
-                                                     properties,
-                                                     name,
-                                                     r1);
+  MaybeObject* result =
+      StringDictionaryLookupStub::GenerateNegativeLookup(masm,
+                                                         miss_label,
+                                                         &done,
+                                                         properties,
+                                                         name,
+                                                         r1);
+  if (result->IsFailure()) return result;
+
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -194,69 +164,56 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2,
-                              Register extra3) {
+                              Register extra2) {
+  Isolate* isolate = Isolate::Current();
   Label miss;
+  USE(extra2);  // The register extra2 is not used on the ia32 platform.
 
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  ASSERT(sizeof(Entry) == 12);
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
 
-  // Assert the flags do not name a specific type.
+  // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
 
-  // Assert that there are no register conflicts.
+  // Make sure that there are no register conflicts.
   ASSERT(!scratch.is(receiver));
   ASSERT(!scratch.is(name));
   ASSERT(!extra.is(receiver));
   ASSERT(!extra.is(name));
   ASSERT(!extra.is(scratch));
 
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
+  // Check that scratch and extra registers are valid and extra2 is unused.
   ASSERT(!scratch.is(no_reg));
   ASSERT(extra2.is(no_reg));
-  ASSERT(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
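+  // Compute the primary-table entry: hash the name with the receiver's map,
+  // fold in the flags, and mask to a pre-scaled, in-range table index.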
+  __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the primary table.
-  ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra);
 
   // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, String::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(offset, name);
-  __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
+  __ mov(scratch, FieldOperand(name, String::kHashFieldOffset));
+  __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, flags);
+  __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
+  __ sub(scratch, Operand(name));
+  __ add(Operand(scratch), Immediate(flags));
+  __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the secondary table.
-  ProbeTable(
-      isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
 }
 
 
@@ -271,17 +228,14 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
   // Check we're still in the same context.
   __ cmp(Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)),
          masm->isolate()->global());
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(masm->isolate()->global_context()->get(index)));
+  JSFunction* function =
+      JSFunction::cast(masm->isolate()->global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
   __ Set(prototype, Immediate(Handle<Map>(function->initial_map())));
   // Load the prototype from the initial map.
@@ -364,7 +318,7 @@
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, scratch1);
+  __ mov(eax, Operand(scratch1));
   __ ret(0);
 }
 
@@ -373,10 +327,8 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            Handle<JSObject> holder,
-                                            int index) {
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -396,12 +348,12 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     Handle<JSObject> holder_obj) {
+                                     JSObject* holder_obj) {
   __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
   Register scratch = name;
-  __ mov(scratch, Immediate(interceptor));
+  __ mov(scratch, Immediate(Handle<Object>(interceptor)));
   __ push(scratch);
   __ push(receiver);
   __ push(holder);
@@ -409,12 +361,11 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallExternalReference(
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
@@ -455,15 +406,15 @@
   //                                          frame.
   // -----------------------------------
   __ pop(scratch);
-  __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
+  __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
   __ push(scratch);
 }
 
 
 // Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+                                        const CallOptimization& optimization,
+                                        int argc) {
   // ----------- S t a t e -------------
   //  -- esp[0]              : return address
   //  -- esp[4]              : object passing the type check
@@ -478,25 +429,30 @@
   //  -- esp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  Handle<JSFunction> function = optimization.constant_function();
-  __ LoadHeapObject(edi, function);
+  JSFunction* function = optimization.constant_function();
+  __ mov(edi, Immediate(Handle<JSFunction>(function)));
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ mov(Operand(esp, 2 * kPointerSize), edi);
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data(api_call_info->data());
-  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
-    __ mov(ecx, api_call_info);
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
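+    // The call data lives in new space and may move, so it cannot be
+    // embedded in the code; load it from the CallHandlerInfo at run time.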
+    __ mov(ecx, api_call_info_handle);
     __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
     __ mov(Operand(esp, 3 * kPointerSize), ebx);
   } else {
-    __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data));
+    __ mov(Operand(esp, 3 * kPointerSize),
+           Immediate(Handle<Object>(call_data)));
   }
 
   // Prepare arguments.
   __ lea(eax, Operand(esp, 3 * kPointerSize));
 
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
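+  // TryCallApiFunctionAndReturn takes the target as an ApiFunction because
+  // the callback address is a foreign pointer outside V8's heap.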
+
   const int kApiArgc = 1;  // API function gets reference to the v8::Arguments.
 
   // Allocate the v8::Arguments structure in the arguments' space since
@@ -506,7 +462,7 @@
   __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
 
   __ mov(ApiParameterOperand(1), eax);  // v8::Arguments::implicit_args_.
-  __ add(eax, Immediate(argc * kPointerSize));
+  __ add(Operand(eax), Immediate(argc * kPointerSize));
   __ mov(ApiParameterOperand(2), eax);  // v8::Arguments::values_.
   __ Set(ApiParameterOperand(3), Immediate(argc));  // v8::Arguments::length_.
   // v8::Arguments::is_construct_call_.
@@ -516,10 +472,12 @@
   __ lea(eax, ApiParameterOperand(1));
   __ mov(ApiParameterOperand(0), eax);
 
-  // Function address is a foreign pointer outside V8's heap.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  __ CallApiFunctionAndReturn(function_address,
-                              argc + kFastApiCallArguments + 1);
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  return masm->TryCallApiFunctionAndReturn(&fun,
+                                           argc + kFastApiCallArguments + 1);
 }
 
 
@@ -528,22 +486,22 @@
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
                           Register name,
-                          Code::ExtraICState extra_state)
+                          Code::ExtraICState extra_ic_state)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
         name_(name),
-        extra_state_(extra_state) {}
+        extra_ic_state_(extra_ic_state) {}
 
-  void Compile(MacroAssembler* masm,
-               Handle<JSObject> object,
-               Handle<JSObject> holder,
-               Handle<String> name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -551,27 +509,45 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
+
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
-                       holder, lookup, name, optimization, miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
-      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
-                     name, holder, miss);
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();  // Success.
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                        Handle<JSObject> object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        Handle<JSObject> interceptor_holder,
-                        LookupResult* lookup,
-                        Handle<String> name,
-                        const CallOptimization& optimization,
-                        Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -580,14 +556,16 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(
-          object, interceptor_holder);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(
-            interceptor_holder, Handle<JSObject>(lookup->holder()));
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
-      can_do_fast_api_call =
-          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -603,9 +581,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver,
+                                        interceptor_holder, scratch1,
+                                        scratch2, scratch3, name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -618,11 +596,10 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      Handle<JSObject>(lookup->holder()),
-                                      scratch1, scratch2, scratch3,
-                                      name, depth2, miss);
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -633,9 +610,11 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result =
+          GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
-      CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+      CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
@@ -654,27 +633,33 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
+
+    return masm->isolate()->heap()->undefined_value();  // Success.
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      Handle<JSObject> object,
+                      JSObject* object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      Handle<String> name,
-                      Handle<JSObject> interceptor_holder,
+                      String* name,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, miss_label);
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
 
-    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -683,30 +668,27 @@
 
     // Restore the name_ register.
     __ pop(name_);
-
-    // Leave the internal frame.
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           Handle<JSObject> holder_obj,
+                           JSObject* holder_obj,
                            Label* interceptor_succeeded) {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(holder);  // Save the holder.
-      __ push(name_);  // Save the name.
+    __ EnterInternalFrame();
+    __ push(holder);  // Save the holder.
+    __ push(name_);  // Save the name.
 
-      CompileCallLoadPropertyWithInterceptor(masm,
-                                             receiver,
-                                             holder,
-                                             name_,
-                                             holder_obj);
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
 
-      __ pop(name_);  // Restore the name.
-      __ pop(receiver);  // Restore the holder.
-      // Leave the internal frame.
-    }
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
 
     __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
     __ j(not_equal, interceptor_succeeded);
@@ -715,39 +697,49 @@
   StubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  Code::ExtraICState extra_state_;
+  Code::ExtraICState extra_ic_state_;
 };
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Handle<Code> code = (kind == Code::LOAD_IC)
-      ? masm->isolate()->builtins()->LoadIC_Miss()
-      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
-  __ jmp(code, RelocInfo::CODE_TARGET);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Handle<Code> code =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ jmp(code, RelocInfo::CODE_TARGET);
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ jmp(ic, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Handle<JSObject> object,
+                                      JSObject* object,
                                       int index,
-                                      Handle<Map> transition,
+                                      Map* transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, Handle<Map>(object->map()),
-              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, miss_label);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -759,12 +751,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ push(Immediate(transition));
+    __ push(Immediate(Handle<Map>(transition)));
     __ push(eax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -775,11 +767,11 @@
     return;
   }
 
-  if (!transition.is_null()) {
+  if (transition != NULL) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
     __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset),
-           Immediate(transition));
+           Immediate(Handle<Map>(transition)));
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -794,12 +786,8 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, eax);
-    __ RecordWriteField(receiver_reg,
-                        offset,
-                        name_reg,
-                        scratch,
-                        kDontSaveFPRegs);
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -809,12 +797,8 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, eax);
-    __ RecordWriteField(scratch,
-                        offset,
-                        name_reg,
-                        receiver_reg,
-                        kDontSaveFPRegs);
+    __ mov(name_reg, Operand(eax));
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
   }
 
   // Return the value (register eax).
@@ -825,58 +809,70 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
-                                      Handle<GlobalObject> global,
-                                      Handle<String> name,
-                                      Register scratch,
-                                      Label* miss) {
-  Handle<JSGlobalPropertyCell> cell =
-      GlobalObject::EnsurePropertyCell(global, name);
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
   if (Serializer::enabled()) {
-    __ mov(scratch, Immediate(cell));
+    __ mov(scratch, Immediate(Handle<Object>(cell)));
     __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
-           Immediate(the_hole));
+           Immediate(masm->isolate()->factory()->the_hole_value()));
   } else {
-    __ cmp(Operand::Cell(cell), Immediate(the_hole));
+    __ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
+           Immediate(masm->isolate()->factory()->the_hole_value()));
   }
   __ j(not_equal, miss);
+  return cell;
 }
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
-                                       Handle<JSObject> object,
-                                       Handle<JSObject> holder,
-                                       Handle<String> name,
-                                       Register scratch,
-                                       Label* miss) {
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
     if (current->IsGlobalObject()) {
-      GenerateCheckPropertyCell(masm,
-                                Handle<GlobalObject>::cast(current),
-                                name,
-                                scratch,
-                                miss);
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
     }
-    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
   }
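+  // A NULL result signals success; IsFailure() only inspects the pointer's
+  // tag bits, so callers can test the result without dereferencing it.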
+  return NULL;
 }
 
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
-                                       Handle<JSObject> holder,
+                                       JSObject* holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       Handle<String> name,
+                                       String* name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -886,7 +882,7 @@
 
   // Keep track of the current object in register reg.
   Register reg = object_reg;
-  Handle<JSObject> current = object;
+  JSObject* current = object;
   int depth = 0;
 
   if (save_at_depth == depth) {
@@ -895,55 +891,79 @@
 
   // Traverse the prototype chain and check the maps in the prototype chain for
   // fast and global objects or do negative lookup for normal objects.
-  while (!current.is_identical_to(holder)) {
-    ++depth;
+  while (current != holder) {
+    depth++;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        name = factory()->LookupSymbol(name);
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
       }
-      ASSERT(current->property_dictionary()->FindEntry(*name) ==
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
 
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
+      reg = holder_reg;  // From now on the object is in holder_reg.
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      Handle<Map> current_map(current->map());
-      if (in_new_space) {
-        // Save the map in scratch1 for later.
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
         __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       }
-      __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
-                  ALLOW_ELEMENT_TRANSITION_MAPS);
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+             Immediate(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (in_new_space) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, prototype);
-      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ mov(reg, Handle<JSObject>(prototype));
     }
 
     if (save_at_depth == depth) {
@@ -953,46 +973,54 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
-  ASSERT(current.is_identical_to(holder));
+  ASSERT(current == holder);
 
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
   // Check the holder map.
-  __ CheckMap(reg, Handle<Map>(holder->map()),
-              miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(holder->map())));
+  __ j(not_equal, miss);
 
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
+  };
 
-  // If we've skipped any global objects, it's not enough to verify that
-  // their maps haven't changed.  We also need to check that the property
-  // cell for the property is still empty.
-  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
   return reg;
 }
 
 
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
-                                     Handle<JSObject> holder,
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     Handle<String> name,
+                                     String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg = CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
@@ -1000,37 +1028,40 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
-                                 scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1,
+                      scratch2, scratch3, name, miss);
+
+  Handle<AccessorInfo> callback_handle(callback);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch3.is(reg));
   __ pop(scratch3);  // Get return address to place it below.
 
   __ push(receiver);  // receiver
-  __ mov(scratch2, esp);
+  __ mov(scratch2, Operand(esp));
   ASSERT(!scratch2.is(reg));
   __ push(reg);  // holder
   // Push data from AccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback->data())) {
-    __ mov(scratch1, Immediate(callback));
+  if (isolate()->heap()->InNewSpace(callback_handle->data())) {
+    __ mov(scratch1, Immediate(callback_handle));
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
   } else {
-    __ push(Immediate(Handle<Object>(callback->data())));
+    __ push(Immediate(Handle<Object>(callback_handle->data())));
   }
 
   // Save a pointer to where we pushed the arguments pointer.
@@ -1042,56 +1073,59 @@
 
   __ push(scratch3);  // Restore return address.
 
-  // 3 elements array for v8::Arguments::values_, handler for name and pointer
+  // Do the call through the API.
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
+  // 3-element array for v8::Arguments::values_, handler for name and pointer
   // to the values (it considered as smi in GC).
   const int kStackSpace = 5;
   const int kApiArgc = 2;
 
   __ PrepareCallApiFunction(kApiArgc);
   __ mov(ApiParameterOperand(0), ebx);  // name.
-  __ add(ebx, Immediate(kPointerSize));
+  __ add(Operand(ebx), Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), ebx);  // arguments pointer.
 
   // Emitting a stub call may try to allocate (if the code is not
   // already generated).  Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
+  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
 }
 
 
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<JSFunction> value,
-                                        Handle<String> name,
+                                        Object* value,
+                                        String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(object, receiver, holder,
+                  scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ LoadHeapObject(eax, value);
+  __ mov(eax, Handle<Object>(value));
   __ ret(0);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
-                                           Handle<JSObject> interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           Handle<String> name,
+                                           String* name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1103,13 +1137,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      compile_followup_inline =
-          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
     }
   }
 
@@ -1124,49 +1158,47 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
-        __ push(receiver);
-      }
-      __ push(holder_reg);
-      __ push(name_reg);
-
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method.)
-      CompileCallLoadPropertyWithInterceptor(masm(),
-                                             receiver,
-                                             holder_reg,
-                                             name_reg,
-                                             interceptor_holder);
-
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ cmp(eax, factory()->no_interceptor_result_sentinel());
-      __ j(equal, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ ret(0);
-
-      __ bind(&interceptor_failed);
-      __ pop(name_reg);
-      __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        __ pop(receiver);
-      }
-
-      // Leave the internal frame.
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ push(receiver);
     }
+    __ push(holder_reg);
+    __ push(name_reg);
+
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if the interceptor provided a value for the property.  If it
+    // did, return immediately.
+    Label interceptor_failed;
+    __ cmp(eax, factory()->no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   Handle<JSObject>(lookup->holder()),
+                                   lookup->holder(),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1178,15 +1210,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), eax, holder_reg,
-                               Handle<JSObject>(lookup->holder()),
-                               lookup->GetFieldIndex());
+                               lookup->holder(), lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1195,7 +1227,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ mov(holder_reg, Immediate(callback));
+      __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1225,17 +1257,17 @@
 }
 
 
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(ecx, Immediate(name));
+    __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<String> name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+                                                   JSObject* holder,
+                                                   String* name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1245,27 +1277,31 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(edx, miss);
+  }
 
   // Check that the maps haven't changed.
-  __ JumpIfSmi(edx, miss);
   CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    Label* miss) {
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(edi, Immediate(cell));
+    __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
     __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(edi, Operand::Cell(cell));
+    __ mov(edi, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
   }
 
   // Check that the cell contains the same function.
-  if (isolate()->heap()->InNewSpace(*function)) {
+  if (isolate()->heap()->InNewSpace(function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1278,26 +1314,31 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
+    __ j(not_equal, miss);
   } else {
-    __ cmp(edi, Immediate(function));
+    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+    __ j(not_equal, miss);
   }
-  __ j(not_equal, miss);
 }
 
 
-void CallStubCompiler::GenerateMissBranch() {
-  Handle<Code> code =
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
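+  // Computing the miss stub may fail with a retry-after-GC failure in
+  // this pre-handlified codebase, so the failure is threaded back to the
+  // caller as a MaybeObject rather than assumed to succeed.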
+  MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_state_);
-  __ jmp(code, RelocInfo::CODE_TARGET);
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ jmp(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
-                                                int index,
-                                                Handle<String> name) {
+MUST_USE_RESULT MaybeObject* CallStubCompiler::CompileCallField(
+    JSObject* object,
+    JSObject* holder,
+    int index,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1335,7 +1376,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -1343,19 +1384,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1365,8 +1406,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) {
-    return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) {
+    return isolate()->heap()->undefined_value();
   }
 
   Label miss;
@@ -1380,8 +1421,9 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
 
-  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object), edx,
+                  holder, ebx,
+                  eax, edi, name, &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1390,85 +1432,51 @@
   } else {
     Label call_builtin;
 
+    // Get the elements array of the object.
+    __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+           Immediate(factory()->fixed_array_map()));
+    __ j(not_equal, &call_builtin);
+
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label attempt_to_grow_elements, with_write_barrier;
-
-      // Get the elements array of the object.
-      __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-
-      // Check that the elements are in fast mode and writable.
-      __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-             Immediate(factory()->fixed_array_map()));
-      __ j(not_equal, &call_builtin);
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
       STATIC_ASSERT(kSmiTag == 0);
-      __ add(eax, Immediate(Smi::FromInt(argc)));
+      __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
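+      // Both values are smis (the integer shifted left by one on ia32,
+      // per the asserts above), so ordinary addition stays tagged:
+      // e.g. smi(4) + Smi::FromInt(1) is 8 + 2 == 10, which is smi(5).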
 
-      // Get the elements' length into ecx.
-      __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+      // Get the elements array's length into ecx.
+      __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
-      __ cmp(eax, ecx);
+      __ cmp(eax, Operand(ecx));
       __ j(greater, &attempt_to_grow_elements);
 
-      // Check if value is a smi.
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
-      __ JumpIfNotSmi(ecx, &with_write_barrier);
-
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Store the value.
-      __ mov(FieldOperand(edi,
-                          eax,
-                          times_half_pointer_size,
-                          FixedArray::kHeaderSize - argc * kPointerSize),
-             ecx);
+      // Push the element.
+      __ lea(edx, FieldOperand(ebx,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
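+      // eax holds the new length as a smi (value << 1), so scaling it by
+      // times_half_pointer_size multiplies the untagged value by
+      // kPointerSize, yielding the address of the slot being pushed into.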
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+      __ mov(Operand(edx, 0), ecx);
 
+      // Check if value is a smi.
+      __ JumpIfNotSmi(ecx, &with_write_barrier);
+
+      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+      __ InNewSpace(ebx, ecx, equal, &exit);
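+      // If the elements array itself is in new space, the store above
+      // needs no remembered-set entry: new space is fully scavenged.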
 
-      if (FLAG_smi_only_arrays  && !FLAG_trace_elements_transitions) {
-        Label fast_object, not_fast_object;
-        __ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
-        __ jmp(&fast_object);
-        // In case of fast smi-only, convert to fast object, otherwise bail out.
-        __ bind(&not_fast_object);
-        __ CheckFastSmiOnlyElements(ebx, &call_builtin);
-        // edi: elements array
-        // edx: receiver
-        // ebx: map
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                               FAST_ELEMENTS,
-                                               ebx,
-                                               edi,
-                                               &call_builtin);
-        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
-        // Restore edi.
-        __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
-        __ bind(&fast_object);
-      } else {
-        __ CheckFastObjectElements(ebx, &call_builtin);
-      }
-
-      // Save new length.
-      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
-
-      // Store the value.
-      __ lea(edx, FieldOperand(edi,
-                               eax, times_half_pointer_size,
-                               FixedArray::kHeaderSize - argc * kPointerSize));
-      __ mov(Operand(edx, 0), ecx);
-
-      __ RecordWrite(edi, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                     OMIT_SMI_CHECK);
-
+      __ RecordWriteHelper(ebx, edx, ecx);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1476,19 +1484,6 @@
         __ jmp(&call_builtin);
       }
 
-      __ mov(ebx, Operand(esp, argc * kPointerSize));
-      // Growing elements that are SMI-only requires special handling in case
-      // the new element is non-Smi. For now, delegate to the builtin.
-      Label no_fast_elements_check;
-      __ JumpIfSmi(ebx, &no_fast_elements_check);
-      __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(ecx, &call_builtin, Label::kFar);
-      __ bind(&no_fast_elements_check);
-
-      // We could be lucky and the elements array could be at the top of
-      // new-space.  In this case we can just grow it in place by moving the
-      // allocation pointer up.
-
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1499,46 +1494,36 @@
       __ mov(ecx, Operand::StaticVariable(new_space_allocation_top));
 
       // Check if it's the end of elements.
-      __ lea(edx, FieldOperand(edi,
+      __ lea(edx, FieldOperand(ebx,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ cmp(edx, ecx);
+      __ cmp(edx, Operand(ecx));
       __ j(not_equal, &call_builtin);
-      __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
+      __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
       __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
       __ j(above, &call_builtin);
 
       // We fit and could grow elements.
       __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
 
       // Push the argument...
-      __ mov(Operand(edx, 0), ebx);
+      __ mov(Operand(edx, 0), ecx);
       // ... and fill the rest with holes.
       for (int i = 1; i < kAllocationDelta; i++) {
         __ mov(Operand(edx, i * kPointerSize),
                Immediate(factory()->the_hole_value()));
       }
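       // Filling the extra kAllocationDelta - 1 slots with the hole keeps
       // the elements array valid for the GC; later pushes can then reuse
       // the pre-grown capacity without allocating again.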
 
-      // We know the elements array is in new space so we don't need the
-      // remembered set, but we just pushed a value onto it so we may have to
-      // tell the incremental marker to rescan the object that we just grew.  We
-      // don't need to worry about the holes because they are in old space and
-      // already marked black.
-      __ RecordWrite(edi, edx, ebx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
       // Restore receiver to edx as finish sequence assumes it's here.
       __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
       // Increment element's and array's sizes.
-      __ add(FieldOperand(edi, FixedArray::kLengthOffset),
+      __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
              Immediate(Smi::FromInt(kAllocationDelta)));
-
-      // NOTE: This only happen in new-space, where we don't
-      // care about the black-byte-count on pages. Otherwise we should
-      // update that too if the object is black.
-
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
+      // Elements are in new space, so the write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1550,19 +1535,19 @@
   }
 
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                                   JSObject* holder,
+                                                   JSGlobalPropertyCell* cell,
+                                                   JSFunction* function,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1572,8 +1557,8 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) {
-    return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) {
+    return heap()->undefined_value();
   }
 
   Label miss, return_undefined, call_builtin;
@@ -1586,8 +1571,9 @@
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
-  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object), edx,
+                  holder, ebx,
+                  eax, edi, name, &miss);
 
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1599,7 +1585,7 @@
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
-  __ sub(ecx, Immediate(Smi::FromInt(1)));
+  __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
   __ j(negative, &return_undefined);
 
   // Get the last element.
@@ -1608,7 +1594,7 @@
   __ mov(eax, FieldOperand(ebx,
                            ecx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(eax, Immediate(factory()->the_hole_value()));
+  __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
   __ j(equal, &call_builtin);
 
   // Set the array's length.
@@ -1632,19 +1618,20 @@
       1);
 
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1654,8 +1641,8 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) {
-    return Handle<Code>::null();
+  if (!object->IsString() || cell != NULL) {
+    return isolate()->heap()->undefined_value();
   }
 
   const int argc = arguments().immediate();
@@ -1666,7 +1653,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1678,95 +1665,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             eax,
                                             &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  eax, holder, ebx, edx, edi, name, &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+                  ebx, edx, edi, name, &miss);
 
   Register receiver = ebx;
   Register index = edi;
-  Register result = eax;
-  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
-  } else {
-    __ Set(index, Immediate(factory()->undefined_value()));
-  }
-
-  StringCharCodeAtGenerator generator(receiver,
-                                      index,
-                                      result,
-                                      &miss,  // When not a string.
-                                      &miss,  // When not a number.
-                                      index_out_of_range_label,
-                                      STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ Set(eax, Immediate(factory()->nan_value()));
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in ecx.
-  __ Set(ecx, Immediate(name));
-  __ bind(&name_miss);
-  GenerateMissBranch();
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
-  // ----------- S t a t e -------------
-  //  -- ecx                 : function name
-  //  -- esp[0]              : return address
-  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- esp[(argc + 1) * 4] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) {
-    return Handle<Code>::null();
-  }
-
-  const int argc = arguments().immediate();
-
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            eax,
-                                            &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  eax, holder, ebx, edx, edi, name, &miss);
-
-  Register receiver = eax;
-  Register index = edi;
   Register scratch = edx;
   Register result = eax;
   __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
@@ -1776,19 +1680,107 @@
     __ Set(index, Immediate(factory()->undefined_value()));
   }
 
-  StringCharAtGenerator generator(receiver,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &miss,  // When not a string.
-                                  &miss,  // When not a number.
-                                  index_out_of_range_label,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
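+  // GenerateFast emits the inline one-character path; GenerateSlow emits
+  // the out-of-line fallback that reaches the runtime through the helper.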
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ Set(eax, Immediate(factory()->nan_value()));
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in ecx.
+  __ Set(ecx, Immediate(Handle<String>(name)));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
+  // ----------- S t a t e -------------
+  //  -- ecx                 : function name
+  //  -- esp[0]              : return address
+  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- esp[(argc + 1) * 4] : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) {
+    return heap()->undefined_value();
+  }
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            eax,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+                  ebx, edx, edi, name, &miss);
+
+  Register receiver = eax;
+  Register index = edi;
+  Register scratch1 = ebx;
+  Register scratch2 = edx;
+  Register result = eax;
+  __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+  } else {
+    __ Set(index, Immediate(factory()->undefined_value()));
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1798,21 +1790,22 @@
 
   __ bind(&miss);
   // Restore function name in ecx.
-  __ Set(ecx, Immediate(name));
+  __ Set(ecx, Immediate(Handle<String>(name)));
   __ bind(&name_miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : function name
   //  -- esp[0]              : return address
@@ -1826,22 +1819,23 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return Handle<Code>::null();
+    return isolate()->heap()->undefined_value();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
+
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1857,17 +1851,17 @@
   // Convert the smi code to uint16.
   __ and_(code, Immediate(Smi::FromInt(0xffff)));
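   // Smi::FromInt(0xffff) is 0x1fffe once tagged, so the mask clears the
   // upper bits while preserving the smi tag: code becomes smi(value & 0xffff).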
 
-  StringCharFromCodeGenerator generator(code, eax);
-  generator.GenerateFast(masm());
+  StringCharFromCodeGenerator char_from_code_generator(code, eax);
+  char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1875,19 +1869,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -1897,7 +1891,7 @@
   // -----------------------------------
 
   if (!CpuFeatures::IsSupported(SSE2)) {
-    return Handle<Code>::null();
+    return isolate()->heap()->undefined_value();
   }
 
   CpuFeatures::Scope use_sse2(SSE2);
@@ -1907,24 +1901,23 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return Handle<Code>::null();
+    return isolate()->heap()->undefined_value();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                    name, &miss);
+    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2005,19 +1998,19 @@
 
   __ bind(&miss);
   // ecx: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                                  JSObject* holder,
+                                                  JSGlobalPropertyCell* cell,
+                                                  JSFunction* function,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2031,24 +2024,23 @@
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) {
-    return Handle<Code>::null();
+    return isolate()->heap()->undefined_value();
   }
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ mov(edx, Operand(esp, 2 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(edx, &miss);
 
-    CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                    name, &miss);
+    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2066,10 +2058,10 @@
   __ sar(ebx, kBitsPerInt - 1);
 
   // Do bitwise not or do nothing depending on ebx.
-  __ xor_(eax, ebx);
+  __ xor_(eax, Operand(ebx));
 
   // Add 1 or do nothing depending on ebx.
-  __ sub(eax, ebx);
+  __ sub(eax, Operand(ebx));
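+  // Worked example: smi(-5) is -10; ebx holds the sign mask -1, so
+  // -10 ^ -1 == 9 and 9 - (-1) == 10, which is smi(5).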
 
   // If the result is still negative, go to the slow case.
   // This only happens for the most negative smi.
@@ -2110,29 +2102,30 @@
 
   __ bind(&miss);
   // ecx: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileFastApiCall(
+MaybeObject* CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return Handle<Code>::null();
-  if (!cell.is_null()) return Handle<Code>::null();
-  if (!object->IsJSObject()) return Handle<Code>::null();
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-      Handle<JSObject>::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
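+  // A fast API call is only compiled when the expected receiver type is
+  // found within a known, small prototype depth; anything else falls
+  // back to the regular call stub by returning undefined.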
 
   Label miss, miss_before_stack_reserved;
 
@@ -2151,11 +2144,11 @@
 
   // Allocate space for v8::Arguments implicit values. Must be initialized
   // before calling any runtime function.
-  __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
+  __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax, edi,
-                  name, depth, &miss);
+  CheckPrototypes(JSObject::cast(object), edx, holder,
+                  ebx, eax, edi, name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
@@ -2163,24 +2156,27 @@
 
   // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains
   // duplicate of return address and will be overwritten.
-  GenerateFastApiCall(masm(), optimization, argc);
+  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
 
   __ bind(&miss);
-  __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
+  __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
+MaybeObject* CallStubCompiler::CompileCallConstant(
+    Object* object,
+    JSObject* holder,
+    JSFunction* function,
+    String* name,
+    CheckType check) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2190,14 +2186,16 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // A result of undefined means bail out to the regular compiler code below.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2212,13 +2210,15 @@
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(isolate()->counters()->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(Handle<JSObject>::cast(object), edx, holder, ebx, eax,
-                      edi, name, &miss);
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, eax, edi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2229,25 +2229,28 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         // Check that the object is a string or a symbol.
         __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+                        ebx, edx, edi, name, &miss);
       }
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(edx, &fast);
@@ -2257,18 +2260,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+                        ebx, edx, edi, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      }
-      break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      } else {
         Label fast;
         // Check that the object is a boolean.
         __ cmp(edx, factory()->true_value());
@@ -2279,18 +2282,17 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+                        ebx, edx, edi, name, &miss);
       }
       break;
+    }
+
+    default:
+      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2298,16 +2300,17 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2322,15 +2325,24 @@
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state_);
-  compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
-                   &miss);
+  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_ic_state_);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         edx,
+                                         ebx,
+                                         edi,
+                                         eax,
+                                         &miss);
+  if (result->IsFailure()) return result;
 
   // Restore receiver.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
@@ -2349,7 +2361,7 @@
 
   // Invoke the function.
   __ mov(edi, eax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(edi, arguments(), JUMP_FUNCTION,
@@ -2357,19 +2369,20 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(
+    JSObject* object,
+    GlobalObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2379,17 +2392,23 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // A result of undefined means bail out to the regular compiler code below.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
+
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
+
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2398,37 +2417,46 @@
     __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
   }
 
-  // Set up the context (function already in edi).
+  // Set up the context (function already in edi).
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
+  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
-                expected, arguments(), JUMP_FUNCTION,
-                NullCallWrapper(), call_kind);
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
+                  expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
+  } else {
+    Handle<Code> code(function->code());
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
-                                                  Handle<Map> transition,
-                                                  Handle<String> name) {
+                                                  Map* transition,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2438,23 +2466,27 @@
   Label miss;
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     edx, ecx, ebx,
+                     &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ mov(ecx, Immediate(name));  // restore name
+  __ mov(ecx, Immediate(Handle<String>(name)));  // restore name
   Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<AccessorInfo> callback,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                     AccessorInfo* callback,
+                                                     String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2463,9 +2495,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(edx, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(edx, Handle<Map>(object->map()),
-              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(object->map())));
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2478,7 +2514,7 @@
 
   __ pop(ebx);  // remove the return address
   __ push(edx);  // receiver
-  __ push(Immediate(callback));  // callback info
+  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback info
   __ push(ecx);  // name
   __ push(eax);  // value
   __ push(ebx);  // restore return address
@@ -2498,9 +2534,8 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> receiver,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2509,9 +2544,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(edx, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(edx, Handle<Map>(receiver->map()),
-              &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(receiver->map())));
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2544,10 +2583,9 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
-    Handle<GlobalObject> object,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                                   JSGlobalPropertyCell* cell,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : name
@@ -2561,9 +2599,13 @@
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss);
 
   // Compute the cell operand to use.
-  __ mov(ebx, Immediate(cell));
-  Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
+  Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
+  if (Serializer::enabled()) {
+    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+    cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
+  }
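+  // When the serializer is on, the cell's absolute address cannot be
+  // baked into the snapshot, so the cell is loaded through a relocatable
+  // handle in ebx and addressed as a field operand instead.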
 
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
@@ -2574,7 +2616,6 @@
 
   // Store the value in the cell.
   __ mov(cell_operand, eax);
-  // No write barrier here, because cells are always rescanned.
 
   // Return the value (register eax).
   Counters* counters = isolate()->counters();
@@ -2592,10 +2633,10 @@
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
-                                                       Handle<Map> transition,
-                                                       Handle<String> name) {
+                                                       Map* transition,
+                                                       String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2608,11 +2649,16 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(ecx, Immediate(name));
+  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
-  GenerateStoreField(masm(), object, index, transition, edx, ecx, ebx, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     edx, ecx, ebx,
+                     &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2621,37 +2667,39 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub =
-      KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
-
-  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -2659,33 +2707,28 @@
   //  -- esp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(edx, &miss, Label::kNear);
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  // ebx: receiver->map().
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    __ cmp(edi, receiver_maps->at(i));
-    if (transitioned_maps->at(i).is_null()) {
-      __ j(equal, handler_stubs->at(i));
-    } else {
-      Label next_map;
-      __ j(not_equal, &next_map, Label::kNear);
-      __ mov(ebx, Immediate(transitioned_maps->at(i)));
-      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
+  __ JumpIfSmi(edx, &miss);
+
+  Register map_reg = ebx;
+  __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
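+  // Megamorphic dispatch is a linear scan: compare the receiver map
+  // against each known map and tail-jump to the matching handler IC.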
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
   }
   __ bind(&miss);
   Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2706,8 +2749,15 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, edx, &miss);
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  edx,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2719,14 +2769,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, factory()->empty_string());
+  return GetCode(NONEXISTENT, isolate()->heap()->empty_string());
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2743,11 +2793,10 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2755,8 +2804,13 @@
   // -----------------------------------
   Label miss;
 
-  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, edi, callback,
-                       name, &miss);
+  MaybeObject* result = GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                                             edi, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2765,10 +2819,10 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> value,
-                                                   Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2785,9 +2839,9 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2795,13 +2849,21 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver, holder, &lookup, eax, ecx, edx, ebx, edi,
-                          name, &miss);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          eax,
+                          ecx,
+                          edx,
+                          ebx,
+                          edi,
+                          name,
+                          &miss);
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2811,12 +2873,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name,
-    bool is_dont_delete) {
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- eax    : receiver
   //  -- ecx    : name
@@ -2824,16 +2885,22 @@
   // -----------------------------------
   Label miss;
 
+  // If the object is the holder, then we know that it's a global
+  // object, which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(eax, &miss);
+  }
+
   // Check that the maps haven't changed.
-  __ JumpIfSmi(eax, &miss);
   CheckPrototypes(object, eax, holder, ebx, edx, edi, name, &miss);
 
   // Get the value from the cell.
   if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(cell));
+    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
     __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
   } else {
-    __ mov(ebx, Operand::Cell(cell));
+    __ mov(ebx, Operand::Cell(Handle<JSGlobalPropertyCell>(cell)));
   }
 
   // Check for deleted property if property can actually be deleted.
@@ -2859,9 +2926,9 @@
 }
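
The global-load stub works because the IC caches the property cell rather
than the value: one load of the cell's value slot picks up later assignments
to the global, and only deletion (the hole value in the cell) forces a miss.
A rough C++ model, with Cell and the hole as hypothetical stand-ins:

struct Cell { const void* value; };  // stand-in for JSGlobalPropertyCell

const void* LoadGlobal(const Cell* cell, const void* the_hole,
                       bool is_dont_delete, bool* miss) {
  const void* value = cell->value;  // FieldOperand(cell, kValueOffset)
  if (!is_dont_delete && value == the_hole) {
    *miss = true;  // deleted property: fall back to the runtime
    return 0;
  }
  return value;
}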
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
-                                                     Handle<JSObject> receiver,
-                                                     Handle<JSObject> holder,
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- eax    : key
@@ -2874,7 +2941,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2888,11 +2955,11 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+    String* name,
+    JSObject* receiver,
+    JSObject* holder,
+    AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2904,13 +2971,18 @@
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
-  GenerateLoadCallback(receiver, holder, edx, eax, ebx, ecx, edi, callback,
-                       name, &miss);
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
+                                             ecx, edi, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
 
   __ bind(&miss);
+
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2919,11 +2991,10 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<JSFunction> value) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                        JSObject* receiver,
+                                                        JSObject* holder,
+                                                        Object* value) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2935,11 +3006,11 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
-  GenerateLoadConstant(
-      receiver, holder, edx, ebx, ecx, edi, value, name, &miss);
+  GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
+                       value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_constant_function(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2949,10 +3020,9 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                           JSObject* holder,
+                                                           String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2964,13 +3034,21 @@
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver, holder, &lookup, edx, eax, ecx, ebx, edi,
-                          name, &miss);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          edx,
+                          eax,
+                          ecx,
+                          ebx,
+                          edi,
+                          name,
+                          &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2980,8 +3058,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -2993,7 +3070,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3006,8 +3083,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3019,7 +3095,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3032,8 +3108,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3045,7 +3120,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ cmp(eax, Immediate(name));
+  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3058,29 +3133,31 @@
 }
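
Every named keyed-load stub above opens with the same guard: the key in eax
is compared against the expected name as an immediate. Names reaching these
stubs are symbols (interned strings), so a single pointer compare is a
complete name check. A sketch with stand-in types:

struct String;
typedef void (*Handler)();

void NamedKeyedLoad(const String* key, const String* expected_name,
                    Handler fast_path, Handler miss) {
  if (key == expected_name) {
    fast_path();  // e.g. the field, callback, or constant load above
  } else {
    miss();       // KEYED_LOAD_IC miss
  }
}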
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(edx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_ics) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- eax    : key
   //  -- edx    : receiver
@@ -3093,22 +3170,22 @@
   __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
-    __ cmp(map_reg, receiver_maps->at(current));
-    __ j(equal, handler_ics->at(current));
+    Handle<Map> map(receiver_maps->at(current));
+    __ cmp(map_reg, map);
+    __ j(equal, Handle<Code>(handler_ics->at(current)));
   }
 
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
-    Handle<JSFunction> function) {
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- eax : argc
   //  -- edi : constructor
@@ -3147,8 +3224,12 @@
   // ebx: initial map
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
   __ shl(ecx, kPointerSizeLog2);
-  __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
-                        &generic_stub_call, NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(ecx,
+                        edx,
+                        ecx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject, now initialize the fields and add the heap tag.
   // ebx: initial map
@@ -3179,7 +3260,7 @@
   // edi: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  Handle<SharedFunctionInfo> shared(function->shared());
+  SharedFunctionInfo* shared = function->shared();
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3217,7 +3298,7 @@
   // Move argc to ebx and retrieve and tag the JSObject to return.
   __ mov(ebx, eax);
   __ pop(eax);
-  __ or_(eax, Immediate(kHeapObjectTag));
+  __ or_(Operand(eax), Immediate(kHeapObjectTag));
 
   // Remove caller arguments and receiver from the stack and return.
   __ pop(ecx);
@@ -3231,8 +3312,9 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
-  __ jmp(code, RelocInfo::CODE_TARGET);
+  Handle<Code> generic_construct_stub =
+      isolate()->builtins()->JSConstructStubGeneric();
+  __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
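
The construct stub manipulates the new object untagged until the very end,
where the kHeapObjectTag bit is ORed in before returning. A sketch of the
tagging arithmetic, assuming V8's 32-bit layout (kHeapObjectTag == 1):

#include <cassert>
#include <cstdint>

const uintptr_t kHeapObjectTag = 1;  // assumed value

// Allocations are word-aligned, so the low bit is free to mark the word
// as a heap pointer rather than a smi.
uintptr_t TagObject(uintptr_t untagged) {
  assert((untagged & kHeapObjectTag) == 0);
  return untagged | kHeapObjectTag;  // matches the or_(eax, kHeapObjectTag)
}

// FieldOperand(obj, offset) compensates by addressing obj + offset - tag.
uintptr_t FieldAddress(uintptr_t tagged, int offset) {
  return tagged + offset - kHeapObjectTag;
}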
@@ -3424,7 +3506,8 @@
   // If we fail allocation of the HeapNumber, we still have a value on
   // top of the FPU stack. Remove it.
   __ bind(&failed_allocation);
-  __ fstp(0);
+  __ ffree();
+  __ fincstp();
   // Fall through to slow case.
 
   // Slow case: Jump to runtime.
@@ -3596,10 +3679,10 @@
             // If the value is NaN or +/-infinity, the result is 0x80000000,
             // which is automatically zero when taken mod 2^n, n < 32.
             __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-            __ sub(esp, Immediate(2 * kPointerSize));
+            __ sub(Operand(esp), Immediate(2 * kPointerSize));
             __ fisttp_d(Operand(esp, 0));
             __ pop(ebx);
-            __ add(esp, Immediate(kPointerSize));
+            __ add(Operand(esp), Immediate(kPointerSize));
           } else {
             ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
@@ -3741,7 +3824,8 @@
   // A value was pushed on the floating point stack before the allocation, if
   // the allocation fails it needs to be removed.
   if (!CpuFeatures::IsSupported(SSE2)) {
-    __ fstp(0);
+    __ ffree();
+    __ fincstp();
   }
   Handle<Code> slow_ic =
       masm->isolate()->builtins()->KeyedLoadIC_Slow();
@@ -3754,19 +3838,15 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
-    MacroAssembler* masm,
-    bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, grow, slow, transition_elements_kind;
-  Label check_capacity, prepare_slow, finish_store, commit_backing_store;
+  Label miss_force_generic;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3774,52 +3854,27 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(ecx, &miss_force_generic);
 
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(eax, &transition_elements_kind);
-  }
-
   // Get the elements array and make sure it is a fast element array,
   // not a copy-on-write ('cow') array.
   __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
+  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->fixed_array_map()));
+  __ j(not_equal, &miss_force_generic);
+
   if (is_js_array) {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
-    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-      __ j(above_equal, &grow);
-    } else {
-      __ j(above_equal, &miss_force_generic);
-    }
+    __ j(above_equal, &miss_force_generic);
   } else {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
     __ j(above_equal, &miss_force_generic);
   }
 
-  __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
-  __ j(not_equal, &miss_force_generic);
-
-  __ bind(&finish_store);
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    // ecx is a smi, use times_half_pointer_size instead of
-    // times_pointer_size
-    __ mov(FieldOperand(edi,
-                        ecx,
-                        times_half_pointer_size,
-                        FixedArray::kHeaderSize), eax);
-  } else {
-    ASSERT(elements_kind == FAST_ELEMENTS);
-    // Do the store and update the write barrier.
-    // ecx is a smi, use times_half_pointer_size instead of
-    // times_pointer_size
-    __ lea(ecx, FieldOperand(edi,
-                             ecx,
-                             times_half_pointer_size,
-                             FixedArray::kHeaderSize));
-    __ mov(Operand(ecx, 0), eax);
-    // Make sure to preserve the value in register eax.
-    __ mov(ebx, eax);
-    __ RecordWrite(edi, ecx, ebx, kDontSaveFPRegs);
-  }
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register eax.
+  __ mov(edx, Operand(eax));
+  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ RecordWrite(edi, 0, edx, ecx);
 
   // Done.
   __ ret(0);
@@ -3829,99 +3884,20 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
-  // Handle transition to other elements kinds without using the generic stub.
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Handle transition requiring the array to grow.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
-    __ j(not_equal, &miss_force_generic);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-    __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-    __ j(not_equal, &check_capacity);
-
-    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-    // Restore the key, which is known to be the array length.
-
-    // eax: value
-    // ecx: key
-    // edx: receiver
-    // edi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ mov(FieldOperand(edi, JSObject::kMapOffset),
-           Immediate(masm->isolate()->factory()->fixed_array_map()));
-    __ mov(FieldOperand(edi, FixedArray::kLengthOffset),
-           Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-    __ mov(ebx, Immediate(masm->isolate()->factory()->the_hole_value()));
-    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
-      __ mov(FieldOperand(edi, FixedArray::SizeFor(i)), ebx);
-    }
-
-    // Store the element at index zero.
-    __ mov(FieldOperand(edi, FixedArray::SizeFor(0)), eax);
-
-    // Install the new backing store in the JSArray.
-    __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
-    __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
-                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ mov(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-    __ ret(0);
-
-    __ bind(&check_capacity);
-    __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
-           Immediate(masm->isolate()->factory()->fixed_cow_array_map()));
-    __ j(equal, &miss_force_generic);
-
-    // eax: value
-    // ecx: key
-    // edx: receiver
-    // edi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-    __ j(above_equal, &slow);
-
-    // Grow the array and finish the store.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-    __ jmp(&finish_store);
-
-    __ bind(&prepare_slow);
-    // Restore the key, which is known to be the array length.
-    __ mov(ecx, Immediate(0));
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
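
Note the addressing mode in the fast-element store: the key register still
holds a smi, so the scale is times_2 (times_half_pointer_size) rather than
times_4. A smi is the integer shifted left by one, and on ia32 an element
slot is four bytes, so base + untagged*4 equals base + smi*2. A sketch:

#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // low bit 0 tags a smi
int32_t SmiUntag(int32_t smi) { return smi >> 1; }

// Scaling the still-tagged key by 2 saves an explicit untag instruction.
uint32_t ElementOffset(int32_t smi_key) {
  return static_cast<uint32_t>(smi_key) * 2;  // == SmiUntag(smi_key) * 4
}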
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array,
-    KeyedAccessGrowMode grow_mode) {
+    bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, grow, slow;
-  Label check_capacity, prepare_slow, finish_store, commit_backing_store;
+  Label miss_force_generic, smi_value, is_nan, maybe_nan;
+  Label have_double_value, not_nan;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3936,20 +3912,65 @@
   if (is_js_array) {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // smis.
-    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-      __ j(above_equal, &grow);
-    } else {
-      __ j(above_equal, &miss_force_generic);
-    }
   } else {
     // Check that the key is within bounds.
     __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));  // smis.
-    __ j(above_equal, &miss_force_generic);
+  }
+  __ j(above_equal, &miss_force_generic);
+
+  __ JumpIfSmi(eax, &smi_value, Label::kNear);
+
+  __ CheckMap(eax,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
+  __ j(greater_equal, &maybe_nan, Label::kNear);
+
+  __ bind(&not_nan);
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ bind(&have_double_value);
+    __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
+              xmm0);
+    __ ret(0);
+  } else {
+    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ bind(&have_double_value);
+    __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+    __ ret(0);
   }
 
-  __ bind(&finish_store);
-  __ StoreNumberToDoubleElements(eax, edi, ecx, edx, xmm0,
-                                 &transition_elements_kind, true);
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  __ j(greater, &is_nan, Label::kNear);
+  __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
+  __ j(zero, &not_nan);
+  __ bind(&is_nan);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
+  } else {
+    __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+  }
+  __ jmp(&have_double_value, Label::kNear);
+
+  __ bind(&smi_value);
+  // Value is a smi; convert it to a double and store.
+  // Preserve original value.
+  __ mov(edx, eax);
+  __ SmiUntag(edx);
+  __ push(edx);
+  __ fild_s(Operand(esp, 0));
+  __ pop(edx);
+  __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
@@ -3957,84 +3978,6 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
-  // Handle transition to other elements kinds without using the generic stub.
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Handle transition requiring the array to grow.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
-    __ j(not_equal, &miss_force_generic);
-
-    // Transition on values that can't be stored in a FixedDoubleArray.
-    Label value_is_smi;
-    __ JumpIfSmi(eax, &value_is_smi);
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-           Immediate(Handle<Map>(masm->isolate()->heap()->heap_number_map())));
-    __ j(not_equal, &transition_elements_kind);
-    __ bind(&value_is_smi);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-    __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-    __ j(not_equal, &check_capacity);
-
-    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT);
-    // Restore the key, which is known to be the array length.
-    __ mov(ecx, Immediate(0));
-
-    // eax: value
-    // ecx: key
-    // edx: receiver
-    // edi: elements
-    // Initialize the new FixedDoubleArray. Leave elements uninitialized for
-    // efficiency; they are guaranteed to be initialized before use.
-    __ mov(FieldOperand(edi, JSObject::kMapOffset),
-           Immediate(masm->isolate()->factory()->fixed_double_array_map()));
-    __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
-           Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-
-    // Install the new backing store in the JSArray.
-    __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
-    __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
-                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-    __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&check_capacity);
-    // eax: value
-    // ecx: key
-    // edx: receiver
-    // edi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ cmp(ecx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-    __ j(above_equal, &slow);
-
-    // Grow the array and finish the store.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-    __ jmp(&finish_store);
-
-    __ bind(&prepare_slow);
-    // Restore the key, which is known to be the array length.
-    __ mov(ecx, Immediate(0));
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
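
The double-element store canonicalizes NaNs so that the hole representation
stays unambiguous: the upper 32 bits of the value are compared against
kNaNOrInfinityLowerBoundUpper32, and a non-zero fraction then separates NaN
from Infinity. A host-side C++ model of the same bit test (the constant's
value, 0x7FF00000, is assumed from the ia32 port):

#include <cstdint>
#include <cstring>

const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;  // assumed

// Exponent all ones means NaN or +/-Infinity; any fraction bit means NaN.
bool NeedsNaNCanonicalization(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  uint32_t lower = static_cast<uint32_t>(bits);
  if ((upper & 0x7FFFFFFF) < kNaNOrInfinityLowerBoundUpper32) return false;
  return ((upper & 0x000FFFFF) | lower) != 0;
}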
 
 
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 6a86921..b4f789c 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,8 +29,6 @@
 #define V8_IC_INL_H_
 
 #include "ic.h"
-
-#include "compiler.h"
 #include "debug.h"
 #include "macro-assembler.h"
 
@@ -38,7 +36,7 @@
 namespace internal {
 
 
-Address IC::address() const {
+Address IC::address() {
   // Get the address of the call.
   Address result = pc() - Assembler::kCallTargetAddressOffset;
 
@@ -79,20 +77,16 @@
 
 void IC::SetTargetAtAddress(Address address, Code* target) {
   ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-  Code* old_target = GetTargetAtAddress(address);
 #ifdef DEBUG
   // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
   // ICs as strict mode. The strict-ness of the IC must be preserved.
+  Code* old_target = GetTargetAtAddress(address);
   if (old_target->kind() == Code::STORE_IC ||
       old_target->kind() == Code::KEYED_STORE_IC) {
-    ASSERT(Code::GetStrictMode(old_target->extra_ic_state()) ==
-           Code::GetStrictMode(target->extra_ic_state()));
+    ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
   }
 #endif
   Assembler::set_target_address_at(address, target->instruction_start());
-  target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
-                                                                  target);
-  PostPatching(address, target, old_target);
 }
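
IC::address() recovers the call site by stepping back
Assembler::kCallTargetAddressOffset from the return pc, and SetTargetAtAddress
then rewrites the call's target so the next execution enters the new stub. A
schematic C++ model for ia32, where a near call is E8 <rel32> (the offset
constant here is an assumption, and the real set_target_address_at also
flushes the instruction cache):

#include <cstdint>
#include <cstring>

const int kCallTargetAddressOffset = 4;  // assumed: size of the rel32 field

void SetCallTarget(uint8_t* return_pc, const uint8_t* new_target) {
  uint8_t* cell = return_pc - kCallTargetAddressOffset;  // the rel32 bytes
  int32_t rel32 = static_cast<int32_t>(new_target - (cell + 4));
  std::memcpy(cell, &rel32, sizeof(rel32));
}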
 
 
diff --git a/src/ic.cc b/src/ic.cc
index c762127..0f76a9a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,13 +40,13 @@
 namespace internal {
 
 #ifdef DEBUG
-char IC::TransitionMarkFromState(IC::State state) {
+static char TransitionMarkFromState(IC::State state) {
   switch (state) {
     case UNINITIALIZED: return '0';
     case PREMONOMORPHIC: return 'P';
     case MONOMORPHIC: return '1';
     case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
-    case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
+    case MEGAMORPHIC: return 'N';
 
     // We never see the debugger states here, because the state is
     // computed from the original code - not the patched code. Let
@@ -80,34 +80,28 @@
         raw_frame = it.frame();
       }
     }
-    JavaScriptFrame::PrintTop(stdout, false, true);
-    bool new_can_grow =
-        Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
-        ALLOW_JSARRAY_GROWTH;
-    PrintF(" (%c->%c%s)",
+    if (raw_frame->is_java_script()) {
+      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+      Code* js_code = frame->unchecked_code();
+      // Find the function on the stack, along with both the active code
+      // for the function and the original code.
+      JSFunction* function = JSFunction::cast(frame->function());
+      function->PrintName();
+      int code_offset =
+          static_cast<int>(address() - js_code->instruction_start());
+      PrintF("+%d", code_offset);
+    } else {
+      PrintF("<unknown>");
+    }
+    PrintF(" (%c->%c)",
            TransitionMarkFromState(old_state),
-           TransitionMarkFromState(new_state),
-           new_can_grow ? ".GROW" : "");
+           TransitionMarkFromState(new_state));
     name->Print();
     PrintF("]\n");
   }
 }
+#endif
 
-#define TRACE_GENERIC_IC(type, reason)                          \
-  do {                                                          \
-    if (FLAG_trace_ic) {                                        \
-      PrintF("[%s patching generic stub in ", type);            \
-      JavaScriptFrame::PrintTop(stdout, false, true);           \
-      PrintF(" (%s)]\n", reason);                               \
-    }                                                           \
-  } while (false)
-
-#else
-#define TRACE_GENERIC_IC(type, reason)
-#endif  // DEBUG
-
-#define TRACE_IC(type, name, old_state, new_target)             \
-  ASSERT((TraceIC(type, name, old_state, new_target), true))
 
 IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
@@ -139,7 +133,7 @@
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() const {
+Address IC::OriginalCodeAddress() {
   HandleScope scope;
   // Compute the JavaScript frame for the frame pointer of this IC
   // structure. We need this to be able to find the function
@@ -173,7 +167,7 @@
                                              LookupResult* lookup,
                                              Object* receiver) {
   Object* end = lookup->IsProperty()
-      ? lookup->holder() : Object::cast(isolate->heap()->null_value());
+      ? lookup->holder() : isolate->heap()->null_value();
   for (Object* current = receiver;
        current != end;
        current = current->GetPrototype()) {
@@ -296,61 +290,6 @@
 }
 
 
-void IC::PostPatching(Address address, Code* target, Code* old_target) {
-  if (FLAG_type_info_threshold > 0) {
-    if (old_target->is_inline_cache_stub() &&
-        target->is_inline_cache_stub()) {
-      State old_state = old_target->ic_state();
-      State new_state = target->ic_state();
-      bool was_uninitialized =
-          old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
-      bool is_uninitialized =
-          new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
-      int delta = 0;
-      if (was_uninitialized && !is_uninitialized) {
-        delta = 1;
-      } else if (!was_uninitialized && is_uninitialized) {
-        delta = -1;
-      }
-      if (delta != 0) {
-        Code* host = target->GetHeap()->isolate()->
-            inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
-        // Not all Code objects have TypeFeedbackInfo.
-        if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
-          TypeFeedbackInfo* info =
-              TypeFeedbackInfo::cast(host->type_feedback_info());
-          info->set_ic_with_typeinfo_count(
-              info->ic_with_typeinfo_count() + delta);
-        }
-      }
-    }
-  }
-  if (FLAG_watch_ic_patching) {
-    Isolate::Current()->runtime_profiler()->NotifyICChanged();
-    // We do not want to optimize until the ICs have settled down,
-    // so when they are patched, we postpone optimization for the
-    // current function and the functions above it on the stack that
-    // might want to inline this one.
-    StackFrameIterator it;
-    if (it.done()) return;
-    it.Advance();
-    static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
-    for (int i = 0; i < kStackFramesToMark; ++i) {
-      if (it.done()) return;
-      StackFrame* raw_frame = it.frame();
-      if (raw_frame->is_java_script()) {
-        JSFunction* function =
-            JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
-        if (function->IsOptimized()) continue;
-        SharedFunctionInfo* shared = function->shared();
-        shared->set_profiler_ticks(0);
-      }
-      it.Advance();
-    }
-  }
-}
-
-
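
The PostPatching hook removed here fed the type-info counters: a transition
out of UNINITIALIZED/PREMONOMORPHIC means an IC gained type feedback, and a
transition back means it lost it. The bookkeeping reduces to a signed delta:

// Mirrors the delta logic in the reverted PostPatching above.
enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

int TypeInfoDelta(State old_state, State new_state) {
  bool was_uninit = old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
  bool is_uninit = new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
  if (was_uninit && !is_uninit) return 1;
  if (!was_uninit && is_uninit) return -1;
  return 0;
}
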
 void IC::Clear(Address address) {
   Code* target = GetTargetAtAddress(address);
 
@@ -409,7 +348,7 @@
 void StoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
-      (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
+      (target->extra_ic_state() == kStrictMode)
         ? initialize_stub_strict()
         : initialize_stub());
 }
@@ -418,7 +357,7 @@
 void KeyedStoreIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
   SetTargetAtAddress(address,
-      (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
+      (target->extra_ic_state() == kStrictMode)
         ? initialize_stub_strict()
         : initialize_stub());
 }
@@ -429,13 +368,15 @@
 }
 
 
-static void LookupForRead(Handle<Object> object,
-                          Handle<String> name,
+static void LookupForRead(Object* object,
+                          String* name,
                           LookupResult* lookup) {
+  AssertNoAllocation no_gc;  // pointers must stay valid
+
   // Skip all the objects with named interceptors, but
   // without actual getter.
   while (true) {
-    object->Lookup(*name, lookup);
+    object->Lookup(name, lookup);
     // Besides normal conditions (property not found or it's not
     // an interceptor), bail out if lookup is not cacheable: we won't
     // be able to IC it anyway and regular lookup should work fine.
@@ -445,18 +386,18 @@
       return;
     }
 
-    Handle<JSObject> holder(lookup->holder());
-    if (HasInterceptorGetter(*holder)) {
+    JSObject* holder = lookup->holder();
+    if (HasInterceptorGetter(holder)) {
       return;
     }
 
-    holder->LocalLookupRealNamedProperty(*name, lookup);
+    holder->LocalLookupRealNamedProperty(name, lookup);
     if (lookup->IsProperty()) {
       ASSERT(lookup->type() != INTERCEPTOR);
       return;
     }
 
-    Handle<Object> proto(holder->GetPrototype());
+    Object* proto = holder->GetPrototype();
     if (proto->IsNull()) {
       lookup->NotFound();
       return;
@@ -467,32 +408,31 @@
 }
 
 
-Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
-  Handle<Object> delegate = Execution::GetFunctionDelegate(object);
+Object* CallICBase::TryCallAsFunction(Object* object) {
+  HandleScope scope(isolate());
+  Handle<Object> target(object, isolate());
+  Handle<Object> delegate = Execution::GetFunctionDelegate(target);
 
-  if (delegate->IsJSFunction() && !object->IsJSFunctionProxy()) {
+  if (delegate->IsJSFunction()) {
     // Patch the receiver and use the delegate as the function to
-    // invoke. This is used for invoking objects as if they were functions.
-    const int argc = target()->arguments_count();
+    // invoke. This is used for invoking objects as if they were
+    // functions.
+    const int argc = this->target()->arguments_count();
     StackFrameLocator locator;
     JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
     int index = frame->ComputeExpressionsCount() - (argc + 1);
-    frame->SetExpression(index, *object);
+    frame->SetExpression(index, *target);
   }
 
-  return delegate;
+  return *delegate;
 }
 
 
 void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
                                             Handle<Object> object) {
-  while (callee->IsJSFunctionProxy()) {
-    callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
-  }
-
   if (callee->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
-    if (!function->shared()->is_classic_mode() || function->IsBuiltin()) {
+    if (function->shared()->strict_mode() || function->IsBuiltin()) {
       // Do not wrap receiver for strict mode functions or for builtins.
       return;
     }
@@ -524,27 +464,31 @@
   // the element if so.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Handle<Object> result = Object::GetElement(object, index);
-    RETURN_IF_EMPTY_HANDLE(isolate(), result);
-    if (result->IsJSFunction()) return *result;
+    Object* result;
+    { MaybeObject* maybe_result = object->GetElement(index);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+
+    if (result->IsJSFunction()) return result;
 
     // Try to find a suitable function delegate for the object at hand.
     result = TryCallAsFunction(result);
-    if (result->IsJSFunction()) return *result;
+    if (result->IsJSFunction()) return result;
 
     // Otherwise, it will fail in the lookup step.
   }
 
   // Lookup the property in the object.
-  LookupResult lookup(isolate());
-  LookupForRead(object, name, &lookup);
+  LookupResult lookup;
+  LookupForRead(*object, *name, &lookup);
 
   if (!lookup.IsProperty()) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    return IsContextual(object)
-        ? ReferenceError("not_defined", name)
-        : TypeError("undefined_method", object, name);
+    if (IsContextual(object)) {
+      return ReferenceError("not_defined", name);
+    }
+    return TypeError("undefined_method", object, name);
   }
 
   // Lookup is valid: Update inline cache and stub cache.
@@ -554,42 +498,53 @@
 
   // Get the property.
   PropertyAttributes attr;
-  Handle<Object> result =
-      Object::GetProperty(object, object, &lookup, name, &attr);
-  RETURN_IF_EMPTY_HANDLE(isolate(), result);
+  Object* result;
+  { MaybeObject* maybe_result =
+        object->GetProperty(*object, &lookup, *name, &attr);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
 
-  if (lookup.type() == INTERCEPTOR && attr == ABSENT) {
+  if (lookup.type() == INTERCEPTOR) {
     // If the object does not have the requested property, check which
     // exception we need to throw.
-    return IsContextual(object)
-        ? ReferenceError("not_defined", name)
-        : TypeError("undefined_method", object, name);
+    if (attr == ABSENT) {
+      if (IsContextual(object)) {
+        return ReferenceError("not_defined", name);
+      }
+      return TypeError("undefined_method", object, name);
+    }
   }
 
   ASSERT(!result->IsTheHole());
 
+  HandleScope scope(isolate());
+  // Wrap the result in a handle because ReceiverToObjectIfRequired may
+  // allocate a new object and cause a GC.
+  Handle<Object> result_handle(result);
   // Make receiver an object if the callee requires it. Strict mode or builtin
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
-  ReceiverToObjectIfRequired(result, object);
+  ReceiverToObjectIfRequired(result_handle, object);
 
-  if (result->IsJSFunction()) {
-    Handle<JSFunction> function = Handle<JSFunction>::cast(result);
+  if (result_handle->IsJSFunction()) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
     // Handle stepping into a function if step into is active.
     Debug* debug = isolate()->debug();
     if (debug->StepInActive()) {
       // Protect the result in a handle as the debugger can allocate and might
       // cause GC.
+      Handle<JSFunction> function(JSFunction::cast(*result_handle), isolate());
       debug->HandleStepIn(function, object, fp(), false);
+      return *function;
     }
 #endif
-    return *function;
+
+    return *result_handle;
   }
 
   // Try to find a suitable function delegate for the object at hand.
-  result = TryCallAsFunction(result);
-  if (result->IsJSFunction()) return *result;
+  result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
+  if (result_handle->IsJSFunction()) return *result_handle;
 
   return TypeError("property_not_function", object, name);
 }
@@ -639,57 +594,89 @@
 }
 
 
-Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
-                                                State state,
-                                                Code::ExtraICState extra_state,
-                                                Handle<Object> object,
-                                                Handle<String> name) {
+MaybeObject* CallICBase::ComputeMonomorphicStub(
+    LookupResult* lookup,
+    State state,
+    Code::ExtraICState extra_ic_state,
+    Handle<Object> object,
+    Handle<String> name) {
   int argc = target()->arguments_count();
-  Handle<JSObject> holder(lookup->holder());
+  MaybeObject* maybe_code = NULL;
   switch (lookup->type()) {
     case FIELD: {
       int index = lookup->GetFieldIndex();
-      return isolate()->stub_cache()->ComputeCallField(
-          argc, kind_, extra_state, name, object, holder, index);
+      maybe_code = isolate()->stub_cache()->ComputeCallField(argc,
+                                                             kind_,
+                                                             extra_ic_state,
+                                                             *name,
+                                                             *object,
+                                                             lookup->holder(),
+                                                             index);
+      break;
     }
     case CONSTANT_FUNCTION: {
       // Get the constant function and compute the code stub for this
       // call; used for rewriting to monomorphic state and making sure
       // that the code stub is in the stub cache.
-      Handle<JSFunction> function(lookup->GetConstantFunction());
-      return isolate()->stub_cache()->ComputeCallConstant(
-          argc, kind_, extra_state, name, object, holder, function);
+      JSFunction* function = lookup->GetConstantFunction();
+      maybe_code =
+          isolate()->stub_cache()->ComputeCallConstant(argc,
+                                                       kind_,
+                                                       extra_ic_state,
+                                                       *name,
+                                                       *object,
+                                                       lookup->holder(),
+                                                       function);
+      break;
     }
     case NORMAL: {
-      // If we return a null handle, the IC will not be patched.
-      if (!object->IsJSObject()) return Handle<Code>::null();
+      if (!object->IsJSObject()) return NULL;
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
-      if (holder->IsGlobalObject()) {
-        Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
-        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
-        if (!cell->value()->IsJSFunction()) return Handle<Code>::null();
-        Handle<JSFunction> function(JSFunction::cast(cell->value()));
-        return isolate()->stub_cache()->ComputeCallGlobal(
-            argc, kind_, extra_state, name, receiver, global, cell, function);
+      if (lookup->holder()->IsGlobalObject()) {
+        GlobalObject* global = GlobalObject::cast(lookup->holder());
+        JSGlobalPropertyCell* cell =
+            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+        if (!cell->value()->IsJSFunction()) return NULL;
+        JSFunction* function = JSFunction::cast(cell->value());
+        maybe_code = isolate()->stub_cache()->ComputeCallGlobal(argc,
+                                                                kind_,
+                                                                extra_ic_state,
+                                                                *name,
+                                                                *receiver,
+                                                                global,
+                                                                cell,
+                                                                function);
       } else {
         // There is only one shared stub for calling normalized
         // properties. It does not traverse the prototype chain, so the
         // property must be found in the receiver for the stub to be
         // applicable.
-        if (!holder.is_identical_to(receiver)) return Handle<Code>::null();
-        return isolate()->stub_cache()->ComputeCallNormal(
-            argc, kind_, extra_state);
+        if (lookup->holder() != *receiver) return NULL;
+        maybe_code = isolate()->stub_cache()->ComputeCallNormal(argc,
+                                                                kind_,
+                                                                extra_ic_state,
+                                                                *name,
+                                                                *receiver);
       }
       break;
     }
-    case INTERCEPTOR:
-      ASSERT(HasInterceptorGetter(*holder));
-      return isolate()->stub_cache()->ComputeCallInterceptor(
-          argc, kind_, extra_state, name, object, holder);
+    case INTERCEPTOR: {
+      ASSERT(HasInterceptorGetter(lookup->holder()));
+      maybe_code = isolate()->stub_cache()->ComputeCallInterceptor(
+          argc,
+          kind_,
+          extra_ic_state,
+          *name,
+          *object,
+          lookup->holder());
+      break;
+    }
     default:
-      return Handle<Code>::null();
+      maybe_code = NULL;
+      break;
   }
+  return maybe_code;
 }
 
 
@@ -711,57 +698,75 @@
 
   // Compute the number of arguments.
   int argc = target()->arguments_count();
+  MaybeObject* maybe_code = NULL;
   bool had_proto_failure = false;
-  Handle<Code> code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    code = isolate()->stub_cache()->ComputeCallPreMonomorphic(
-        argc, kind_, extra_ic_state);
+    maybe_code =
+        isolate()->stub_cache()->ComputeCallPreMonomorphic(argc,
+                                                           kind_,
+                                                           extra_ic_state);
   } else if (state == MONOMORPHIC) {
     if (kind_ == Code::CALL_IC &&
         TryUpdateExtraICState(lookup, object, &extra_ic_state)) {
-      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
-                                    object, name);
+      maybe_code = ComputeMonomorphicStub(lookup,
+                                          state,
+                                          extra_ic_state,
+                                          object,
+                                          name);
     } else if (kind_ == Code::CALL_IC &&
                TryRemoveInvalidPrototypeDependentStub(target(),
                                                       *object,
                                                       *name)) {
       had_proto_failure = true;
-      code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
-                                    object, name);
+      maybe_code = ComputeMonomorphicStub(lookup,
+                                          state,
+                                          extra_ic_state,
+                                          object,
+                                          name);
     } else {
-      code = isolate()->stub_cache()->ComputeCallMegamorphic(
-          argc, kind_, extra_ic_state);
+      maybe_code =
+          isolate()->stub_cache()->ComputeCallMegamorphic(argc,
+                                                          kind_,
+                                                          extra_ic_state);
     }
   } else {
-    code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
-                                  object, name);
+    maybe_code = ComputeMonomorphicStub(lookup,
+                                        state,
+                                        extra_ic_state,
+                                        object,
+                                        name);
   }
 
-  // If there's no appropriate stub we simply avoid updating the caches.
-  if (code.is_null()) return;
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  Object* code;
+  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
 
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED ||
       state == PREMONOMORPHIC ||
       state == MONOMORPHIC ||
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(*code);
+    set_target(Code::cast(code));
   } else if (state == MEGAMORPHIC) {
     // Cache code holding map should be consistent with
     // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
-    Handle<JSObject> cache_object = object->IsJSObject()
-        ? Handle<JSObject>::cast(object)
-        : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
+    Map* map = JSObject::cast(object->IsJSObject() ? *object :
+                              object->GetPrototype())->map();
+
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
+    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
   }
 
+  USE(had_proto_failure);
+#ifdef DEBUG
   if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
-  TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
-           name, state, target());
+  TraceIC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
+          name, state, target());
+#endif
 }
 
 
@@ -781,22 +786,34 @@
 
   if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
     int argc = target()->arguments_count();
-    Handle<Map> map =
-        isolate()->factory()->non_strict_arguments_elements_map();
+    Heap* heap = Handle<HeapObject>::cast(object)->GetHeap();
+    Map* map = heap->non_strict_arguments_elements_map();
     if (object->IsJSObject() &&
-        Handle<JSObject>::cast(object)->elements()->map() == *map) {
-      Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
+        Handle<JSObject>::cast(object)->elements()->map() == map) {
+      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallArguments(
           argc, Code::KEYED_CALL_IC);
-      set_target(*code);
-      TRACE_IC("KeyedCallIC", key, state, target());
-    } else if (!object->IsAccessCheckNeeded()) {
-      Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
+      Object* code;
+      if (maybe_code->ToObject(&code)) {
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedCallIC", key, state, target());
+#endif
+      }
+    } else if (FLAG_use_ic && state != MEGAMORPHIC &&
+               !object->IsAccessCheckNeeded()) {
+      MaybeObject* maybe_code = isolate()->stub_cache()->ComputeCallMegamorphic(
           argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
-      set_target(*code);
-      TRACE_IC("KeyedCallIC", key, state, target());
+      Object* code;
+      if (maybe_code->ToObject(&code)) {
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedCallIC", key, state, target());
+#endif
+      }
     }
   }
 
+  HandleScope scope(isolate());
   Handle<Object> result = GetProperty(object, key);
   RETURN_IF_EMPTY_HANDLE(isolate(), result);
 
@@ -804,9 +821,9 @@
   // functions do not wrap the receiver, non-strict functions and objects
   // called as functions do.
   ReceiverToObjectIfRequired(result, object);
-  if (result->IsJSFunction()) return *result;
 
-  result = TryCallAsFunction(result);
+  if (result->IsJSFunction()) return *result;
+  result = Handle<Object>(TryCallAsFunction(*result));
   if (result->IsJSFunction()) return *result;
 
   return TypeError("property_not_function", object, key);
@@ -829,44 +846,53 @@
     // the underlying string value.  See ECMA-262 15.5.5.1.
     if ((object->IsString() || object->IsStringWrapper()) &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      Handle<Code> stub;
+      AssertNoAllocation no_allocation;
+      Code* stub = NULL;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        stub = object->IsString()
-            ? isolate()->builtins()->LoadIC_StringLength()
-            : isolate()->builtins()->LoadIC_StringWrapperLength();
+        if (object->IsString()) {
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_StringLength);
+        } else {
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_StringWrapperLength);
+        }
       } else if (state == MONOMORPHIC && object->IsStringWrapper()) {
-        stub = isolate()->builtins()->LoadIC_StringWrapperLength();
+        stub = isolate()->builtins()->builtin(
+            Builtins::kLoadIC_StringWrapperLength);
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (!stub.is_null()) {
-        set_target(*stub);
+      if (stub != NULL) {
+        set_target(stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n");
 #endif
       }
       // Get the string if we have a string wrapper object.
-      Handle<Object> string = object->IsJSValue()
-          ? Handle<Object>(Handle<JSValue>::cast(object)->value())
-          : object;
-      return Smi::FromInt(String::cast(*string)->length());
+      if (object->IsJSValue()) {
+        return Smi::FromInt(
+            String::cast(Handle<JSValue>::cast(object)->value())->length());
+      }
+      return Smi::FromInt(String::cast(*object)->length());
     }
 
     // Use specialized code for getting the length of arrays.
     if (object->IsJSArray() &&
         name->Equals(isolate()->heap()->length_symbol())) {
-      Handle<Code> stub;
+      AssertNoAllocation no_allocation;
+      Code* stub = NULL;
       if (state == UNINITIALIZED) {
         stub = pre_monomorphic_stub();
       } else if (state == PREMONOMORPHIC) {
-        stub = isolate()->builtins()->LoadIC_ArrayLength();
+        stub = isolate()->builtins()->builtin(
+            Builtins::kLoadIC_ArrayLength);
       } else if (state != MEGAMORPHIC) {
         stub = megamorphic_stub();
       }
-      if (!stub.is_null()) {
-        set_target(*stub);
+      if (stub != NULL) {
+        set_target(stub);
 #ifdef DEBUG
         if (FLAG_trace_ic) PrintF("[LoadIC : +#length /array]\n");
 #endif
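// The reverted code above chooses the length stub purely from the IC state:
// an uninitialized site takes the pre-monomorphic stub (delaying
// specialization by one hit), a pre-monomorphic site takes the specialized
// builtin, and any later miss decays toward megamorphic. Compilable sketch
// of that state machine (enum and return strings are illustrative):

#include <cstddef>

enum LengthIcState { kUninitialized, kPremonomorphic, kMonomorphic,
                     kMegamorphic };

static const char* PickStringLengthStub(LengthIcState state,
                                        bool is_string_wrapper) {
  if (state == kUninitialized) return "pre_monomorphic_stub";
  if (state == kPremonomorphic) {
    return is_string_wrapper ? "LoadIC_StringWrapperLength"
                             : "LoadIC_StringLength";
  }
  if (state == kMonomorphic && is_string_wrapper) {
    return "LoadIC_StringWrapperLength";  // Plain-string site now sees a
                                          // wrapper: widen the stub.
  }
  if (state != kMegamorphic) return "megamorphic_stub";
  return NULL;  // Already megamorphic: leave the target as-is.
}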
@@ -877,20 +903,23 @@
     // Use specialized code for getting prototype of functions.
     if (object->IsJSFunction() &&
         name->Equals(isolate()->heap()->prototype_symbol()) &&
-        Handle<JSFunction>::cast(object)->should_have_prototype()) {
-      Handle<Code> stub;
-      if (state == UNINITIALIZED) {
-        stub = pre_monomorphic_stub();
-      } else if (state == PREMONOMORPHIC) {
-        stub = isolate()->builtins()->LoadIC_FunctionPrototype();
-      } else if (state != MEGAMORPHIC) {
-        stub = megamorphic_stub();
-      }
-      if (!stub.is_null()) {
-        set_target(*stub);
+        JSFunction::cast(*object)->should_have_prototype()) {
+      { AssertNoAllocation no_allocation;
+        Code* stub = NULL;
+        if (state == UNINITIALIZED) {
+          stub = pre_monomorphic_stub();
+        } else if (state == PREMONOMORPHIC) {
+          stub = isolate()->builtins()->builtin(
+              Builtins::kLoadIC_FunctionPrototype);
+        } else if (state != MEGAMORPHIC) {
+          stub = megamorphic_stub();
+        }
+        if (stub != NULL) {
+          set_target(stub);
 #ifdef DEBUG
-        if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
+          if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
 #endif
+        }
       }
       return Accessors::FunctionGetPrototype(*object, 0);
     }
@@ -902,8 +931,8 @@
   if (name->AsArrayIndex(&index)) return object->GetElement(index);
 
   // Named lookup in the object.
-  LookupResult lookup(isolate());
-  LookupForRead(object, name, &lookup);
+  LookupResult lookup;
+  LookupForRead(*object, *name, &lookup);
 
   // If we did not find a property, check if we need to throw an exception.
   if (!lookup.IsProperty()) {
@@ -919,18 +948,20 @@
   }
 
   PropertyAttributes attr;
-  if (lookup.IsFound() &&
+  if (lookup.IsProperty() &&
       (lookup.type() == INTERCEPTOR || lookup.type() == HANDLER)) {
     // Get the property.
-    Handle<Object> result =
-        Object::GetProperty(object, object, &lookup, name, &attr);
-    RETURN_IF_EMPTY_HANDLE(isolate(), result);
+    Object* result;
+    { MaybeObject* maybe_result =
+          object->GetProperty(*object, &lookup, *name, &attr);
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
     // If the property is not present, check if we need to throw an
     // exception.
     if (attr == ABSENT && IsContextual(object)) {
       return ReferenceError("not_defined", name);
     }
-    return *result;
+    return result;
   }
 
   // Get the property.
@@ -953,117 +984,120 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  Handle<Code> code;
+  MaybeObject* maybe_code = NULL;
+  Object* code;
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    code = pre_monomorphic_stub();
+    maybe_code = pre_monomorphic_stub();
   } else if (!lookup->IsProperty()) {
     // Nonexistent property. The result is undefined.
-    code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
+    maybe_code = isolate()->stub_cache()->ComputeLoadNonexistent(*name,
+                                                                 *receiver);
   } else {
     // Compute monomorphic stub.
-    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD:
-        code = isolate()->stub_cache()->ComputeLoadField(
-            name, receiver, holder, lookup->GetFieldIndex());
-        break;
-      case CONSTANT_FUNCTION: {
-        Handle<JSFunction> constant(lookup->GetConstantFunction());
-        code = isolate()->stub_cache()->ComputeLoadConstant(
-            name, receiver, holder, constant);
+      case FIELD: {
+        maybe_code = isolate()->stub_cache()->ComputeLoadField(
+            *name,
+            *receiver,
+            lookup->holder(),
+            lookup->GetFieldIndex());
         break;
       }
-      case NORMAL:
-        if (holder->IsGlobalObject()) {
-          Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
-          Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
-          code = isolate()->stub_cache()->ComputeLoadGlobal(
-              name, receiver, global, cell, lookup->IsDontDelete());
+      case CONSTANT_FUNCTION: {
+        Object* constant = lookup->GetConstantFunction();
+        maybe_code = isolate()->stub_cache()->ComputeLoadConstant(
+            *name, *receiver, lookup->holder(), constant);
+        break;
+      }
+      case NORMAL: {
+        if (lookup->holder()->IsGlobalObject()) {
+          GlobalObject* global = GlobalObject::cast(lookup->holder());
+          JSGlobalPropertyCell* cell =
+              JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+          maybe_code = isolate()->stub_cache()->ComputeLoadGlobal(*name,
+                                                    *receiver,
+                                                    global,
+                                                    cell,
+                                                    lookup->IsDontDelete());
         } else {
           // There is only one shared stub for loading normalized
           // properties. It does not traverse the prototype chain, so the
           // property must be found in the receiver for the stub to be
           // applicable.
-          if (!holder.is_identical_to(receiver)) return;
-          code = isolate()->stub_cache()->ComputeLoadNormal();
+          if (lookup->holder() != *receiver) return;
+          maybe_code = isolate()->stub_cache()->ComputeLoadNormal();
         }
         break;
+      }
       case CALLBACKS: {
-        Handle<Object> callback_object(lookup->GetCallbackObject());
-        if (!callback_object->IsAccessorInfo()) return;
-        Handle<AccessorInfo> callback =
-            Handle<AccessorInfo>::cast(callback_object);
+        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+        AccessorInfo* callback =
+            AccessorInfo::cast(lookup->GetCallbackObject());
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        code = isolate()->stub_cache()->ComputeLoadCallback(
-            name, receiver, holder, callback);
+        maybe_code = isolate()->stub_cache()->ComputeLoadCallback(
+            *name, *receiver, lookup->holder(), callback);
         break;
       }
-      case INTERCEPTOR:
-        ASSERT(HasInterceptorGetter(*holder));
-        code = isolate()->stub_cache()->ComputeLoadInterceptor(
-            name, receiver, holder);
+      case INTERCEPTOR: {
+        ASSERT(HasInterceptorGetter(lookup->holder()));
+        maybe_code = isolate()->stub_cache()->ComputeLoadInterceptor(
+            *name, *receiver, lookup->holder());
         break;
+      }
       default:
         return;
     }
   }
 
-  // Patch the call site depending on the state of the cache.
-  if (state == UNINITIALIZED ||
-      state == PREMONOMORPHIC ||
-      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(*code);
-  } else if (state == MONOMORPHIC) {
-    // We are transitioning from monomorphic to megamorphic case.
-    // Place the current monomorphic stub and stub compiled for
-    // the receiver into stub cache.
-    Map* map = target()->FindFirstMap();
-    if (map != NULL) {
-      isolate()->stub_cache()->Set(*name, map, target());
-    }
-    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
 
-    set_target(*megamorphic_stub());
+  // Patch the call site depending on the state of the cache.
+  if (state == UNINITIALIZED || state == PREMONOMORPHIC ||
+      state == MONOMORPHIC_PROTOTYPE_FAILURE) {
+    set_target(Code::cast(code));
+  } else if (state == MONOMORPHIC) {
+    set_target(megamorphic_stub());
   } else if (state == MEGAMORPHIC) {
     // Cache code holding map should be consistent with
     // GenerateMonomorphicCacheProbe.
-    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+    Map* map = JSObject::cast(object->IsJSObject() ? *object :
+                              object->GetPrototype())->map();
+
+    isolate()->stub_cache()->Set(*name, map, Code::cast(code));
   }
 
-  TRACE_IC("LoadIC", name, state, target());
+#ifdef DEBUG
+  TraceIC("LoadIC", name, state, target());
+#endif
 }
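// In the MEGAMORPHIC branch above the computed stub is not installed at the
// call site; it is filed in a flat cache keyed by (name, map), the same
// table that GenerateMonomorphicCacheProbe probes from generated code. Toy
// version of such a keyed cache (fixed size, overwrite-on-collision,
// illustrative hashing):

#include <cstring>

struct ToyStubEntry {
  const char* name;
  const void* map;
  const void* code;
};

class ToyStubCache {
 public:
  ToyStubCache() { std::memset(entries_, 0, sizeof(entries_)); }
  void Set(const char* name, const void* map, const void* code) {
    ToyStubEntry* e = &entries_[Hash(name, map)];
    e->name = name;
    e->map = map;
    e->code = code;  // Collisions simply overwrite the old entry.
  }
  const void* Probe(const char* name, const void* map) const {
    const ToyStubEntry* e = &entries_[Hash(name, map)];
    return (e->name == name && e->map == map) ? e->code : 0;
  }
 private:
  static size_t Hash(const char* name, const void* map) {
    return (reinterpret_cast<size_t>(name) ^
            reinterpret_cast<size_t>(map)) % kSize;
  }
  static const size_t kSize = 64;
  ToyStubEntry entries_[kSize];
};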
 
 
-Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
+MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
-  ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
-  return KeyedLoadElementStub(elements_kind).GetCode();
+    ElementsKind elements_kind) {
+  return KeyedLoadElementStub(elements_kind).TryGetCode();
 }
 
 
-Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
-    MapHandleList* receiver_maps,
-    StrictModeFlag strict_mode,
-    KeyedAccessGrowMode growth_mode) {
-  CodeHandleList handler_ics(receiver_maps->length());
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map = receiver_maps->at(i);
-    Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
-        receiver_map, strict_mode, growth_mode);
-    handler_ics.Add(cached_stub);
-  }
-  KeyedLoadStubCompiler compiler(isolate());
-  Handle<Code> code = compiler.CompileLoadPolymorphic(
-      receiver_maps, &handler_ics);
+MaybeObject* KeyedLoadIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedLoadStubCompiler compiler;
+  MaybeObject* maybe_code = compiler.CompileLoadMegamorphic(receiver_maps,
+                                                            targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
   isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
-  return code;
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
 }
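// ConstructMegamorphicStub above compiles a single stub that compares the
// incoming receiver's map against every collected map and tail-calls the
// matching monomorphic handler. Interpreted sketch of that dispatch, with
// maps and handlers reduced to plain pointers and functions:

typedef int (*HandlerFn)(int receiver);

struct MapDispatchSketch {
  const void* maps[4];
  HandlerFn handlers[4];
  int count;
  int Dispatch(const void* receiver_map, int receiver) const {
    for (int i = 0; i < count; i++) {
      if (maps[i] == receiver_map) return handlers[i](receiver);
    }
    return -1;  // Miss: a real stub would fall through to the runtime.
  }
};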
 
 
@@ -1073,8 +1107,9 @@
                                bool force_generic_stub) {
   // Check for values that can be converted into a symbol.
   // TODO(1295): Remove this code.
+  HandleScope scope(isolate());
   if (key->IsHeapNumber() &&
-      isnan(Handle<HeapNumber>::cast(key)->value())) {
+      isnan(HeapNumber::cast(*key)->value())) {
     key = isolate()->factory()->nan_symbol();
   } else if (key->IsUndefined()) {
     key = isolate()->factory()->undefined_symbol();
@@ -1096,11 +1131,16 @@
       if (object->IsString() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<String> string = Handle<String>::cast(object);
-        Handle<Code> code =
-            isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
-        ASSERT(!code.is_null());
-        set_target(*code);
-        TRACE_IC("KeyedLoadIC", name, state, target());
+        Object* code = NULL;
+        { MaybeObject* maybe_code =
+              isolate()->stub_cache()->ComputeKeyedLoadStringLength(*name,
+                                                                    *string);
+          if (!maybe_code->ToObject(&code)) return maybe_code;
+        }
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
         return Smi::FromInt(string->length());
       }
 
@@ -1108,25 +1148,34 @@
       if (object->IsJSArray() &&
           name->Equals(isolate()->heap()->length_symbol())) {
         Handle<JSArray> array = Handle<JSArray>::cast(object);
-        Handle<Code> code =
-            isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
-        ASSERT(!code.is_null());
-        set_target(*code);
-        TRACE_IC("KeyedLoadIC", name, state, target());
-        return array->length();
+        Object* code;
+        { MaybeObject* maybe_code =
+              isolate()->stub_cache()->ComputeKeyedLoadArrayLength(*name,
+                                                                   *array);
+          if (!maybe_code->ToObject(&code)) return maybe_code;
+        }
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
+        return JSArray::cast(*object)->length();
       }
 
       // Use specialized code for getting prototype of functions.
       if (object->IsJSFunction() &&
           name->Equals(isolate()->heap()->prototype_symbol()) &&
-          Handle<JSFunction>::cast(object)->should_have_prototype()) {
+          JSFunction::cast(*object)->should_have_prototype()) {

         Handle<JSFunction> function = Handle<JSFunction>::cast(object);
-        Handle<Code> code =
-            isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
-                name, function);
-        ASSERT(!code.is_null());
-        set_target(*code);
-        TRACE_IC("KeyedLoadIC", name, state, target());
+        Object* code;
+        { MaybeObject* maybe_code =
+              isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
+                  *name, *function);
+          if (!maybe_code->ToObject(&code)) return maybe_code;
+        }
+        set_target(Code::cast(code));
+#ifdef DEBUG
+        TraceIC("KeyedLoadIC", name, state, target());
+#endif  // DEBUG
         return Accessors::FunctionGetPrototype(*object, 0);
       }
     }
@@ -1135,14 +1184,15 @@
     // the element or char if so.
     uint32_t index = 0;
     if (name->AsArrayIndex(&index)) {
+      HandleScope scope(isolate());
       // Rewrite to the generic keyed load stub.
-      if (FLAG_use_ic) set_target(*generic_stub());
+      if (FLAG_use_ic) set_target(generic_stub());
       return Runtime::GetElementOrCharAt(isolate(), object, index);
     }
 
     // Named lookup.
-    LookupResult lookup(isolate());
-    LookupForRead(object, name, &lookup);
+    LookupResult lookup;
+    LookupForRead(*object, *name, &lookup);
 
     // If we did not find a property, check if we need to throw an exception.
     if (!lookup.IsProperty() && IsContextual(object)) {
@@ -1154,17 +1204,19 @@
     }
 
     PropertyAttributes attr;
-    if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
+    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
       // Get the property.
-      Handle<Object> result =
-          Object::GetProperty(object, object, &lookup, name, &attr);
-      RETURN_IF_EMPTY_HANDLE(isolate(), result);
+      Object* result;
+      { MaybeObject* maybe_result =
+            object->GetProperty(*object, &lookup, *name, &attr);
+        if (!maybe_result->ToObject(&result)) return maybe_result;
+      }
       // If the property is not present, check if we need to throw an
       // exception.
       if (attr == ABSENT && IsContextual(object)) {
         return ReferenceError("not_defined", name);
       }
-      return *result;
+      return result;
     }
 
     return object->GetProperty(*object, &lookup, *name, &attr);
@@ -1175,40 +1227,44 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
 
   if (use_ic) {
-    Handle<Code> stub = generic_stub();
+    Code* stub = generic_stub();
     if (!force_generic_stub) {
       if (object->IsString() && key->IsNumber()) {
         if (state == UNINITIALIZED) {
           stub = string_stub();
         }
       } else if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        if (receiver->elements()->map() ==
-            isolate()->heap()->non_strict_arguments_elements_map()) {
+        JSObject* receiver = JSObject::cast(*object);
+        Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+        Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+        if (elements_map == heap->non_strict_arguments_elements_map()) {
           stub = non_strict_arguments_stub();
         } else if (receiver->HasIndexedInterceptor()) {
           stub = indexed_interceptor_stub();
-        } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
-          stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
+        } else if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+          MaybeObject* maybe_stub = ComputeStub(receiver,
+                                                false,
+                                                kNonStrictMode,
+                                                stub);
+          stub = maybe_stub->IsFailure() ?
+              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
         }
       }
-    } else {
-      TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
     }
-    if (!stub.is_null()) set_target(*stub);
+    if (stub != NULL) set_target(stub);
   }
 
-  TRACE_IC("KeyedLoadIC", key, state, target());
+#ifdef DEBUG
+  TraceIC("KeyedLoadIC", key, state, target());
+#endif  // DEBUG
 
   // Get the property.
   return Runtime::GetObjectProperty(isolate(), object, key);
 }
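// The keyed-load path above degrades gracefully: if ComputeStub fails, the
// NULL result simply means "do not retarget the call site", and the runtime
// call below still services the access. Sketch of the stub selection order
// (conditions collapsed to booleans; names illustrative):

enum KeyedLoadStub { kGenericStub, kStringStub, kArgumentsStub,
                     kInterceptorStub, kComputedStub };

static KeyedLoadStub SelectKeyedLoadStub(bool string_with_number_key,
                                         bool uninitialized,
                                         bool arguments_elements,
                                         bool indexed_interceptor,
                                         bool smi_key) {
  if (string_with_number_key) {
    return uninitialized ? kStringStub : kGenericStub;
  }
  if (arguments_elements) return kArgumentsStub;
  if (indexed_interceptor) return kInterceptorStub;
  if (smi_key) return kComputedStub;  // ComputeStub; may fail -> no retarget.
  return kGenericStub;
}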
 
 
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
-                               State state,
-                               Handle<Object> object,
-                               Handle<String> name) {
+void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state,
+                               Handle<Object> object, Handle<String> name) {
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
@@ -1218,89 +1274,97 @@
   if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
 
   // Compute the code stub for this load.
-  Handle<Code> code;
+  MaybeObject* maybe_code = NULL;
+  Object* code;
 
   if (state == UNINITIALIZED) {
     // This is the first time we execute this inline cache.
     // Set the target to the pre monomorphic stub to delay
     // setting the monomorphic state.
-    code = pre_monomorphic_stub();
+    maybe_code = pre_monomorphic_stub();
   } else {
     // Compute a monomorphic stub.
-    Handle<JSObject> holder(lookup->holder());
     switch (lookup->type()) {
-      case FIELD:
-        code = isolate()->stub_cache()->ComputeKeyedLoadField(
-            name, receiver, holder, lookup->GetFieldIndex());
+      case FIELD: {
+        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadField(
+            *name, *receiver, lookup->holder(), lookup->GetFieldIndex());
         break;
+      }
       case CONSTANT_FUNCTION: {
-        Handle<JSFunction> constant(lookup->GetConstantFunction());
-        code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
-            name, receiver, holder, constant);
+        Object* constant = lookup->GetConstantFunction();
+        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
+            *name, *receiver, lookup->holder(), constant);
         break;
       }
       case CALLBACKS: {
-        Handle<Object> callback_object(lookup->GetCallbackObject());
-        if (!callback_object->IsAccessorInfo()) return;
-        Handle<AccessorInfo> callback =
-            Handle<AccessorInfo>::cast(callback_object);
+        if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+        AccessorInfo* callback =
+            AccessorInfo::cast(lookup->GetCallbackObject());
         if (v8::ToCData<Address>(callback->getter()) == 0) return;
-        code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
-            name, receiver, holder, callback);
+        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
+            *name, *receiver, lookup->holder(), callback);
         break;
       }
-      case INTERCEPTOR:
+      case INTERCEPTOR: {
         ASSERT(HasInterceptorGetter(lookup->holder()));
-        code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
-            name, receiver, holder);
+        maybe_code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+            *name, *receiver, lookup->holder());
         break;
-      default:
+      }
+      default: {
         // Always rewrite to the generic case so that we do not
         // repeatedly try to rewrite.
-        code = generic_stub();
+        maybe_code = generic_stub();
         break;
+      }
     }
   }
 
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(*code);
+    set_target(Code::cast(code));
   } else if (state == MONOMORPHIC) {
-    set_target(*megamorphic_stub());
+    set_target(megamorphic_stub());
   }
 
-  TRACE_IC("KeyedLoadIC", name, state, target());
+#ifdef DEBUG
+  TraceIC("KeyedLoadIC", name, state, target());
+#endif
 }
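// The patching rule above only ever widens a call site: uninitialized and
// pre-monomorphic sites take the freshly computed stub, a monomorphic site
// that misses jumps straight to megamorphic, and a megamorphic site is left
// alone. Sketch:

enum KeyedIcState { kKUninitialized, kKPremonomorphic, kKMonomorphic,
                    kKMegamorphic };

static const char* NextKeyedTarget(KeyedIcState state,
                                   const char* computed_stub) {
  if (state == kKUninitialized || state == kKPremonomorphic) {
    return computed_stub;
  }
  if (state == kKMonomorphic) return "megamorphic_stub";
  return 0;  // Already megamorphic.
}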
 
 
 static bool StoreICableLookup(LookupResult* lookup) {
   // Bail out if we didn't find a result.
-  if (!lookup->IsFound() || lookup->type() == NULL_DESCRIPTOR) return false;
+  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return false;
 
-  // Bail out if inline caching is not allowed.
-  if (!lookup->IsCacheable()) return false;
-
-  // If the property is read-only, we leave the IC in its current state.
+  // If the property is read-only, we leave the IC in its current
+  // state.
   if (lookup->IsReadOnly()) return false;
 
   return true;
 }
 
 
-static bool LookupForWrite(Handle<JSObject> receiver,
-                           Handle<String> name,
+static bool LookupForWrite(JSReceiver* receiver,
+                           String* name,
                            LookupResult* lookup) {
-  receiver->LocalLookup(*name, lookup);
+  receiver->LocalLookup(name, lookup);
   if (!StoreICableLookup(lookup)) {
     return false;
   }
 
-  if (lookup->type() == INTERCEPTOR &&
-      receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
-    receiver->LocalLookupRealNamedProperty(*name, lookup);
-    return StoreICableLookup(lookup);
+  if (lookup->type() == INTERCEPTOR) {
+    JSObject* object = JSObject::cast(receiver);
+    if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
+      object->LocalLookupRealNamedProperty(name, lookup);
+      return StoreICableLookup(lookup);
+    }
   }
 
   return true;
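// LookupForWrite above admits a store for caching only when the lookup hit a
// real (or transitioning), cacheable, writable slot; an INTERCEPTOR result
// whose interceptor lacks a setter is re-resolved against the real named
// property first. The writability guard, as a sketch with booleans standing
// in for the LookupResult queries:

static bool StoreIsCacheableSketch(bool property_or_transition,
                                   bool cacheable,
                                   bool read_only) {
  if (!property_or_transition || !cacheable) return false;
  return !read_only;  // Read-only slots leave the IC in its current state.
}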
@@ -1312,69 +1376,65 @@
                             Handle<Object> object,
                             Handle<String> name,
                             Handle<Object> value) {
-  if (!object->IsJSObject()) {
-    // Handle proxies.
-    if (object->IsJSProxy()) {
-      return JSProxy::cast(*object)->
-          SetProperty(*name, *value, NONE, strict_mode);
-    }
+  // If the object is undefined or null it's illegal to try to set any
+  // properties on it; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_store", object, name);
+  }
 
-    // If the object is undefined or null it's illegal to try to set any
-    // properties on it; throw a TypeError in that case.
-    if (object->IsUndefined() || object->IsNull()) {
-      return TypeError("non_object_property_store", object, name);
-    }
-
+  if (!object->IsJSReceiver()) {
     // The length property of string values is read-only. Throw in strict mode.
     if (strict_mode == kStrictMode && object->IsString() &&
         name->Equals(isolate()->heap()->length_symbol())) {
       return TypeError("strict_read_only_property", object, name);
     }
-    // Ignore other stores where the receiver is not a JSObject.
-    // TODO(1475): Must check prototype chains of object wrappers.
+    // Ignore stores where the receiver is not a JSObject.
     return *value;
   }
 
+  // Handle proxies.
+  if (object->IsJSProxy()) {
+    return JSReceiver::cast(*object)->
+        SetProperty(*name, *value, NONE, strict_mode);
+  }
+
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
   // Check if the given name is an array index.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    Handle<Object> result =
-        JSObject::SetElement(receiver, index, value, NONE, strict_mode);
-    RETURN_IF_EMPTY_HANDLE(isolate(), result);
+    HandleScope scope(isolate());
+    Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+    if (result.is_null()) return Failure::Exception();
     return *value;
   }
 
-  // Use specialized code for setting the length of arrays with fast
-  // properties.  Slow properties might indicate redefinition of the
-  // length property.
-  if (receiver->IsJSArray() &&
-      name->Equals(isolate()->heap()->length_symbol()) &&
-      Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
-      receiver->HasFastProperties()) {
+  // Use specialized code for setting the length of arrays.
+  if (receiver->IsJSArray() &&
+      name->Equals(isolate()->heap()->length_symbol()) &&
+      JSArray::cast(*receiver)->AllowsSetElementsLength()) {
 #ifdef DEBUG
     if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
 #endif
-    Handle<Code> stub = (strict_mode == kStrictMode)
-        ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
-        : isolate()->builtins()->StoreIC_ArrayLength();
-    set_target(*stub);
+    Builtins::Name target = (strict_mode == kStrictMode)
+        ? Builtins::kStoreIC_ArrayLength_Strict
+        : Builtins::kStoreIC_ArrayLength;
+    set_target(isolate()->builtins()->builtin(target));
     return receiver->SetProperty(*name, *value, NONE, strict_mode);
   }
 
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
-    LookupResult lookup(isolate());
+    LookupResult lookup;
 
-    if (LookupForWrite(receiver, name, &lookup)) {
+    if (LookupForWrite(*receiver, *name, &lookup)) {
       // Generate a stub for this store.
       UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     } else {
       // Strict mode doesn't allow setting non-existent global property
       // or an assignment to a read only property.
       if (strict_mode == kStrictMode) {
-        if (lookup.IsProperty() && lookup.IsReadOnly()) {
+        if (lookup.IsFound() && lookup.IsReadOnly()) {
           return TypeError("strict_read_only_property", object, name);
         } else if (IsContextual(object)) {
           return ReferenceError("not_defined", name);
@@ -1384,15 +1444,16 @@
   }
 
   if (receiver->IsJSGlobalProxy()) {
-    // TODO(ulan): find out why we patch this site even with --no-use-ic
     // Generate a generic stub that goes to the runtime when we see a global
     // proxy as receiver.
-    Handle<Code> stub = (strict_mode == kStrictMode)
+    Code* stub = (strict_mode == kStrictMode)
         ? global_proxy_stub_strict()
         : global_proxy_stub();
-    if (target() != *stub) {
-      set_target(*stub);
-      TRACE_IC("StoreIC", name, state, target());
+    if (target() != stub) {
+      set_target(stub);
+#ifdef DEBUG
+      TraceIC("StoreIC", name, state, target());
+#endif
     }
   }
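// When a strict-mode store cannot be cached, the code above still has to
// classify the failure: assigning to a read-only property is a TypeError,
// while a contextual store to an undeclared name is a ReferenceError.
// Decision sketch (booleans stand in for the lookup queries):

static const char* StrictStoreErrorSketch(bool found, bool read_only,
                                          bool contextual) {
  if (found && read_only) return "strict_read_only_property";  // TypeError.
  if (contextual) return "not_defined";  // ReferenceError.
  return 0;  // No error: fall through to the normal SetProperty path.
}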
 
@@ -1407,12 +1468,10 @@
                            Handle<JSObject> receiver,
                            Handle<String> name,
                            Handle<Object> value) {
+  // The receiver is never a JSGlobalProxy here; the caller bails out first.
   ASSERT(!receiver->IsJSGlobalProxy());
+
   ASSERT(StoreICableLookup(lookup));
-  // These are not cacheable, so we never see such LookupResults here.
-  ASSERT(lookup->type() != HANDLER);
-  // We get only called for properties or transitions, see StoreICableLookup.
-  ASSERT(lookup->type() != NULL_DESCRIPTOR);
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1422,87 +1481,89 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  Handle<Code> code;
+  MaybeObject* maybe_code = NULL;
+  Object* code = NULL;
   switch (type) {
-    case FIELD:
-      code = isolate()->stub_cache()->ComputeStoreField(name,
-                                                        receiver,
-                                                        lookup->GetFieldIndex(),
-                                                        Handle<Map>::null(),
-                                                        strict_mode);
-      break;
-    case MAP_TRANSITION: {
-      if (lookup->GetAttributes() != NONE) return;
-      Handle<Map> transition(lookup->GetTransitionMap());
-      int index = transition->PropertyIndexFor(*name);
-      code = isolate()->stub_cache()->ComputeStoreField(
-          name, receiver, index, transition, strict_mode);
+    case FIELD: {
+      maybe_code = isolate()->stub_cache()->ComputeStoreField(
+          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
       break;
     }
-    case NORMAL:
+    case MAP_TRANSITION: {
+      if (lookup->GetAttributes() != NONE) return;
+      HandleScope scope(isolate());
+      ASSERT(type == MAP_TRANSITION);
+      Handle<Map> transition(lookup->GetTransitionMap());
+      int index = transition->PropertyIndexFor(*name);
+      maybe_code = isolate()->stub_cache()->ComputeStoreField(
+          *name, *receiver, index, *transition, strict_mode);
+      break;
+    }
+    case NORMAL: {
       if (receiver->IsGlobalObject()) {
         // The stub generated for the global object picks the value directly
         // from the property cell. So the property must be directly on the
         // global object.
         Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-        Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
-        code = isolate()->stub_cache()->ComputeStoreGlobal(
-            name, global, cell, strict_mode);
+        JSGlobalPropertyCell* cell =
+            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+        maybe_code = isolate()->stub_cache()->ComputeStoreGlobal(
+            *name, *global, cell, strict_mode);
       } else {
         if (lookup->holder() != *receiver) return;
-        code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
+        maybe_code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
       }
       break;
+    }
     case CALLBACKS: {
-      Handle<Object> callback_object(lookup->GetCallbackObject());
-      if (!callback_object->IsAccessorInfo()) return;
-      Handle<AccessorInfo> callback =
-          Handle<AccessorInfo>::cast(callback_object);
+      if (!lookup->GetCallbackObject()->IsAccessorInfo()) return;
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
       if (v8::ToCData<Address>(callback->setter()) == 0) return;
-      code = isolate()->stub_cache()->ComputeStoreCallback(
-          name, receiver, callback, strict_mode);
+      maybe_code = isolate()->stub_cache()->ComputeStoreCallback(
+          *name, *receiver, callback, strict_mode);
       break;
     }
-    case INTERCEPTOR:
+    case INTERCEPTOR: {
       ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
-      code = isolate()->stub_cache()->ComputeStoreInterceptor(
-          name, receiver, strict_mode);
+      maybe_code = isolate()->stub_cache()->ComputeStoreInterceptor(
+          *name, *receiver, strict_mode);
       break;
-    case CONSTANT_FUNCTION:
-    case CONSTANT_TRANSITION:
-    case ELEMENTS_TRANSITION:
-      return;
-    case HANDLER:
-    case NULL_DESCRIPTOR:
-      UNREACHABLE();
+    }
+    default:
       return;
   }
 
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
+
   // Patch the call site depending on the state of the cache.
   if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
-    set_target(*code);
+    set_target(Code::cast(code));
   } else if (state == MONOMORPHIC) {
     // Only move to megamorphic if the target changes.
-    if (target() != *code) {
+    if (target() != Code::cast(code)) {
       set_target((strict_mode == kStrictMode)
                    ? megamorphic_stub_strict()
                    : megamorphic_stub());
     }
   } else if (state == MEGAMORPHIC) {
     // Update the stub cache.
-    isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+    isolate()->stub_cache()->Set(*name,
+                                 receiver->map(),
+                                 Code::cast(code));
   }
 
-  TRACE_IC("StoreIC", name, state, target());
+#ifdef DEBUG
+  TraceIC("StoreIC", name, state, target());
+#endif
 }
 
 
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
-                                       Handle<Map> new_receiver_map) {
-  ASSERT(!new_receiver_map.is_null());
+static bool AddOneReceiverMapIfMissing(MapList* receiver_maps,
+                                       Map* new_receiver_map) {
   for (int current = 0; current < receiver_maps->length(); ++current) {
-    if (!receiver_maps->at(current).is_null() &&
-        receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+    if (receiver_maps->at(current) == new_receiver_map) {
       return false;
     }
   }
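// AddOneReceiverMapIfMissing above is a plain linear-scan set-insert: it
// reports false for duplicates so the caller can tell a genuinely new map
// from a repeat miss. Standalone sketch over raw pointers:

static bool AddIfMissingSketch(const void** maps, int* length,
                               const void* new_map) {
  for (int i = 0; i < *length; i++) {
    if (maps[i] == new_map) return false;  // Seen before: not a new map.
  }
  maps[(*length)++] = new_map;
  return true;
}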
@@ -1511,261 +1572,159 @@
 }
 
 
-void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
-                                     MapHandleList* result) {
+void KeyedIC::GetReceiverMapsForStub(Code* stub, MapList* result) {
   ASSERT(stub->is_inline_cache_stub());
-  if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
-    return result->Add(isolate()->factory()->string_map());
+  if (stub == string_stub()) {
+    return result->Add(isolate()->heap()->string_map());
   } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
     if (stub->ic_state() == MONOMORPHIC) {
-      result->Add(Handle<Map>(stub->FindFirstMap()));
+      result->Add(Map::cast(stub->FindFirstMap()));
     } else {
       ASSERT(stub->ic_state() == MEGAMORPHIC);
       AssertNoAllocation no_allocation;
       int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-      for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
+      for (RelocIterator it(stub, mask); !it.done(); it.next()) {
         RelocInfo* info = it.rinfo();
-        Handle<Object> object(info->target_object());
+        Object* object = info->target_object();
         ASSERT(object->IsMap());
-        AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
+        result->Add(Map::cast(object));
       }
     }
   }
 }
 
 
-Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
-                                  StubKind stub_kind,
+MaybeObject* KeyedIC::ComputeStub(JSObject* receiver,
+                                  bool is_store,
                                   StrictModeFlag strict_mode,
-                                  Handle<Code> generic_stub) {
+                                  Code* generic_stub) {
   State ic_state = target()->ic_state();
-  KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
-      ? ALLOW_JSARRAY_GROWTH
-      : DO_NOT_ALLOW_JSARRAY_GROWTH;
+  if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+    Code* monomorphic_stub;
+    MaybeObject* maybe_stub = ComputeMonomorphicStub(receiver,
+                                                     is_store,
+                                                     strict_mode,
+                                                     generic_stub);
+    if (!maybe_stub->To(&monomorphic_stub)) return maybe_stub;
+
+    return monomorphic_stub;
+  }
+  ASSERT(target() != generic_stub);
 
   // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
   // via megamorphic stubs, since they don't have a map in their relocation info
   // and so the stubs can't be harvested for the object needed for a map check.
   if (target()->type() != NORMAL) {
-    TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
     return generic_stub;
   }
 
-  bool monomorphic = false;
-  MapHandleList target_receiver_maps;
-  if (ic_state != UNINITIALIZED && ic_state != PREMONOMORPHIC) {
-    GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
-  }
-  if (!IsTransitionStubKind(stub_kind)) {
-    if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
-      monomorphic = true;
-    } else {
-      if (ic_state == MONOMORPHIC) {
-        // The first time a receiver is seen that is a transitioned version of
-        // the previous monomorphic receiver type, assume the new ElementsKind
-        // is the monomorphic type. This benefits global arrays that only
-        // transition once, and all call sites accessing them are faster if they
-        // remain monomorphic. If this optimistic assumption is not true, the IC
-        // will miss again and it will become polymorphic and support both the
-        // untransitioned and transitioned maps.
-        monomorphic = IsMoreGeneralElementsKindTransition(
-            target_receiver_maps.at(0)->elements_kind(),
-            receiver->GetElementsKind());
-      }
-    }
-  }
-
-  if (monomorphic) {
-    return ComputeMonomorphicStub(
-        receiver, stub_kind, strict_mode, generic_stub);
-  }
-  ASSERT(target() != *generic_stub);
-
   // Determine the list of receiver maps that this call site has seen,
   // adding the map that was just encountered.
-  Handle<Map> receiver_map(receiver->map());
-  bool map_added =
-      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
-  if (IsTransitionStubKind(stub_kind)) {
-    Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
-    map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
-  }
-  if (!map_added) {
-    // If the miss wasn't due to an unseen map, a polymorphic stub
+  MapList target_receiver_maps;
+  GetReceiverMapsForStub(target(), &target_receiver_maps);
+  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver->map())) {
+    // If the miss wasn't due to an unseen map, a MEGAMORPHIC stub
     // won't help, use the generic stub.
-    TRACE_GENERIC_IC("KeyedIC", "same map added twice");
     return generic_stub;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
     return generic_stub;
   }
 
-  if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
-       ALLOW_JSARRAY_GROWTH)) {
-    grow_mode = ALLOW_JSARRAY_GROWTH;
+  PolymorphicCodeCache* cache = isolate()->heap()->polymorphic_code_cache();
+  Code::Flags flags = Code::ComputeFlags(this->kind(),
+                                         MEGAMORPHIC,
+                                         strict_mode);
+  Object* maybe_cached_stub = cache->Lookup(&target_receiver_maps, flags);
+  // If there is a cached stub, use it.
+  if (!maybe_cached_stub->IsUndefined()) {
+    ASSERT(maybe_cached_stub->IsCode());
+    return Code::cast(maybe_cached_stub);
   }
-
-  Handle<PolymorphicCodeCache> cache =
-      isolate()->factory()->polymorphic_code_cache();
-  Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
-                                                             strict_mode);
-  Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
-  Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  Handle<Code> stub =
-      ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
-  PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
+  // Collect MONOMORPHIC stubs for all target_receiver_maps.
+  CodeList handler_ics(target_receiver_maps.length());
+  for (int i = 0; i < target_receiver_maps.length(); ++i) {
+    Map* receiver_map(target_receiver_maps.at(i));
+    MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck(
+        receiver_map, strict_mode);
+    Code* cached_stub;
+    if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub;
+    handler_ics.Add(cached_stub);
+  }
+  // Build the MEGAMORPHIC stub.
+  Code* stub;
+  MaybeObject* maybe_stub = ConstructMegamorphicStub(&target_receiver_maps,
+                                                     &handler_ics,
+                                                     strict_mode);
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  MaybeObject* maybe_update = cache->Update(&target_receiver_maps, flags, stub);
+  if (maybe_update->IsFailure()) return maybe_update;
   return stub;
 }
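// ComputeStub above consults the heap's polymorphic code cache before
// compiling: the key is the set of receiver maps plus the code flags, so
// call sites that have seen the same map combination share one megamorphic
// stub. Sketch of the lookup-then-update discipline (linear toy cache;
// assumes map_count <= 4 and spare capacity; types illustrative):

#include <cstddef>

struct PolyCacheEntrySketch {
  const void* maps[4];
  int map_count;
  unsigned flags;
  const void* code;
};

static const void* LookupOrInsert(PolyCacheEntrySketch* cache, int* size,
                                  const void* maps[], int map_count,
                                  unsigned flags, const void* new_code) {
  for (int i = 0; i < *size; i++) {
    PolyCacheEntrySketch* e = &cache[i];
    if (e->flags != flags || e->map_count != map_count) continue;
    bool same = true;
    for (int j = 0; j < map_count; j++) same = same && e->maps[j] == maps[j];
    if (same) return e->code;  // Reuse the stub compiled for this map set.
  }
  PolyCacheEntrySketch* e = &cache[(*size)++];
  for (int j = 0; j < map_count; j++) e->maps[j] = maps[j];
  e->map_count = map_count;
  e->flags = flags;
  e->code = new_code;
  return new_code;
}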
 
 
-Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
-    Handle<Map> receiver_map,
-    StrictModeFlag strict_mode,
-    KeyedAccessGrowMode grow_mode) {
+MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
+    Map* receiver_map,
+    StrictModeFlag strict_mode) {
   if ((receiver_map->instance_type() & kNotStringTag) == 0) {
-    ASSERT(!string_stub().is_null());
+    ASSERT(string_stub() != NULL);
     return string_stub();
   } else {
     ASSERT(receiver_map->has_dictionary_elements() ||
            receiver_map->has_fast_elements() ||
-           receiver_map->has_fast_smi_only_elements() ||
            receiver_map->has_fast_double_elements() ||
            receiver_map->has_external_array_elements());
     bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
     return GetElementStubWithoutMapCheck(is_js_array,
-                                         receiver_map->elements_kind(),
-                                         grow_mode);
+                                         receiver_map->elements_kind());
   }
 }
 
 
-Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<JSObject> receiver,
-                                             StubKind stub_kind,
+MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver,
+                                             bool is_store,
                                              StrictModeFlag strict_mode,
-                                             Handle<Code> generic_stub) {
+                                             Code* generic_stub) {
+  Code* result = NULL;
   if (receiver->HasFastElements() ||
-      receiver->HasFastSmiOnlyElements() ||
       receiver->HasExternalArrayElements() ||
       receiver->HasFastDoubleElements() ||
       receiver->HasDictionaryElements()) {
-    return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
-        receiver, stub_kind, strict_mode);
+    MaybeObject* maybe_stub =
+        isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
+            receiver, is_store, strict_mode);
+    if (!maybe_stub->To(&result)) return maybe_stub;
   } else {
-    return generic_stub;
+    result = generic_stub;
   }
+  return result;
 }
 
 
-Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
-                                            StubKind stub_kind) {
-  switch (stub_kind) {
-    case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
-    case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
-      return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
-      break;
-    case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
-    case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
-      return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
-      break;
-    default:
-      UNREACHABLE();
-      return Handle<Map>::null();
-  }
-}
-
-
-Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
+MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck(
     bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
-  return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
+    ElementsKind elements_kind) {
+  return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
 }
 
 
-Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
-    MapHandleList* receiver_maps,
-    StrictModeFlag strict_mode,
-    KeyedAccessGrowMode grow_mode) {
-  // Collect MONOMORPHIC stubs for all target_receiver_maps.
-  CodeHandleList handler_ics(receiver_maps->length());
-  MapHandleList transitioned_maps(receiver_maps->length());
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map(receiver_maps->at(i));
-    Handle<Code> cached_stub;
-    Handle<Map> transitioned_map =
-        receiver_map->FindTransitionedMap(receiver_maps);
-    if (!transitioned_map.is_null()) {
-      cached_stub = ElementsTransitionAndStoreStub(
-          receiver_map->elements_kind(),  // original elements_kind
-          transitioned_map->elements_kind(),
-          receiver_map->instance_type() == JS_ARRAY_TYPE,  // is_js_array
-          strict_mode, grow_mode).GetCode();
-    } else {
-      cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
-                                                          strict_mode,
-                                                          grow_mode);
-    }
-    ASSERT(!cached_stub.is_null());
-    handler_ics.Add(cached_stub);
-    transitioned_maps.Add(transitioned_map);
-  }
-  KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
-  Handle<Code> code = compiler.CompileStorePolymorphic(
-      receiver_maps, &handler_ics, &transitioned_maps);
+MaybeObject* KeyedStoreIC::ConstructMegamorphicStub(
+    MapList* receiver_maps,
+    CodeList* targets,
+    StrictModeFlag strict_mode) {
+  Object* object;
+  KeyedStoreStubCompiler compiler(strict_mode);
+  MaybeObject* maybe_code = compiler.CompileStoreMegamorphic(receiver_maps,
+                                                             targets);
+  if (!maybe_code->ToObject(&object)) return maybe_code;
   isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
-  return code;
-}
-
-
-KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
-                                            Handle<Object> key,
-                                            Handle<Object> value) {
-  ASSERT(key->IsSmi());
-  int index = Smi::cast(*key)->value();
-  bool allow_growth = receiver->IsJSArray() &&
-      JSArray::cast(*receiver)->length()->IsSmi() &&
-      index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
-
-  if (allow_growth) {
-    // Handle growing array in stub if necessary.
-    if (receiver->HasFastSmiOnlyElements()) {
-      if (value->IsHeapNumber()) {
-        return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
-      }
-      if (value->IsHeapObject()) {
-        return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
-      }
-    } else if (receiver->HasFastDoubleElements()) {
-      if (!value->IsSmi() && !value->IsHeapNumber()) {
-        return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
-      }
-    }
-    return STORE_AND_GROW_NO_TRANSITION;
-  } else {
-    // Handle only in-bounds elements accesses.
-    if (receiver->HasFastSmiOnlyElements()) {
-      if (value->IsHeapNumber()) {
-        return STORE_TRANSITION_SMI_TO_DOUBLE;
-      } else if (value->IsHeapObject()) {
-        return STORE_TRANSITION_SMI_TO_OBJECT;
-      }
-    } else if (receiver->HasFastDoubleElements()) {
-      if (!value->IsSmi() && !value->IsHeapNumber()) {
-        return STORE_TRANSITION_DOUBLE_TO_OBJECT;
-      }
-    }
-    return STORE_NO_TRANSITION;
-  }
+  PROFILE(isolate(), CodeCreateEvent(
+      Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG,
+      Code::cast(object), 0));
+  return object;
 }
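// The deleted GetStubKind above belongs to the newer V8 being reverted away:
// it classified a keyed store by whether the stored value forces an
// elements-kind transition (smi-only -> double -> object), with parallel
// "grow" variants for out-of-bounds array stores. Compilable sketch of the
// transition decision alone:

enum ElementsTransition { kNoTransition, kSmiToDouble, kSmiToObject,
                          kDoubleToObject };

static ElementsTransition ClassifyStore(bool smi_only_elements,
                                        bool double_elements,
                                        bool value_is_smi,
                                        bool value_is_heap_number) {
  if (smi_only_elements) {
    if (value_is_heap_number) return kSmiToDouble;
    if (!value_is_smi) return kSmiToObject;  // Any other heap object.
  } else if (double_elements) {
    if (!value_is_smi && !value_is_heap_number) return kDoubleToObject;
  }
  return kNoTransition;
}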
 
 
@@ -1778,12 +1737,6 @@
   if (key->IsSymbol()) {
     Handle<String> name = Handle<String>::cast(key);
 
-    // Handle proxies.
-    if (object->IsJSProxy()) {
-      return JSProxy::cast(*object)->SetProperty(
-          *name, *value, NONE, strict_mode);
-    }
-
     // If the object is undefined or null it's illegal to try to set any
     // properties on it; throw a TypeError in that case.
     if (object->IsUndefined() || object->IsNull()) {
@@ -1797,18 +1750,19 @@
     // Check if the given name is an array index.
     uint32_t index;
     if (name->AsArrayIndex(&index)) {
-      Handle<Object> result =
-          JSObject::SetElement(receiver, index, value, NONE, strict_mode);
-      RETURN_IF_EMPTY_HANDLE(isolate(), result);
+      HandleScope scope(isolate());
+      Handle<Object> result = SetElement(receiver, index, value, strict_mode);
+      if (result.is_null()) return Failure::Exception();
       return *value;
     }
 
+    // Lookup the property locally in the receiver.
+    LookupResult lookup;
+    receiver->LocalLookup(*name, &lookup);
+
     // Update inline cache and stub cache.
-    if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
-      LookupResult lookup(isolate());
-      if (LookupForWrite(receiver, name, &lookup)) {
-        UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
-      }
+    if (FLAG_use_ic) {
+      UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
     }
 
     // Set the property.
@@ -1821,27 +1775,33 @@
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
   if (use_ic) {
-    Handle<Code> stub = (strict_mode == kStrictMode)
+    Code* stub = (strict_mode == kStrictMode)
         ? generic_stub_strict()
         : generic_stub();
     if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->elements()->map() ==
-          isolate()->heap()->non_strict_arguments_elements_map()) {
+      JSObject* receiver = JSObject::cast(*object);
+      Heap* heap = Handle<JSObject>::cast(object)->GetHeap();
+      Map* elements_map = Handle<JSObject>::cast(object)->elements()->map();
+      if (elements_map == heap->non_strict_arguments_elements_map()) {
         stub = non_strict_arguments_stub();
       } else if (!force_generic) {
-        if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
-          StubKind stub_kind = GetStubKind(receiver, key, value);
-          stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
+        if (key->IsSmi() && (target() != non_strict_arguments_stub())) {
+          HandleScope scope(isolate());
+          MaybeObject* maybe_stub = ComputeStub(receiver,
+                                                true,
+                                                strict_mode,
+                                                stub);
+          stub = maybe_stub->IsFailure() ?
+              NULL : Code::cast(maybe_stub->ToObjectUnchecked());
         }
-      } else {
-        TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
       }
     }
-    if (!stub.is_null()) set_target(*stub);
+    if (stub != NULL) set_target(stub);
   }
 
-  TRACE_IC("KeyedStoreIC", key, state, target());
+#ifdef DEBUG
+  TraceIC("KeyedStoreIC", key, state, target());
+#endif
 
   // Set the property.
   return Runtime::SetObjectProperty(
@@ -1855,12 +1815,15 @@
                                 Handle<JSObject> receiver,
                                 Handle<String> name,
                                 Handle<Object> value) {
-  ASSERT(!receiver->IsJSGlobalProxy());
-  ASSERT(StoreICableLookup(lookup));
-  // These are not cacheable, so we never see such LookupResults here.
-  ASSERT(lookup->type() != HANDLER);
-  // We get only called for properties or transitions, see StoreICableLookup.
-  ASSERT(lookup->type() != NULL_DESCRIPTOR);
+  // Skip JSGlobalProxy.
+  if (receiver->IsJSGlobalProxy()) return;
+
+  // Bail out if we didn't find a result.
+  if (!lookup->IsPropertyOrTransition() || !lookup->IsCacheable()) return;
+
+  // If the property is read-only, we leave the IC in its current
+  // state.
+  if (lookup->IsReadOnly()) return;
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
@@ -1870,68 +1833,75 @@
   // Compute the code stub for this store; used for rewriting to
   // monomorphic state and making sure that the code stub is in the
   // stub cache.
-  Handle<Code> code;
+  MaybeObject* maybe_code = NULL;
+  Object* code = NULL;
 
   switch (type) {
-    case FIELD:
-      code = isolate()->stub_cache()->ComputeKeyedStoreField(
-          name, receiver, lookup->GetFieldIndex(),
-          Handle<Map>::null(), strict_mode);
+    case FIELD: {
+      maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
+          *name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
       break;
-    case MAP_TRANSITION:
+    }
+    case MAP_TRANSITION: {
       if (lookup->GetAttributes() == NONE) {
+        HandleScope scope(isolate());
+        ASSERT(type == MAP_TRANSITION);
         Handle<Map> transition(lookup->GetTransitionMap());
         int index = transition->PropertyIndexFor(*name);
-        code = isolate()->stub_cache()->ComputeKeyedStoreField(
-            name, receiver, index, transition, strict_mode);
+        maybe_code = isolate()->stub_cache()->ComputeKeyedStoreField(
+            *name, *receiver, index, *transition, strict_mode);
         break;
       }
       // fall through.
-    case NORMAL:
-    case CONSTANT_FUNCTION:
-    case CALLBACKS:
-    case INTERCEPTOR:
-    case CONSTANT_TRANSITION:
-    case ELEMENTS_TRANSITION:
+    }
+    default: {
       // Always rewrite to the generic case so that we do not
       // repeatedly try to rewrite.
-      code = (strict_mode == kStrictMode)
+      maybe_code = (strict_mode == kStrictMode)
           ? generic_stub_strict()
           : generic_stub();
       break;
-    case HANDLER:
-    case NULL_DESCRIPTOR:
-      UNREACHABLE();
-      return;
+    }
   }
 
-  ASSERT(!code.is_null());
+  // If we're unable to compute the stub (not enough memory left), we
+  // simply avoid updating the caches.
+  if (maybe_code == NULL || !maybe_code->ToObject(&code)) return;
 
   // Patch the call site depending on the state of the cache.  Make
   // sure to always rewrite from monomorphic to megamorphic.
   ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
   if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
-    set_target(*code);
+    set_target(Code::cast(code));
   } else if (state == MONOMORPHIC) {
     set_target((strict_mode == kStrictMode)
-                 ? *megamorphic_stub_strict()
-                 : *megamorphic_stub());
+                 ? megamorphic_stub_strict()
+                 : megamorphic_stub());
   }
 
-  TRACE_IC("KeyedStoreIC", name, state, target());
+#ifdef DEBUG
+  TraceIC("KeyedStoreIC", name, state, target());
+#endif
 }
 
 
-#undef TRACE_IC
-
-
 // ----------------------------------------------------------------------------
 // Static IC stub generators.
 //
 
+static JSFunction* CompileFunction(Isolate* isolate,
+                                   JSFunction* function) {
+  // Compile now with optimization.
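+  // Compiling eagerly at IC-miss time avoids bouncing the first call
+  // through the lazy-compile stub installed on the JSFunction.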
+  HandleScope scope(isolate);
+  Handle<JSFunction> function_handle(function, isolate);
+  CompileLazy(function_handle, CLEAR_EXCEPTION);
+  return *function_handle;
+}
+
+
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 2);
   CallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1940,46 +1910,45 @@
                                               extra_ic_state,
                                               args.at<Object>(0),
                                               args.at<String>(1));
-  // Result could be a function or a failure.
-  JSFunction* raw_function = NULL;
-  if (!maybe_result->To(&raw_function)) return maybe_result;
+  Object* result;
+  if (!maybe_result->ToObject(&result)) return maybe_result;
 
   // The first time the inline cache is updated may be the first time the
-  // function it references gets called.  If the function is lazily compiled
+  // function it references gets called.  If the function was lazily compiled
   // then the first call will trigger a compilation.  We check for this case
   // and we do the compilation immediately, instead of waiting for the stub
-  // currently attached to the JSFunction object to trigger compilation.
-  if (raw_function->is_compiled()) return raw_function;
-
-  Handle<JSFunction> function(raw_function);
-  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
-  return *function;
+  // currently attached to the JSFunction object to trigger compilation.  We
+  // do this in the case where we know that the inline cache is inside a loop,
+  // because then we know that we want to optimize the function.
+  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
+    return result;
+  }
+  return CompileFunction(isolate, JSFunction::cast(result));
 }
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedCallIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
-  MaybeObject* maybe_result =
+  Object* result;
+  { MaybeObject* maybe_result =
       ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1));
-  // Result could be a function or a failure.
-  JSFunction* raw_function = NULL;
-  if (!maybe_result->To(&raw_function)) return maybe_result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
 
-  if (raw_function->is_compiled()) return raw_function;
-
-  Handle<JSFunction> function(raw_function);
-  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
-  return *function;
+  if (!result->IsJSFunction() || JSFunction::cast(result)->is_compiled()) {
+    return result;
+  }
+  return CompileFunction(isolate, JSFunction::cast(result));
 }
 
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 2);
   LoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1989,7 +1958,7 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -1998,7 +1967,7 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 2);
   KeyedLoadIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2008,13 +1977,13 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
-  HandleScope scope;
+  NoHandleAllocation na;
   ASSERT(args.length() == 3);
   StoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
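+  // Strict mode is encoded as a bit of the extra IC state; masking with
+  // kStrictMode recovers the flag.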
   return ic.Store(state,
-                  Code::GetStrictMode(extra_ic_state),
+                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                   args.at<Object>(0),
                   args.at<String>(1),
                   args.at<Object>(2));
@@ -2025,19 +1994,12 @@
   NoHandleAllocation nha;
 
   ASSERT(args.length() == 2);
-  JSArray* receiver = JSArray::cast(args[0]);
+  JSObject* receiver = JSObject::cast(args[0]);
   Object* len = args[1];
 
   // The generated code should filter out non-Smis before we get here.
   ASSERT(len->IsSmi());
 
-#ifdef DEBUG
-  // The length property has to be a writable callback property.
-  LookupResult debug_lookup(isolate);
-  receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
-  ASSERT(debug_lookup.type() == CALLBACKS && !debug_lookup.IsReadOnly());
-#endif
-
   Object* result;
   { MaybeObject* maybe_result = receiver->SetElementsLength(len);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2084,13 +2046,13 @@
 
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
-                  Code::GetStrictMode(extra_ic_state),
+                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                   args.at<Object>(0),
                   args.at<Object>(1),
                   args.at<Object>(2),
@@ -2106,7 +2068,8 @@
   Handle<Object> object = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
   Handle<Object> value = args.at<Object>(2);
-  StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state);
+  StrictModeFlag strict_mode =
+      static_cast<StrictModeFlag>(extra_ic_state & kStrictMode);
   return Runtime::SetObjectProperty(isolate,
                                     object,
                                     key,
@@ -2117,13 +2080,13 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
-  HandleScope scope(isolate);
+  NoHandleAllocation na;
   ASSERT(args.length() == 3);
   KeyedStoreIC ic(isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
   return ic.Store(state,
-                  Code::GetStrictMode(extra_ic_state),
+                  static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
                   args.at<Object>(0),
                   args.at<Object>(1),
                   args.at<Object>(2),
@@ -2439,7 +2402,7 @@
   Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate);
 
   bool caught_exception;
-  Handle<Object> builtin_args[] = { right };
+  Object** builtin_args[] = { right.location() };
   Handle<Object> result = Execution::Call(builtin_function,
                                           left,
                                           ARRAY_SIZE(builtin_args),
@@ -2472,7 +2435,6 @@
     case SMIS: return "SMIS";
     case HEAP_NUMBERS: return "HEAP_NUMBERS";
     case OBJECTS: return "OBJECTS";
-    case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
     case SYMBOLS: return "SYMBOLS";
     case STRINGS: return "STRINGS";
     case GENERIC: return "GENERIC";
@@ -2487,50 +2449,19 @@
                                         bool has_inlined_smi_code,
                                         Handle<Object> x,
                                         Handle<Object> y) {
-  switch (state) {
-    case UNINITIALIZED:
-      if (x->IsSmi() && y->IsSmi()) return SMIS;
-      if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
-      if (Token::IsOrderedRelationalCompareOp(op_)) {
-        // Ordered comparisons treat undefined as NaN, so the
-        // HEAP_NUMBER stub will do the right thing.
-        if ((x->IsNumber() && y->IsUndefined()) ||
-            (y->IsNumber() && x->IsUndefined())) {
-          return HEAP_NUMBERS;
-        }
-      }
-      if (x->IsSymbol() && y->IsSymbol()) {
-        // We compare symbols as strings if we need to determine
-        // the order in a non-equality compare.
-        return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
-      }
-      if (x->IsString() && y->IsString()) return STRINGS;
-      if (!Token::IsEqualityOp(op_)) return GENERIC;
-      if (x->IsJSObject() && y->IsJSObject()) {
-        if (Handle<JSObject>::cast(x)->map() ==
-            Handle<JSObject>::cast(y)->map() &&
-            Token::IsEqualityOp(op_)) {
-          return KNOWN_OBJECTS;
-        } else {
-          return OBJECTS;
-        }
-      }
-      return GENERIC;
-    case SMIS:
-      return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
-          ? HEAP_NUMBERS
-          : GENERIC;
-    case SYMBOLS:
-      ASSERT(Token::IsEqualityOp(op_));
-      return x->IsString() && y->IsString() ? STRINGS : GENERIC;
-    case HEAP_NUMBERS:
-    case STRINGS:
-    case OBJECTS:
-    case KNOWN_OBJECTS:
-    case GENERIC:
-      return GENERIC;
+  if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
+    return GENERIC;
   }
-  UNREACHABLE();
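+  // The state only ever advances, e.g. UNINITIALIZED -> SMIS for two Smi
+  // operands, then SMIS -> HEAP_NUMBERS once a heap number shows up;
+  // anything unexpected degrades to GENERIC, so the call site is patched
+  // a bounded number of times.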
+  if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
+  if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
+      x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
+  if (state == UNINITIALIZED &&
+      x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+  if ((state == UNINITIALIZED || state == SYMBOLS) &&
+      x->IsString() && y->IsString()) return STRINGS;
+  if (state == UNINITIALIZED &&
+      x->IsJSObject() && y->IsJSObject()) return OBJECTS;
   return GENERIC;
 }
 
diff --git a/src/ic.h b/src/ic.h
index 5662552..ece5be9 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -91,13 +91,10 @@
   // Construct the IC structure with the given number of extra
   // JavaScript frames on the stack.
   IC(FrameDepth depth, Isolate* isolate);
-  virtual ~IC() {}
 
   // Get the call-site target; used for determining the state.
-  Code* target() const { return GetTargetAtAddress(address()); }
-  inline Address address() const;
-
-  virtual bool IsGeneric() const { return false; }
+  Code* target() { return GetTargetAtAddress(address()); }
+  inline Address address();
 
   // Compute the current IC state based on the target stub, receiver and name.
   static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -142,15 +139,13 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Computes the address in the original code when the running code
   // contains break points (calls to DebugBreakXXX builtins).
-  Address OriginalCodeAddress() const;
+  Address OriginalCodeAddress();
 #endif
 
   // Set the call-site target.
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }
 
 #ifdef DEBUG
-  char TransitionMarkFromState(IC::State state);
-
   void TraceIC(const char* type,
                Handle<Object> name,
                State old_state,
@@ -165,7 +160,6 @@
   // Access the target code for the given IC address.
   static inline Code* GetTargetAtAddress(Address address);
   static inline void SetTargetAtAddress(Address address, Code* target);
-  static void PostPatching(Address address, Code* target, Code* old_target);
 
  private:
   // Frame pointer for the frame that uses (calls) the IC.
@@ -204,60 +198,47 @@
   class Contextual: public BitField<bool, 0, 1> {};
   class StringStubState: public BitField<StringStubFeedback, 1, 1> {};
 
-  // Returns a JSFunction or a Failure.
+ protected:
+  CallICBase(Code::Kind kind, Isolate* isolate)
+      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
+
+ public:
   MUST_USE_RESULT MaybeObject* LoadFunction(State state,
                                             Code::ExtraICState extra_ic_state,
                                             Handle<Object> object,
                                             Handle<String> name);
 
  protected:
-  CallICBase(Code::Kind kind, Isolate* isolate)
-      : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {}
+  Code::Kind kind_;
 
   bool TryUpdateExtraICState(LookupResult* lookup,
                              Handle<Object> object,
                              Code::ExtraICState* extra_ic_state);
 
-  // Compute a monomorphic stub if possible, otherwise return a null handle.
-  Handle<Code> ComputeMonomorphicStub(LookupResult* lookup,
-                                      State state,
-                                      Code::ExtraICState extra_state,
-                                      Handle<Object> object,
-                                      Handle<String> name);
+  MUST_USE_RESULT MaybeObject* ComputeMonomorphicStub(
+      LookupResult* lookup,
+      State state,
+      Code::ExtraICState extra_ic_state,
+      Handle<Object> object,
+      Handle<String> name);
 
-  // Update the inline cache and the global stub cache based on the lookup
-  // result.
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
   void UpdateCaches(LookupResult* lookup,
                     State state,
                     Code::ExtraICState extra_ic_state,
                     Handle<Object> object,
                     Handle<String> name);
 
-  // Returns a JSFunction if the object can be called as a function, and
-  // patches the stack to be ready for the call.  Otherwise, it returns the
-  // undefined value.
-  Handle<Object> TryCallAsFunction(Handle<Object> object);
+  // Returns a JSFunction if the object can be called as a function,
+  // and patches the stack to be ready for the call.
+  // Otherwise, it returns the undefined value.
+  Object* TryCallAsFunction(Object* object);
 
   void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
 
   static void Clear(Address address, Code* target);
 
-  // Platform-specific code generation functions used by both call and
-  // keyed call.
-  static void GenerateMiss(MacroAssembler* masm,
-                           int argc,
-                           IC::UtilityId id,
-                           Code::ExtraICState extra_state);
-
-  static void GenerateNormal(MacroAssembler* masm, int argc);
-
-  static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                            int argc,
-                                            Code::Kind kind,
-                                            Code::ExtraICState extra_state);
-
-  Code::Kind kind_;
-
   friend class IC;
 };
 
@@ -271,24 +252,16 @@
   // Code generator routines.
   static void GenerateInitialize(MacroAssembler* masm,
                                  int argc,
-                                 Code::ExtraICState extra_state) {
-    GenerateMiss(masm, argc, extra_state);
+                                 Code::ExtraICState extra_ic_state) {
+    GenerateMiss(masm, argc, extra_ic_state);
   }
-
   static void GenerateMiss(MacroAssembler* masm,
                            int argc,
-                           Code::ExtraICState extra_state) {
-    CallICBase::GenerateMiss(masm, argc, IC::kCallIC_Miss, extra_state);
-  }
-
+                           Code::ExtraICState extra_ic_state);
   static void GenerateMegamorphic(MacroAssembler* masm,
                                   int argc,
                                   Code::ExtraICState extra_ic_state);
-
-  static void GenerateNormal(MacroAssembler* masm, int argc) {
-    CallICBase::GenerateNormal(masm, argc);
-    GenerateMiss(masm, argc, Code::kNoExtraICState);
-  }
+  static void GenerateNormal(MacroAssembler* masm, int argc);
 };
 
 
@@ -307,12 +280,7 @@
   static void GenerateInitialize(MacroAssembler* masm, int argc) {
     GenerateMiss(masm, argc);
   }
-
-  static void GenerateMiss(MacroAssembler* masm, int argc) {
-    CallICBase::GenerateMiss(masm, argc, IC::kKeyedCallIC_Miss,
-                             Code::kNoExtraICState);
-  }
-
+  static void GenerateMiss(MacroAssembler* masm, int argc);
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
   static void GenerateNonStrictArguments(MacroAssembler* masm, int argc);
@@ -353,15 +321,17 @@
                     Handle<String> name);
 
   // Stub accessors.
-  Handle<Code> megamorphic_stub() {
-    return isolate()->builtins()->LoadIC_Megamorphic();
+  Code* megamorphic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kLoadIC_Megamorphic);
   }
   static Code* initialize_stub() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kLoadIC_Initialize);
   }
-  Handle<Code> pre_monomorphic_stub() {
-    return isolate()->builtins()->LoadIC_PreMonomorphic();
+  Code* pre_monomorphic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kLoadIC_PreMonomorphic);
   }
 
   static void Clear(Address address, Code* target);
@@ -372,95 +342,41 @@
 
 class KeyedIC: public IC {
  public:
-  enum StubKind {
-    LOAD,
-    STORE_NO_TRANSITION,
-    STORE_TRANSITION_SMI_TO_OBJECT,
-    STORE_TRANSITION_SMI_TO_DOUBLE,
-    STORE_TRANSITION_DOUBLE_TO_OBJECT,
-    STORE_AND_GROW_NO_TRANSITION,
-    STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
-    STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
-    STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT
-  };
-
-  static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
-      STORE_NO_TRANSITION;
-  STATIC_ASSERT(kGrowICDelta ==
-                STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
-                STORE_TRANSITION_SMI_TO_OBJECT);
-  STATIC_ASSERT(kGrowICDelta ==
-                STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
-                STORE_TRANSITION_SMI_TO_DOUBLE);
-  STATIC_ASSERT(kGrowICDelta ==
-                STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
-                STORE_TRANSITION_DOUBLE_TO_OBJECT);
-
   explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
   virtual ~KeyedIC() {}
 
-  static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
-      StubKind stub_kind) {
-    return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
-        ? ALLOW_JSARRAY_GROWTH
-        : DO_NOT_ALLOW_JSARRAY_GROWTH;
-  }
-
-  static inline StubKind GetGrowStubKind(StubKind stub_kind) {
-    ASSERT(stub_kind != LOAD);
-    if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
-      stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
-                                        kGrowICDelta);
-    }
-    return stub_kind;
-  }
-
-  virtual Handle<Code> GetElementStubWithoutMapCheck(
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind,
-      KeyedAccessGrowMode grow_mode) = 0;
+      ElementsKind elements_kind) = 0;
 
  protected:
-  virtual Handle<Code> string_stub() {
-    return Handle<Code>::null();
+  virtual Code* string_stub() {
+    return NULL;
   }
 
   virtual Code::Kind kind() const = 0;
 
-  Handle<Code> ComputeStub(Handle<JSObject> receiver,
-                           StubKind stub_kind,
+  MaybeObject* ComputeStub(JSObject* receiver,
+                           bool is_store,
                            StrictModeFlag strict_mode,
-                           Handle<Code> default_stub);
+                           Code* default_stub);
 
-  virtual Handle<Code> ComputePolymorphicStub(
-      MapHandleList* receiver_maps,
-      StrictModeFlag strict_mode,
-      KeyedAccessGrowMode grow_mode) = 0;
-
-  Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
-      Handle<Map> receiver_map,
-      StrictModeFlag strict_mode,
-      KeyedAccessGrowMode grow_mode);
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode) = 0;
 
  private:
-  void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
+  void GetReceiverMapsForStub(Code* stub, MapList* result);
 
-  Handle<Code> ComputeMonomorphicStub(Handle<JSObject> receiver,
-                                      StubKind stub_kind,
+  MaybeObject* ComputeMonomorphicStubWithoutMapCheck(
+      Map* receiver_map,
+      StrictModeFlag strict_mode);
+
+  MaybeObject* ComputeMonomorphicStub(JSObject* receiver,
+                                      bool is_store,
                                       StrictModeFlag strict_mode,
-                                      Handle<Code> default_stub);
-
-  Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
-                                     StubKind stub_kind);
-
-  static bool IsTransitionStubKind(StubKind stub_kind) {
-    return stub_kind > STORE_NO_TRANSITION &&
-        stub_kind != STORE_AND_GROW_NO_TRANSITION;
-  }
-
-  static bool IsGrowStubKind(StubKind stub_kind) {
-    return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
-  }
+                                      Code* default_stub);
 };
 
 
@@ -496,24 +412,21 @@
   static const int kSlowCaseBitFieldMask =
       (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
 
-  virtual Handle<Code> GetElementStubWithoutMapCheck(
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind,
-      KeyedAccessGrowMode grow_mode);
-
-  virtual bool IsGeneric() const {
-    return target() == *generic_stub();
-  }
+      ElementsKind elements_kind);
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
 
-  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                              StrictModeFlag strict_mode,
-                                              KeyedAccessGrowMode grow_mode);
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
 
-  virtual Handle<Code> string_stub() {
-    return isolate()->builtins()->KeyedLoadIC_String();
+  virtual Code* string_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_String);
   }
 
  private:
@@ -528,20 +441,25 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedLoadIC_Initialize);
   }
-  Handle<Code> megamorphic_stub() {
-    return isolate()->builtins()->KeyedLoadIC_Generic();
+  Code* megamorphic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_Generic);
   }
-  Handle<Code> generic_stub() const {
-    return isolate()->builtins()->KeyedLoadIC_Generic();
+  Code* generic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_Generic);
   }
-  Handle<Code> pre_monomorphic_stub() {
-    return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
+  Code* pre_monomorphic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_PreMonomorphic);
   }
-  Handle<Code> indexed_interceptor_stub() {
-    return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
+  Code* indexed_interceptor_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_IndexedInterceptor);
   }
-  Handle<Code> non_strict_arguments_stub() {
-    return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
+  Code* non_strict_arguments_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedLoadIC_NonStrictArguments);
   }
 
   static void Clear(Address address, Code* target);
@@ -584,8 +502,8 @@
 
   void set_target(Code* code) {
     // Strict mode must be preserved across IC patching.
-    ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
-           Code::GetStrictMode(target()->extra_ic_state()));
+    ASSERT((code->extra_ic_state() & kStrictMode) ==
+           (target()->extra_ic_state() & kStrictMode));
     IC::set_target(code);
   }
 
@@ -606,11 +524,13 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kStoreIC_Initialize_Strict);
   }
-  Handle<Code> global_proxy_stub() {
-    return isolate()->builtins()->StoreIC_GlobalProxy();
+  Code* global_proxy_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kStoreIC_GlobalProxy);
   }
-  Handle<Code> global_proxy_stub_strict() {
-    return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+  Code* global_proxy_stub_strict() {
+    return isolate()->builtins()->builtin(
+        Builtins::kStoreIC_GlobalProxy_Strict);
   }
 
   static void Clear(Address address, Code* target);
@@ -642,25 +562,18 @@
                                          StrictModeFlag strict_mode);
   static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);
   static void GenerateNonStrictArguments(MacroAssembler* masm);
-  static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
-  static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
 
-  virtual Handle<Code> GetElementStubWithoutMapCheck(
+  virtual MaybeObject* GetElementStubWithoutMapCheck(
       bool is_js_array,
-      ElementsKind elements_kind,
-      KeyedAccessGrowMode grow_mode);
-
-  virtual bool IsGeneric() const {
-    return target() == *generic_stub() ||
-        target() == *generic_stub_strict();
-  }
+      ElementsKind elements_kind);
 
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
-  virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
-                                              StrictModeFlag strict_mode,
-                                              KeyedAccessGrowMode grow_mode);
+  virtual MaybeObject* ConstructMegamorphicStub(
+      MapList* receiver_maps,
+      CodeList* targets,
+      StrictModeFlag strict_mode);
 
  private:
   // Update the inline cache.
@@ -673,8 +586,8 @@
 
   void set_target(Code* code) {
     // Strict mode must be preserved across IC patching.
-    ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
-           Code::GetStrictMode(target()->extra_ic_state()));
+    ASSERT((code->extra_ic_state() & kStrictMode) ==
+           (target()->extra_ic_state() & kStrictMode));
     IC::set_target(code);
   }
 
@@ -683,32 +596,33 @@
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize);
   }
+  Code* megamorphic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_Generic);
+  }
   static Code* initialize_stub_strict() {
     return Isolate::Current()->builtins()->builtin(
         Builtins::kKeyedStoreIC_Initialize_Strict);
   }
-  Handle<Code> megamorphic_stub() {
-    return isolate()->builtins()->KeyedStoreIC_Generic();
+  Code* megamorphic_stub_strict() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_Generic_Strict);
   }
-  Handle<Code> megamorphic_stub_strict() {
-    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+  Code* generic_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_Generic);
   }
-  Handle<Code> generic_stub() const {
-    return isolate()->builtins()->KeyedStoreIC_Generic();
+  Code* generic_stub_strict() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_Generic_Strict);
   }
-  Handle<Code> generic_stub_strict() const {
-    return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
-  }
-  Handle<Code> non_strict_arguments_stub() {
-    return isolate()->builtins()->KeyedStoreIC_NonStrictArguments();
+  Code* non_strict_arguments_stub() {
+    return isolate()->builtins()->builtin(
+        Builtins::kKeyedStoreIC_NonStrictArguments);
   }
 
   static void Clear(Address address, Code* target);
 
-  StubKind GetStubKind(Handle<JSObject> receiver,
-                       Handle<Object> key,
-                       Handle<Object> value);
-
   friend class IC;
 };
 
@@ -775,7 +689,6 @@
     SYMBOLS,
     STRINGS,
     OBJECTS,
-    KNOWN_OBJECTS,
     GENERIC
   };
 
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
deleted file mode 100644
index 3e3d6c4..0000000
--- a/src/incremental-marking-inl.h
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
-
-#include "incremental-marking.h"
-
-namespace v8 {
-namespace internal {
-
-
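-// The write barrier below maintains the tri-color invariant: a black
-// (fully scanned) object must never point to a white (unvisited) one.
-// Rather than greying the value, the black source object is re-greyed so
-// that it gets rescanned.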
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
-                                         Object** slot,
-                                         Object* value) {
-  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-
-    // Object is either grey or white.  It will be scanned if it survives.
-    return false;
-  }
-  return true;
-}
-
-
-void IncrementalMarking::RecordWrite(HeapObject* obj,
-                                     Object** slot,
-                                     Object* value) {
-  if (IsMarking() && value->NonFailureIsHeapObject()) {
-    RecordWriteSlow(obj, slot, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
-                                                Object** slot,
-                                                Code* value) {
-  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
-}
-
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
-                                             RelocInfo* rinfo,
-                                             Object* value) {
-  if (IsMarking() && value->NonFailureIsHeapObject()) {
-    RecordWriteIntoCodeSlow(obj, rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
-  if (IsMarking()) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-  }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
-                                               MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  ASSERT(obj->Size() >= 2*kPointerSize);
-  ASSERT(IsMarking());
-  Marking::BlackToGrey(mark_bit);
-  int obj_size = obj->Size();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
-  bytes_scanned_ -= obj_size;
-  int64_t old_bytes_rescanned = bytes_rescanned_;
-  bytes_rescanned_ = old_bytes_rescanned + obj_size;
-  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
-      // If we have queued twice the heap size for rescanning then we are
-      // going around in circles, scanning the same objects again and again
-      // as the program mutates the heap faster than we can incrementally
-      // trace it.  In this case we switch to non-incremental marking in
-      // order to finish off this marking phase.
-      if (FLAG_trace_gc) {
-        PrintF("Hurrying incremental marking because of lack of progress\n");
-      }
-      allocation_marking_factor_ = kMaxAllocationMarkingFactor;
-    }
-  }
-
-  marking_deque_.UnshiftGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  WhiteToGrey(obj, mark_bit);
-  marking_deque_.PushGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
deleted file mode 100644
index 8fe89b4..0000000
--- a/src/incremental-marking.cc
+++ /dev/null
@@ -1,937 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "incremental-marking.h"
-
-#include "code-stubs.h"
-#include "compilation-cache.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-IncrementalMarking::IncrementalMarking(Heap* heap)
-    : heap_(heap),
-      state_(STOPPED),
-      marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
-      steps_count_(0),
-      steps_took_(0),
-      longest_step_(0.0),
-      old_generation_space_available_at_start_of_incremental_(0),
-      old_generation_space_used_at_start_of_incremental_(0),
-      steps_count_since_last_gc_(0),
-      steps_took_since_last_gc_(0),
-      should_hurry_(false),
-      allocation_marking_factor_(0),
-      allocated_(0),
-      no_marking_scope_depth_(0) {
-}
-
-
-void IncrementalMarking::TearDown() {
-  delete marking_deque_memory_;
-}
-
-
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
-                                         Object** slot,
-                                         Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned, so we need to record the slot.
-      heap_->mark_compact_collector()->RecordSlot(
-          HeapObject::RawField(obj, 0), slot, value);
-    }
-  }
-}
-
-
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
-                                             Object* value,
-                                             Isolate* isolate) {
-  ASSERT(obj->IsHeapObject());
-
-  // Fast cases should already be covered by RecordWriteStub.
-  ASSERT(value->IsHeapObject());
-  ASSERT(!value->IsHeapNumber());
-  ASSERT(!value->IsString() ||
-         value->IsConsString() ||
-         value->IsSlicedString());
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
-
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-  ASSERT(!marking->is_compacting_);
-  marking->RecordWrite(obj, NULL, value);
-}
-
-
-void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
-                                                          Object** slot,
-                                                          Isolate* isolate) {
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-  ASSERT(marking->is_compacting_);
-  marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Code* host,
-                                               Address pc,
-                                               HeapObject* value) {
-  if (IsMarking()) {
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
-    RecordWriteIntoCode(host, &rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
-  if (IsMarking()) {
-    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc);
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
-    RecordWriteIntoCode(host, &rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
-                                                Object** slot,
-                                                Code* value) {
-  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
-    ASSERT(slot != NULL);
-    heap_->mark_compact_collector()->
-        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
-                                                 RelocInfo* rinfo,
-                                                 Object* value) {
-  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-    // Object is either grey or white.  It will be scanned if it survives.
-    return;
-  }
-
-  if (is_compacting_) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned.  We need to record the slot.
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
-                                                       Code::cast(value));
-    }
-  }
-}
-
-
-class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
- public:
-  IncrementalMarkingMarkingVisitor(Heap* heap,
-                                   IncrementalMarking* incremental_marking)
-      : heap_(heap),
-        incremental_marking_(incremental_marking) {
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    if (target->NonFailureIsHeapObject()) {
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
-      MarkObject(target);
-    }
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
-        && (target->ic_age() != heap_->global_ic_age())) {
-      IC::Clear(rinfo->pc());
-      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    }
-    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
-    MarkObject(target);
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
-    MarkObject(target);
-  }
-
-  void VisitCodeEntry(Address entry_address) {
-    Object* target = Code::GetObjectFromEntryAddress(entry_address);
-    heap_->mark_compact_collector()->
-        RecordCodeEntrySlot(entry_address, Code::cast(target));
-    MarkObject(target);
-  }
-
-  void VisitPointer(Object** p) {
-    Object* obj = *p;
-    if (obj->NonFailureIsHeapObject()) {
-      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
-      MarkObject(obj);
-    }
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) {
-      Object* obj = *p;
-      if (obj->NonFailureIsHeapObject()) {
-        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
-        MarkObject(obj);
-      }
-    }
-  }
-
- private:
-  // Mark the given object, greying it and pushing it on the deque if needed.
-  INLINE(void MarkObject(Object* obj)) {
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
-        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
-                                              heap_object->Size());
-      }
-    } else if (Marking::IsWhite(mark_bit)) {
-      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
-    }
-  }
-
-  Heap* heap_;
-  IncrementalMarking* incremental_marking_;
-};
-
-
-class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
- public:
-  IncrementalMarkingRootMarkingVisitor(Heap* heap,
-                                       IncrementalMarking* incremental_marking)
-      : heap_(heap),
-        incremental_marking_(incremental_marking) {
-  }
-
-  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
-  }
-
- private:
-  void MarkObjectByPointer(Object** p) {
-    Object* obj = *p;
-    if (!obj->IsHeapObject()) return;
-
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
-          MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
-                                                heap_object->Size());
-      }
-    } else {
-      if (Marking::IsWhite(mark_bit)) {
-        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
-      }
-    }
-  }
-
-  Heap* heap_;
-  IncrementalMarking* incremental_marking_;
-};
-
-
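-// Page flags gate the write barrier: POINTERS_FROM_HERE_ARE_INTERESTING
-// enables it for stores made from objects on a page, while
-// POINTERS_TO_HERE_ARE_INTERESTING covers stores whose target lives on a
-// page.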
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
-                                              bool is_marking,
-                                              bool is_compacting) {
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-    // It's difficult to filter out slots recorded for large objects.
-    if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
-        is_compacting) {
-      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-  } else if (chunk->owner()->identity() == CELL_SPACE ||
-             chunk->scan_on_scavenge()) {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
-
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
-                                              bool is_marking) {
-  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
-    PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SetOldSpacePageFlags(p, false, false);
-  }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
-    NewSpace* space) {
-  NewSpacePageIterator it(space);
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    SetNewSpacePageFlags(p, false);
-  }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
-
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
-    SetOldSpacePageFlags(lop, false, false);
-    lop = lop->next_page();
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SetOldSpacePageFlags(p, true, is_compacting_);
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    SetNewSpacePageFlags(p, true);
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier() {
-  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
-  ActivateIncrementalWriteBarrier(heap_->old_data_space());
-  ActivateIncrementalWriteBarrier(heap_->cell_space());
-  ActivateIncrementalWriteBarrier(heap_->map_space());
-  ActivateIncrementalWriteBarrier(heap_->code_space());
-  ActivateIncrementalWriteBarrier(heap_->new_space());
-
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
-    SetOldSpacePageFlags(lop, true, is_compacting_);
-    lop = lop->next_page();
-  }
-}
-
-
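-// Incremental marking is only started once enough data has been promoted,
-// and never when GC is exposed to JavaScript or while serializing.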
-bool IncrementalMarking::WorthActivating() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
-
-  return !FLAG_expose_gc &&
-      FLAG_incremental_marking &&
-      !Serializer::enabled() &&
-      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
-}
-
-
-void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
-  ASSERT(RecordWriteStub::GetMode(stub) ==
-         RecordWriteStub::STORE_BUFFER_ONLY);
-
-  if (!IsMarking()) {
-    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
-    // we don't need to do anything if incremental marking is
-    // not active.
-  } else if (IsCompacting()) {
-    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
-  } else {
-    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
-  }
-}
-
-
-static void PatchIncrementalMarkingRecordWriteStubs(
-    Heap* heap, RecordWriteStub::Mode mode) {
-  UnseededNumberDictionary* stubs = heap->code_stubs();
-
-  int capacity = stubs->Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* k = stubs->KeyAt(i);
-    if (stubs->IsKey(k)) {
-      uint32_t key = NumberToUint32(k);
-
-      if (CodeStub::MajorKeyFromKey(key) ==
-          CodeStub::RecordWrite) {
-        Object* e = stubs->ValueAt(i);
-        if (e->IsCode()) {
-          RecordWriteStub::Patch(Code::cast(e), mode);
-        }
-      }
-    }
-  }
-}
-
-
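-// The marking deque's backing store is reserved once, committed lazily
-// when marking starts, and uncommitted again while marking is stopped.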
-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
-  if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new VirtualMemory(4 * MB);
-  }
-  if (!marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Commit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size(),
-        false);  // Not executable.
-    CHECK(success);
-    marking_deque_memory_committed_ = true;
-  }
-}
-
-
-void IncrementalMarking::UncommitMarkingDeque() {
-  if (state_ == STOPPED && marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
-}
-
-
-void IncrementalMarking::Start() {
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start\n");
-  }
-  ASSERT(FLAG_incremental_marking);
-  ASSERT(state_ == STOPPED);
-
-  ResetStepCounters();
-
-  if (heap_->old_pointer_space()->IsSweepingComplete() &&
-      heap_->old_data_space()->IsSweepingComplete()) {
-    StartMarking(ALLOW_COMPACTION);
-  } else {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Start sweeping.\n");
-    }
-    state_ = SWEEPING;
-  }
-
-  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
-}
-
-
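-// Grey the object without pushing it on the marking deque; used for the
-// polymorphic code cache, which is marked black in a separate pass once
-// marking finishes.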
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
-  if (obj->IsHeapObject()) {
-    HeapObject* heap_obj = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
-    if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
-                                            -heap_obj->Size());
-    }
-    Marking::AnyToGrey(mark_bit);
-  }
-}
-
-
-void IncrementalMarking::StartMarking(CompactionFlag flag) {
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start marking\n");
-  }
-
-  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
-      heap_->mark_compact_collector()->StartCompaction(
-          MarkCompactCollector::INCREMENTAL_COMPACTION);
-
-  state_ = MARKING;
-
-  RecordWriteStub::Mode mode = is_compacting_ ?
-      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
-
-  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-
-  EnsureMarkingDequeIsCommitted();
-
-  // Initialize marking stack.
-  Address addr = static_cast<Address>(marking_deque_memory_->address());
-  size_t size = marking_deque_memory_->size();
-  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque_.Initialize(addr, addr + size);
-
-  ActivateIncrementalWriteBarrier();
-
-#ifdef DEBUG
-  // Marking bits are cleared by the sweeper.
-  if (FLAG_verify_heap) {
-    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
-  }
-#endif
-
-  heap_->CompletelyClearInstanceofCache();
-  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
-
-  if (FLAG_cleanup_code_caches_at_gc) {
-    // We will mark the cache black with a separate pass
-    // when we finish marking.
-    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
-  }
-
-  // Mark strong roots grey.
-  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
-  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-
-  // Ready to start incremental marking.
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Running\n");
-  }
-}
-
-
-void IncrementalMarking::PrepareForScavenge() {
-  if (!IsMarking()) return;
-  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
-                          heap_->new_space()->FromSpaceEnd());
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
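-// After a scavenge the objects recorded on the marking deque may have
-// moved: forwarded new-space objects are replaced by their forwarding
-// addresses, objects that did not survive are dropped, and one-word
-// fillers left by in-place array shifts are skipped.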
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
-  if (!IsMarking()) return;
-
-  int current = marking_deque_.bottom();
-  int mask = marking_deque_.mask();
-  int limit = marking_deque_.top();
-  HeapObject** array = marking_deque_.array();
-  int new_top = current;
-
-  Map* filler_map = heap_->one_pointer_filler_map();
-
-  while (current != limit) {
-    HeapObject* obj = array[current];
-    ASSERT(obj->IsHeapObject());
-    current = ((current + 1) & mask);
-    if (heap_->InNewSpace(obj)) {
-      MapWord map_word = obj->map_word();
-      if (map_word.IsForwardingAddress()) {
-        HeapObject* dest = map_word.ToForwardingAddress();
-        array[new_top] = dest;
-        new_top = ((new_top + 1) & mask);
-        ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        ASSERT(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
-      }
-    } else if (obj->map() != filler_map) {
-      // Skip one-word filler objects that appear on the
-      // stack when we perform an in-place array shift.
-      array[new_top] = obj;
-      new_top = ((new_top + 1) & mask);
-      ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        ASSERT(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
-    }
-  }
-  marking_deque_.set_top(new_top);
-
-  steps_took_since_last_gc_ = 0;
-  steps_count_since_last_gc_ = 0;
-  longest_step_ = 0.0;
-}
-
-
-void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
-  v->VisitPointers(
-      HeapObject::RawField(
-          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
-      HeapObject::RawField(
-          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
-
-  MarkCompactCollector* collector = heap_->mark_compact_collector();
-  for (int idx = Context::FIRST_WEAK_SLOT;
-       idx < Context::GLOBAL_CONTEXT_SLOTS;
-       ++idx) {
-    Object** slot =
-        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
-    collector->RecordSlot(slot, slot, *slot);
-  }
-}
-
-
-void IncrementalMarking::Hurry() {
-  if (state() == MARKING) {
-    double start = 0.0;
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Hurry\n");
-      start = OS::TimeCurrentMillis();
-    }
-    // TODO(gc) hurry can mark objects it encounters black as mutator
-    // was stopped.
-    Map* filler_map = heap_->one_pointer_filler_map();
-    Map* global_context_map = heap_->global_context_map();
-    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
-    while (!marking_deque_.IsEmpty()) {
-      HeapObject* obj = marking_deque_.Pop();
-
-      // Explicitly skip one-word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) {
-        continue;
-      } else if (map == global_context_map) {
-        // Global contexts have weak fields.
-        VisitGlobalContext(Context::cast(obj), &marking_visitor);
-      } else {
-        obj->Iterate(&marking_visitor);
-      }
-
-      MarkBit mark_bit = Marking::MarkBitFrom(obj);
-      ASSERT(!Marking::IsBlack(mark_bit));
-      Marking::MarkBlack(mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    }
-    state_ = COMPLETE;
-    if (FLAG_trace_incremental_marking) {
-      double end = OS::TimeCurrentMillis();
-      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-             static_cast<int>(end - start));
-    }
-  }
-
-  if (FLAG_cleanup_code_caches_at_gc) {
-    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
-    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
-    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
-                                          PolymorphicCodeCache::kSize);
-  }
-
-  Object* context = heap_->global_contexts_list();
-  while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the cache can be undefined.
-    HeapObject* cache = HeapObject::cast(
-        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
-    if (!cache->IsUndefined()) {
-      MarkBit mark_bit = Marking::MarkBitFrom(cache);
-      if (Marking::IsGrey(mark_bit)) {
-        Marking::GreyToBlack(mark_bit);
-        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
-      }
-    }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
-  }
-}
-
-
-void IncrementalMarking::Abort() {
-  if (IsStopped()) return;
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Aborting.\n");
-  }
-  heap_->new_space()->LowerInlineAllocationLimit(0);
-  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
-  if (IsMarking()) {
-    PatchIncrementalMarkingRecordWriteStubs(heap_,
-                                            RecordWriteStub::STORE_BUFFER_ONLY);
-    DeactivateIncrementalWriteBarrier();
-
-    if (is_compacting_) {
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
-  }
-  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
-  state_ = STOPPED;
-  is_compacting_ = false;
-}
-
-
-void IncrementalMarking::Finalize() {
-  Hurry();
-  state_ = STOPPED;
-  is_compacting_ = false;
-  heap_->new_space()->LowerInlineAllocationLimit(0);
-  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
-  PatchIncrementalMarkingRecordWriteStubs(heap_,
-                                          RecordWriteStub::STORE_BUFFER_ONLY);
-  DeactivateIncrementalWriteBarrier();
-  ASSERT(marking_deque_.IsEmpty());
-  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
-}
-
-
-void IncrementalMarking::MarkingComplete() {
-  state_ = COMPLETE;
-  // We will set the stack guard to request a GC now.  This will mean the rest
-  // of the GC gets performed as soon as possible (we can't do a GC here in a
-  // record-write context).  If a few things get allocated between now and
-  // then, that should not make us do a scavenge and keep marking
-  // incrementally, so we set the should-hurry flag to indicate that there
-  // can't be much work left to do.
-  set_should_hurry(true);
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Complete (normal).\n");
-  }
-  if (!heap_->idle_notification_will_schedule_next_gc()) {
-    heap_->isolate()->stack_guard()->RequestGC();
-  }
-}
-
-
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
-  if (heap_->gc_state() != Heap::NOT_IN_GC ||
-      !FLAG_incremental_marking ||
-      !FLAG_incremental_marking_steps ||
-      (state_ != SWEEPING && state_ != MARKING)) {
-    return;
-  }
-
-  allocated_ += allocated_bytes;
-
-  if (allocated_ < kAllocatedThreshold) return;
-
-  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
-
-  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
-  bytes_scanned_ += bytes_to_process;
-
-  double start = 0;
-
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
-    start = OS::TimeCurrentMillis();
-  }
-
-  if (state_ == SWEEPING) {
-    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
-      bytes_scanned_ = 0;
-      StartMarking(PREVENT_COMPACTION);
-    }
-  } else if (state_ == MARKING) {
-    Map* filler_map = heap_->one_pointer_filler_map();
-    Map* global_context_map = heap_->global_context_map();
-    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
-    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
-      HeapObject* obj = marking_deque_.Pop();
-
-      // Explicitly skip one-word fillers. Incremental markbit patterns are
-      // correct only for objects that occupy at least two words.
-      Map* map = obj->map();
-      if (map == filler_map) continue;
-
-      if (obj->IsMap()) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-      }
-
-      int size = obj->SizeFromMap(map);
-      bytes_to_process -= size;
-      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-      if (Marking::IsWhite(map_mark_bit)) {
-        WhiteToGreyAndPush(map, map_mark_bit);
-      }
-
-      // TODO(gc) switch to static visitor instead of normal visitor.
-      if (map == global_context_map) {
-        // Global contexts have weak fields.
-        Context* ctx = Context::cast(obj);
-
-        // We will mark cache black with a separate pass
-        // when we finish marking.
-        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
-        VisitGlobalContext(ctx, &marking_visitor);
-      } else {
-        obj->IterateBody(map->instance_type(), size, &marking_visitor);
-      }
-
-      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
-                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
-      Marking::MarkBlack(obj_mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
-    }
-    if (marking_deque_.IsEmpty()) MarkingComplete();
-  }
-
-  allocated_ = 0;
-
-  steps_count_++;
-  steps_count_since_last_gc_++;
-
-  bool speed_up = false;
-
-  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
-    if (FLAG_trace_gc) {
-      PrintF("Speed up marking after %d steps\n",
-             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
-    }
-    speed_up = true;
-  }
-
-  bool space_left_is_very_small =
-      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-  bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
-          old_generation_space_available_at_start_of_incremental_);
-
-  if (space_left_is_very_small ||
-      only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
-    speed_up = true;
-  }
-
-  bool size_of_old_space_multiplied_by_n_during_marking =
-      (heap_->PromotedTotalSize() >
-       (allocation_marking_factor_ + 1) *
-           old_generation_space_used_at_start_of_incremental_);
-  if (size_of_old_space_multiplied_by_n_during_marking) {
-    speed_up = true;
-    if (FLAG_trace_gc) {
-      PrintF("Speed up marking because of heap size increase\n");
-    }
-  }
-
-  int64_t promoted_during_marking = heap_->PromotedTotalSize()
-      - old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = allocation_marking_factor_ * MB;
-  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-  // We try to scan at least twice the speed at which we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_gc) {
-      PrintF("Speed up marking because marker was not keeping up\n");
-    }
-    speed_up = true;
-  }
-
-  if (speed_up) {
-    if (state_ != MARKING) {
-      if (FLAG_trace_gc) {
-        PrintF("Postponing speeding up marking until marking starts\n");
-      }
-    } else {
-      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
-      allocation_marking_factor_ = static_cast<int>(
-          Min(kMaxAllocationMarkingFactor,
-              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
-      if (FLAG_trace_gc) {
-        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
-      }
-    }
-  }
-
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
-    double end = OS::TimeCurrentMillis();
-    double delta = (end - start);
-    longest_step_ = Max(longest_step_, delta);
-    steps_took_ += delta;
-    steps_took_since_last_gc_ += delta;
-  }
-}
-
-
-void IncrementalMarking::ResetStepCounters() {
-  steps_count_ = 0;
-  steps_took_ = 0;
-  longest_step_ = 0.0;
-  old_generation_space_available_at_start_of_incremental_ =
-      SpaceLeftInOldSpace();
-  old_generation_space_used_at_start_of_incremental_ =
-      heap_->PromotedTotalSize();
-  steps_count_since_last_gc_ = 0;
-  steps_took_since_last_gc_ = 0;
-  bytes_rescanned_ = 0;
-  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
-  bytes_scanned_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
-}
-
-} }  // namespace v8::internal
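
The Step() pacing logic deleted above is worth a gloss: each time a
speed-up condition fires, allocation_marking_factor_ grows roughly
geometrically, factor <- min(1000, floor((factor + 2) * 1.3)). A minimal
standalone sketch of that arithmetic (illustrative only, not part of this
patch):

    // Reproduces the growth curve of allocation_marking_factor_.
    #include <algorithm>
    #include <cstdio>

    int main() {
      const int kMaxFactor = 1000;  // kMaxAllocationMarkingFactor
      const int kSpeedup = 2;       // kAllocationMarkingFactorSpeedup
      int factor = 1;               // kInitialAllocationMarkingFactor
      for (int i = 0; factor < kMaxFactor && i < 25; i++) {
        factor += kSpeedup;
        factor = std::min(kMaxFactor, static_cast<int>(factor * 1.3));
        std::printf("after speed-up %2d: factor = %d\n", i + 1, factor);
      }
      // With kAllocatedThreshold = 64 KB per step, a factor of N means the
      // marker scans roughly N * 64 KB per 64 KB the mutator allocates.
      return 0;
    }
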
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
deleted file mode 100644
index 4f8fa6b..0000000
--- a/src/incremental-marking.h
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
-
-
-#include "execution.h"
-#include "mark-compact.h"
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-class IncrementalMarking {
- public:
-  enum State {
-    STOPPED,
-    SWEEPING,
-    MARKING,
-    COMPLETE
-  };
-
-  explicit IncrementalMarking(Heap* heap);
-
-  void TearDown();
-
-  State state() {
-    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
-    return state_;
-  }
-
-  bool should_hurry() { return should_hurry_; }
-  void set_should_hurry(bool val) { should_hurry_ = val; }
-
-  inline bool IsStopped() { return state() == STOPPED; }
-
-  INLINE(bool IsMarking()) { return state() >= MARKING; }
-
-  inline bool IsMarkingIncomplete() { return state() == MARKING; }
-
-  inline bool IsComplete() { return state() == COMPLETE; }
-
-  bool WorthActivating();
-
-  void Start();
-
-  void Stop();
-
-  void PrepareForScavenge();
-
-  void UpdateMarkingDequeAfterScavenge();
-
-  void Hurry();
-
-  void Finalize();
-
-  void Abort();
-
-  void MarkingComplete();
-
-  // It's hard to know how much work the incremental marker should do to make
-  // progress in the face of the mutator creating new work for it.  We start
-  // off at a moderate rate of work and gradually increase the speed of the
-  // incremental marker until it completes.
-  // Do some marking every time this much memory has been allocated.
-  static const intptr_t kAllocatedThreshold = 65536;
-  // Start off by marking this many times more memory than has been allocated.
-  static const intptr_t kInitialAllocationMarkingFactor = 1;
-  // But if we are promoting a lot of data we need to mark faster to keep up
-  // with the data that is entering the old space through promotion.
-  static const intptr_t kFastMarking = 3;
-  // After this many steps we increase the marking/allocating factor.
-  static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
-  // This is how much we increase the marking/allocating factor by.
-  static const intptr_t kAllocationMarkingFactorSpeedup = 2;
-  static const intptr_t kMaxAllocationMarkingFactor = 1000;
-
-  void OldSpaceStep(intptr_t allocated) {
-    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
-  }
-
-  void Step(intptr_t allocated);
-
-  inline void RestartIfNotMarking() {
-    if (state_ == COMPLETE) {
-      state_ = MARKING;
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
-      }
-    }
-  }
-
-  static void RecordWriteFromCode(HeapObject* obj,
-                                  Object* value,
-                                  Isolate* isolate);
-
-  static void RecordWriteForEvacuationFromCode(HeapObject* obj,
-                                               Object** slot,
-                                               Isolate* isolate);
-
-  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWriteIntoCode(HeapObject* obj,
-                                  RelocInfo* rinfo,
-                                  Object* value));
-  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
-                                     Object** slot,
-                                     Code* value));
-
-
-  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
-  void RecordWriteIntoCodeSlow(HeapObject* obj,
-                               RelocInfo* rinfo,
-                               Object* value);
-  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
-  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
-  void RecordCodeTargetPatch(Address pc, HeapObject* value);
-
-  inline void RecordWrites(HeapObject* obj);
-
-  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
-  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
-
-  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
-
-  // Does white->black, or keeps the object grey or black. Returns true if
-  // converting white to black.
-  inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
-    ASSERT(!Marking::IsImpossible(mark_bit));
-    if (mark_bit.Get()) {
-      // Grey or black: Keep the color.
-      return false;
-    }
-    mark_bit.Set();
-    ASSERT(Marking::IsBlack(mark_bit));
-    return true;
-  }
-
-  inline int steps_count() {
-    return steps_count_;
-  }
-
-  inline double steps_took() {
-    return steps_took_;
-  }
-
-  inline double longest_step() {
-    return longest_step_;
-  }
-
-  inline int steps_count_since_last_gc() {
-    return steps_count_since_last_gc_;
-  }
-
-  inline double steps_took_since_last_gc() {
-    return steps_took_since_last_gc_;
-  }
-
-  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
-    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
-  }
-
-  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
-    SetNewSpacePageFlags(chunk, IsMarking());
-  }
-
-  MarkingDeque* marking_deque() { return &marking_deque_; }
-
-  bool IsCompacting() { return IsMarking() && is_compacting_; }
-
-  void ActivateGeneratedStub(Code* stub);
-
-  void NotifyOfHighPromotionRate() {
-    if (IsMarking()) {
-      if (allocation_marking_factor_ < kFastMarking) {
-        if (FLAG_trace_gc) {
-          PrintF("Increasing marking speed to %d due to high promotion rate\n",
-                 static_cast<int>(kFastMarking));
-        }
-        allocation_marking_factor_ = kFastMarking;
-      }
-    }
-  }
-
-  void EnterNoMarkingScope() {
-    no_marking_scope_depth_++;
-  }
-
-  void LeaveNoMarkingScope() {
-    no_marking_scope_depth_--;
-  }
-
-  void UncommitMarkingDeque();
-
- private:
-  int64_t SpaceLeftInOldSpace();
-
-  void ResetStepCounters();
-
-  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
-  void StartMarking(CompactionFlag flag);
-
-  void ActivateIncrementalWriteBarrier(PagedSpace* space);
-  static void ActivateIncrementalWriteBarrier(NewSpace* space);
-  void ActivateIncrementalWriteBarrier();
-
-  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
-  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
-  void DeactivateIncrementalWriteBarrier();
-
-  static void SetOldSpacePageFlags(MemoryChunk* chunk,
-                                   bool is_marking,
-                                   bool is_compacting);
-
-  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
-
-  void EnsureMarkingDequeIsCommitted();
-
-  void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
-
-  Heap* heap_;
-
-  State state_;
-  bool is_compacting_;
-
-  VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
-  MarkingDeque marking_deque_;
-
-  int steps_count_;
-  double steps_took_;
-  double longest_step_;
-  int64_t old_generation_space_available_at_start_of_incremental_;
-  int64_t old_generation_space_used_at_start_of_incremental_;
-  int steps_count_since_last_gc_;
-  double steps_took_since_last_gc_;
-  int64_t bytes_rescanned_;
-  bool should_hurry_;
-  int allocation_marking_factor_;
-  intptr_t bytes_scanned_;
-  intptr_t allocated_;
-
-  int no_marking_scope_depth_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_H_
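
The header above declares the incremental write barrier (RecordWrite and
friends). The underlying idea is the classic tri-colour invariant: while
marking is in progress, no black object may point at a white one, so every
pointer store by the mutator greys the stored value. A simplified model
(toy types; names are illustrative, not V8's actual implementation):

    #include <vector>

    enum Color { WHITE, GREY, BLACK };

    struct Obj {
      Color color = WHITE;
      std::vector<Obj*> fields;
    };

    std::vector<Obj*> marking_deque;  // grey objects still to be scanned

    // Mutator calls this on every pointer store while marking is active.
    void RecordWrite(Obj* host, Obj** slot, Obj* value) {
      *slot = value;
      if (value != nullptr && value->color == WHITE) {
        value->color = GREY;            // WhiteToGreyAndPush in the header
        marking_deque.push_back(value);
      }
      (void)host;  // the real barrier also consults the host's mark bit
    }
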
diff --git a/src/inspector.cc b/src/inspector.cc
index 833d338..8fb80f1 100644
--- a/src/inspector.cc
+++ b/src/inspector.cc
@@ -38,11 +38,11 @@
 //============================================================================
 // The Inspector.
 
-void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
+void Inspector::DumpObjectType(FILE* out, Object *obj, bool print_more) {
   // Dump the object pointer.
   OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
   if (obj->IsHeapObject()) {
-    HeapObject* hobj = HeapObject::cast(obj);
+    HeapObject *hobj = HeapObject::cast(obj);
     OS::FPrint(out, " size %d :", hobj->Size());
   }
 
diff --git a/src/inspector.h b/src/inspector.h
index 6962e21..e328bcd 100644
--- a/src/inspector.h
+++ b/src/inspector.h
@@ -41,14 +41,14 @@
 
 class Inspector {
  public:
-  static void DumpObjectType(FILE* out, Object* obj, bool print_more);
-  static void DumpObjectType(FILE* out, Object* obj) {
+  static void DumpObjectType(FILE* out, Object *obj, bool print_more);
+  static void DumpObjectType(FILE* out, Object *obj) {
     DumpObjectType(out, obj, false);
   }
-  static void DumpObjectType(Object* obj, bool print_more) {
+  static void DumpObjectType(Object *obj, bool print_more) {
     DumpObjectType(stdout, obj, print_more);
   }
-  static void DumpObjectType(Object* obj) {
+  static void DumpObjectType(Object *obj) {
     DumpObjectType(stdout, obj, false);
   }
 };
diff --git a/src/interface.cc b/src/interface.cc
deleted file mode 100644
index e344b86..0000000
--- a/src/interface.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "interface.h"
-
-namespace v8 {
-namespace internal {
-
-static bool Match(void* key1, void* key2) {
-  String* name1 = *static_cast<String**>(key1);
-  String* name2 = *static_cast<String**>(key2);
-  ASSERT(name1->IsSymbol());
-  ASSERT(name2->IsSymbol());
-  return name1 == name2;
-}
-
-
-Interface* Interface::Lookup(Handle<String> name) {
-  ASSERT(IsModule());
-  ZoneHashMap* map = Chase()->exports_;
-  if (map == NULL) return NULL;
-  ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false);
-  if (p == NULL) return NULL;
-  ASSERT(*static_cast<String**>(p->key) == *name);
-  ASSERT(p->value != NULL);
-  return static_cast<Interface*>(p->value);
-}
-
-
-#ifdef DEBUG
-// Current nesting depth for debug output.
-class Nesting {
- public:
-  Nesting()  { current_ += 2; }
-  ~Nesting() { current_ -= 2; }
-  static int current() { return current_; }
- private:
-  static int current_;
-};
-
-int Nesting::current_ = 0;
-#endif
-
-
-void Interface::DoAdd(
-    void* name, uint32_t hash, Interface* interface, bool* ok) {
-  MakeModule(ok);
-  if (!*ok) return;
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details) {
-    PrintF("%*s# Adding...\n", Nesting::current(), "");
-    PrintF("%*sthis = ", Nesting::current(), "");
-    this->Print(Nesting::current());
-    PrintF("%*s%s : ", Nesting::current(), "",
-           (*reinterpret_cast<String**>(name))->ToAsciiArray());
-    interface->Print(Nesting::current());
-  }
-#endif
-
-  ZoneHashMap** map = &Chase()->exports_;
-  if (*map == NULL) *map = new ZoneHashMap(Match, 8);
-
-  ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen());
-  if (p == NULL) {
-    // 'this' is frozen and does not have 'name' yet; adding it is an error.
-    *ok = false;
-  } else if (p->value == NULL) {
-    p->value = interface;
-  } else {
-#ifdef DEBUG
-    Nesting nested;
-#endif
-    reinterpret_cast<Interface*>(p->value)->Unify(interface, ok);
-  }
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details) {
-    PrintF("%*sthis' = ", Nesting::current(), "");
-    this->Print(Nesting::current());
-    PrintF("%*s# Added.\n", Nesting::current(), "");
-  }
-#endif
-}
-
-
-void Interface::Unify(Interface* that, bool* ok) {
-  if (this->forward_) return this->Chase()->Unify(that, ok);
-  if (that->forward_) return this->Unify(that->Chase(), ok);
-  ASSERT(this->forward_ == NULL);
-  ASSERT(that->forward_ == NULL);
-
-  *ok = true;
-  if (this == that) return;
-  if (this->IsValue()) return that->MakeValue(ok);
-  if (that->IsValue()) return this->MakeValue(ok);
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details) {
-    PrintF("%*s# Unifying...\n", Nesting::current(), "");
-    PrintF("%*sthis = ", Nesting::current(), "");
-    this->Print(Nesting::current());
-    PrintF("%*sthat = ", Nesting::current(), "");
-    that->Print(Nesting::current());
-  }
-#endif
-
-  // Merge the smaller interface into the larger, for performance.
-  if (this->exports_ != NULL && (that->exports_ == NULL ||
-      this->exports_->occupancy() >= that->exports_->occupancy())) {
-    this->DoUnify(that, ok);
-  } else {
-    that->DoUnify(this, ok);
-  }
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details) {
-    PrintF("%*sthis' = ", Nesting::current(), "");
-    this->Print(Nesting::current());
-    PrintF("%*sthat' = ", Nesting::current(), "");
-    that->Print(Nesting::current());
-    PrintF("%*s# Unified.\n", Nesting::current(), "");
-  }
-#endif
-}
-
-
-void Interface::DoUnify(Interface* that, bool* ok) {
-  ASSERT(this->forward_ == NULL);
-  ASSERT(that->forward_ == NULL);
-  ASSERT(!this->IsValue());
-  ASSERT(!that->IsValue());
-  ASSERT(*ok);
-
-#ifdef DEBUG
-    Nesting nested;
-#endif
-
-  // Try to merge all members from that into this.
-  ZoneHashMap* map = that->exports_;
-  if (map != NULL) {
-    for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
-      this->DoAdd(p->key, p->hash, static_cast<Interface*>(p->value), ok);
-      if (!*ok) return;
-    }
-  }
-
-  // If the merged interface is larger than 'that' was, then there were
-  // members in 'this' which 'that' didn't have. If 'that' was frozen, that
-  // is an error.
-  int this_size = this->exports_ == NULL ? 0 : this->exports_->occupancy();
-  int that_size = map == NULL ? 0 : map->occupancy();
-  if (that->IsFrozen() && this_size > that_size) {
-    *ok = false;
-    return;
-  }
-
-  // Merge interfaces.
-  this->flags_ |= that->flags_;
-  that->forward_ = this;
-}
-
-
-#ifdef DEBUG
-void Interface::Print(int n) {
-  int n0 = n > 0 ? n : 0;
-
-  if (FLAG_print_interface_details) {
-    PrintF("%p", static_cast<void*>(this));
-    for (Interface* link = this->forward_; link != NULL; link = link->forward_)
-      PrintF("->%p", static_cast<void*>(link));
-    PrintF(" ");
-  }
-
-  if (IsUnknown()) {
-    PrintF("unknown\n");
-  } else if (IsValue()) {
-    PrintF("value\n");
-  } else if (IsModule()) {
-    PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
-    ZoneHashMap* map = Chase()->exports_;
-    if (map == NULL || map->occupancy() == 0) {
-      PrintF("}\n");
-    } else if (n < 0 || n0 >= 2 * FLAG_print_interface_depth) {
-      // Avoid infinite recursion on cyclic types.
-      PrintF("...}\n");
-    } else {
-      PrintF("\n");
-      for (ZoneHashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) {
-        String* name = *static_cast<String**>(p->key);
-        Interface* interface = static_cast<Interface*>(p->value);
-        PrintF("%*s%s : ", n0 + 2, "", name->ToAsciiArray());
-        interface->Print(n0 + 2);
-      }
-      PrintF("%*s}\n", n0, "");
-    }
-  }
-}
-#endif
-
-} }  // namespace v8::internal
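
Structurally, Chase() and Unify() above implement union-find: forward_
links form chains that Chase() flattens on the fly (path compression), and
Unify() merges the smaller export map into the larger one (union by size).
A generic sketch of the same shape (illustrative, not V8 code):

    #include <cstddef>

    struct Node {
      Node* forward = nullptr;  // unification link, like Interface::forward_
      size_t size = 1;          // stands in for exports_->occupancy()
    };

    Node* Chase(Node* n) {
      Node* root = n;
      while (root->forward != nullptr) root = root->forward;
      if (root != n) n->forward = root;  // on-the-fly path compression
      return root;
    }

    void Unify(Node* a, Node* b) {
      a = Chase(a);
      b = Chase(b);
      if (a == b) return;
      if (a->size < b->size) { Node* t = a; a = b; b = t; }
      b->forward = a;  // merge the smaller tree into the larger
      a->size += b->size;
    }
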
diff --git a/src/interface.h b/src/interface.h
deleted file mode 100644
index c2991cb..0000000
--- a/src/interface.h
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_INTERFACE_H_
-#define V8_INTERFACE_H_
-
-#include "zone-inl.h"  // For operator new.
-
-namespace v8 {
-namespace internal {
-
-
-// This class implements the following abstract grammar of interfaces
-// (i.e. module types):
-//   interface ::= UNDETERMINED | VALUE | MODULE(exports)
-//   exports ::= {name : interface, ...}
-// A frozen module type is one that is fully determined. Unification does not
-// allow adding additional exports to frozen interfaces.
-// Otherwise, unifying modules merges their exports.
-// Undetermined types are unification variables that can be unified freely.
-
-class Interface : public ZoneObject {
- public:
-  // ---------------------------------------------------------------------------
-  // Factory methods.
-
-  static Interface* NewValue() {
-    static Interface value_interface(VALUE + FROZEN);  // Cached.
-    return &value_interface;
-  }
-
-  static Interface* NewUnknown() {
-    return new Interface(NONE);
-  }
-
-  static Interface* NewModule() {
-    return new Interface(MODULE);
-  }
-
-  // ---------------------------------------------------------------------------
-  // Mutators.
-
-  // Add a name to the list of exports. If it already exists, unify with
-  // interface, otherwise insert unless this is closed.
-  void Add(Handle<String> name, Interface* interface, bool* ok) {
-    DoAdd(name.location(), name->Hash(), interface, ok);
-  }
-
-  // Unify with another interface. If successful, both interface objects will
-  // represent the same type, and changes to one are reflected in the other.
-  void Unify(Interface* that, bool* ok);
-
-  // Determine this interface to be a value interface.
-  void MakeValue(bool* ok) {
-    *ok = !IsModule();
-    if (*ok) Chase()->flags_ |= VALUE;
-  }
-
-  // Determine this interface to be a module interface.
-  void MakeModule(bool* ok) {
-    *ok = !IsValue();
-    if (*ok) Chase()->flags_ |= MODULE;
-  }
-
-  // Do not allow any further refinements, directly or through unification.
-  void Freeze(bool* ok) {
-    *ok = IsValue() || IsModule();
-    if (*ok) Chase()->flags_ |= FROZEN;
-  }
-
-  // ---------------------------------------------------------------------------
-  // Accessors.
-
-  // Look up an exported name. Returns NULL if not (yet) defined.
-  Interface* Lookup(Handle<String> name);
-
-  // Check whether this is still a fully undetermined type.
-  bool IsUnknown() { return Chase()->flags_ == NONE; }
-
-  // Check whether this is a value type.
-  bool IsValue() { return Chase()->flags_ & VALUE; }
-
-  // Check whether this is a module type.
-  bool IsModule() { return Chase()->flags_ & MODULE; }
-
-  // Check whether this is closed (i.e. fully determined).
-  bool IsFrozen() { return Chase()->flags_ & FROZEN; }
-
-  // ---------------------------------------------------------------------------
-  // Debugging.
-#ifdef DEBUG
-  void Print(int n = 0);  // n = indentation; n < 0 => don't print recursively
-#endif
-
-  // ---------------------------------------------------------------------------
-  // Implementation.
- private:
-  enum Flags {    // All flags are monotonic
-    NONE = 0,
-    VALUE = 1,    // This type describes a value
-    MODULE = 2,   // This type describes a module
-    FROZEN = 4    // This type is fully determined
-  };
-
-  int flags_;
-  Interface* forward_;     // Unification link
-  ZoneHashMap* exports_;   // Module exports and their types (allocated lazily)
-
-  explicit Interface(int flags)
-    : flags_(flags),
-      forward_(NULL),
-      exports_(NULL) {
-#ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Creating %p\n", static_cast<void*>(this));
-#endif
-  }
-
-  Interface* Chase() {
-    Interface* result = this;
-    while (result->forward_ != NULL) result = result->forward_;
-    if (result != this) forward_ = result;  // On-the-fly path compression.
-    return result;
-  }
-
-  void DoAdd(void* name, uint32_t hash, Interface* interface, bool* ok);
-  void DoUnify(Interface* that, bool* ok);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_INTERFACE_H_
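
The flag bits in the header above form a small monotonic lattice: an
interface can move from NONE to VALUE or MODULE (never both) and then be
FROZEN, and no bit is ever cleared. Sketch of just that state machine
(illustrative only):

    enum Flags { NONE = 0, VALUE = 1, MODULE = 2, FROZEN = 4 };

    struct Iface { int flags = NONE; };

    // Each refinement succeeds unless the interface is already the other
    // kind; that is the only failure mode unification needs to report.
    bool MakeValue(Iface* i)  { if (i->flags & MODULE) return false;
                                i->flags |= VALUE;  return true; }
    bool MakeModule(Iface* i) { if (i->flags & VALUE) return false;
                                i->flags |= MODULE; return true; }
    bool Freeze(Iface* i)     { if (!(i->flags & (VALUE | MODULE))) return false;
                                i->flags |= FROZEN; return true; }
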
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index b337e88..796a447 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -33,9 +33,9 @@
 #include "utils.h"
 #include "ast.h"
 #include "bytecodes-irregexp.h"
-#include "jsregexp.h"
 #include "interpreter-irregexp.h"
 
+
 namespace v8 {
 namespace internal {
 
@@ -187,12 +187,12 @@
 
 
 template <typename Char>
-static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
-                                           const byte* code_base,
-                                           Vector<const Char> subject,
-                                           int* registers,
-                                           int current,
-                                           uint32_t current_char) {
+static bool RawMatch(Isolate* isolate,
+                     const byte* code_base,
+                     Vector<const Char> subject,
+                     int* registers,
+                     int current,
+                     uint32_t current_char) {
   const byte* pc = code_base;
   // BacktrackStack ensures that the memory allocated for the backtracking stack
   // is returned to the system or cached if there is no stack being cached at
@@ -211,24 +211,24 @@
     switch (insn & BYTECODE_MASK) {
       BYTECODE(BREAK)
         UNREACHABLE();
-        return RegExpImpl::RE_FAILURE;
+        return false;
       BYTECODE(PUSH_CP)
         if (--backtrack_stack_space < 0) {
-          return RegExpImpl::RE_EXCEPTION;
+          return false;  // No match on backtrack stack overflow.
         }
         *backtrack_sp++ = current;
         pc += BC_PUSH_CP_LENGTH;
         break;
       BYTECODE(PUSH_BT)
         if (--backtrack_stack_space < 0) {
-          return RegExpImpl::RE_EXCEPTION;
+          return false;  // No match on backtrack stack overflow.
         }
         *backtrack_sp++ = Load32Aligned(pc + 4);
         pc += BC_PUSH_BT_LENGTH;
         break;
       BYTECODE(PUSH_REGISTER)
         if (--backtrack_stack_space < 0) {
-          return RegExpImpl::RE_EXCEPTION;
+          return false;  // No match on backtrack stack overflow.
         }
         *backtrack_sp++ = registers[insn >> BYTECODE_SHIFT];
         pc += BC_PUSH_REGISTER_LENGTH;
@@ -278,9 +278,9 @@
         pc += BC_POP_REGISTER_LENGTH;
         break;
       BYTECODE(FAIL)
-        return RegExpImpl::RE_FAILURE;
+        return false;
       BYTECODE(SUCCEED)
-        return RegExpImpl::RE_SUCCESS;
+        return true;
       BYTECODE(ADVANCE_CP)
         current += insn >> BYTECODE_SHIFT;
         pc += BC_ADVANCE_CP_LENGTH;
@@ -625,12 +625,11 @@
 }
 
 
-RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
-    Isolate* isolate,
-    Handle<ByteArray> code_array,
-    Handle<String> subject,
-    int* registers,
-    int start_position) {
+bool IrregexpInterpreter::Match(Isolate* isolate,
+                                Handle<ByteArray> code_array,
+                                Handle<String> subject,
+                                int* registers,
+                                int start_position) {
   ASSERT(subject->IsFlat());
 
   AssertNoAllocation a;
diff --git a/src/interpreter-irregexp.h b/src/interpreter-irregexp.h
index 0f45d98..076f0c5 100644
--- a/src/interpreter-irregexp.h
+++ b/src/interpreter-irregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,11 +36,11 @@
 
 class IrregexpInterpreter {
  public:
-  static RegExpImpl::IrregexpResult Match(Isolate* isolate,
-                                          Handle<ByteArray> code,
-                                          Handle<String> subject,
-                                          int* captures,
-                                          int start_position);
+  static bool Match(Isolate* isolate,
+                    Handle<ByteArray> code,
+                    Handle<String> subject,
+                    int* captures,
+                    int start_position);
 };
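
One behavioural consequence of the interpreter revert above: the newer
tri-state result (RE_FAILURE / RE_SUCCESS / RE_EXCEPTION) let callers tell
"no match" apart from "ran out of backtrack stack"; with a plain bool that
distinction is gone, as the restored "No match on backtrack stack
overflow" comments acknowledge. Sketch of the collapse (enum values as in
the removed code):

    enum IrregexpResult { RE_FAILURE = 0, RE_SUCCESS = 1, RE_EXCEPTION = -1 };

    bool CollapseToBool(IrregexpResult r) {
      return r == RE_SUCCESS;  // RE_EXCEPTION silently becomes "no match"
    }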
 
 
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 9fb16fb..aa6b537 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -36,29 +36,6 @@
 namespace internal {
 
 
-SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
-  if (isolate->context() != NULL) {
-    context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
-    dummy_ = Handle<Context>(isolate->context());
-#endif
-  }
-  isolate->set_save_context(this);
-
-  c_entry_fp_ = isolate->c_entry_fp(isolate->thread_local_top());
-}
-
-
-bool Isolate::IsDebuggerActive() {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  if (!NoBarrier_Load(&debugger_initialized_)) return false;
-  return debugger()->IsDebuggerActive();
-#else
-  return false;
-#endif
-}
-
-
 bool Isolate::DebuggerHasBreakPoints() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   return debug()->has_break_points();
diff --git a/src/isolate.cc b/src/isolate.cc
index 625cc56..fd0f673 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -38,11 +38,9 @@
 #include "heap-profiler.h"
 #include "hydrogen.h"
 #include "isolate.h"
-#include "lazy-instance.h"
 #include "lithium-allocator.h"
 #include "log.h"
 #include "messages.h"
-#include "platform.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
@@ -57,31 +55,6 @@
 namespace v8 {
 namespace internal {
 
-struct GlobalState {
-  Thread::LocalStorageKey per_isolate_thread_data_key;
-  Thread::LocalStorageKey isolate_key;
-  Thread::LocalStorageKey thread_id_key;
-  Isolate* default_isolate;
-  Isolate::ThreadDataTable* thread_data_table;
-  Mutex* mutex;
-};
-
-struct InitializeGlobalState {
-  static void Construct(GlobalState* state) {
-    state->isolate_key = Thread::CreateThreadLocalKey();
-    state->thread_id_key = Thread::CreateThreadLocalKey();
-    state->per_isolate_thread_data_key = Thread::CreateThreadLocalKey();
-    state->thread_data_table = new Isolate::ThreadDataTable();
-    state->default_isolate = new Isolate();
-    state->mutex = OS::CreateMutex();
-    // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
-    // because a non-null thread data may be already set.
-    Thread::SetThreadLocal(state->isolate_key, state->default_isolate);
-  }
-};
-
-static LazyInstance<GlobalState, InitializeGlobalState>::type global_state;
-
 Atomic32 ThreadId::highest_thread_id_ = 0;
 
 int ThreadId::AllocateThreadId() {
@@ -91,11 +64,10 @@
 
 
 int ThreadId::GetCurrentThreadId() {
-  const GlobalState& global = global_state.Get();
-  int thread_id = Thread::GetThreadLocalInt(global.thread_id_key);
+  int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
   if (thread_id == 0) {
     thread_id = AllocateThreadId();
-    Thread::SetThreadLocalInt(global.thread_id_key, thread_id);
+    Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
   }
   return thread_id;
 }
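
GetCurrentThreadId() above shows the usual lazy thread-id pattern: a
process-wide counter plus a thread-local slot, so an id is handed out the
first time a thread asks for one. A portable sketch in modern C++ (V8 of
this era instead uses its own Thread::SetThreadLocalInt and an atomic
increment):

    #include <atomic>

    static std::atomic<int> highest_thread_id{0};
    static thread_local int cached_thread_id = 0;  // 0 == not assigned yet

    int GetCurrentThreadId() {
      if (cached_thread_id == 0) {
        cached_thread_id = ++highest_thread_id;  // ids start at 1
      }
      return cached_thread_id;
    }
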
@@ -126,15 +98,6 @@
   failed_access_check_callback_ = NULL;
   save_context_ = NULL;
   catcher_ = NULL;
-  top_lookup_result_ = NULL;
-
-  // These members are re-initialized later after deserialization
-  // is complete.
-  pending_exception_ = NULL;
-  has_pending_message_ = false;
-  pending_message_obj_ = NULL;
-  pending_message_script_ = NULL;
-  scheduled_exception_ = NULL;
 }
 
 
@@ -339,16 +302,44 @@
   storage->LinkTo(&free_list_);
 }
 
+
+Isolate* Isolate::default_isolate_ = NULL;
+Thread::LocalStorageKey Isolate::isolate_key_;
+Thread::LocalStorageKey Isolate::thread_id_key_;
+Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
+
+
+class IsolateInitializer {
+ public:
+  IsolateInitializer() {
+    Isolate::EnsureDefaultIsolate();
+  }
+};
+
+static IsolateInitializer* EnsureDefaultIsolateAllocated() {
+  // TODO(isolates): Use the system threading API to do this once?
+  static IsolateInitializer static_initializer;
+  return &static_initializer;
+}
+
+// This variable is only needed to trigger static initialization.
+static IsolateInitializer* static_initializer = EnsureDefaultIsolateAllocated();
+
+
 Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData(
     ThreadId thread_id) {
   ASSERT(!thread_id.Equals(ThreadId::Invalid()));
   PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
   {
-    GlobalState* const global = global_state.Pointer();
-    ScopedLock lock(global->mutex);
-    ASSERT(global->thread_data_table->Lookup(this, thread_id) == NULL);
-    global->thread_data_table->Insert(per_thread);
-    ASSERT(global->thread_data_table->Lookup(this, thread_id) == per_thread);
+    ScopedLock lock(process_wide_mutex_);
+    ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
+    thread_data_table_->Insert(per_thread);
+    ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
   }
   return per_thread;
 }
@@ -359,9 +350,8 @@
   ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
-    GlobalState* const global = global_state.Pointer();
-    ScopedLock lock(global->mutex);
-    per_thread = global->thread_data_table->Lookup(this, thread_id);
+    ScopedLock lock(process_wide_mutex_);
+    per_thread = thread_data_table_->Lookup(this, thread_id);
     if (per_thread == NULL) {
       per_thread = AllocatePerIsolateThreadData(thread_id);
     }
@@ -374,25 +364,26 @@
   ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
-    GlobalState* const global = global_state.Pointer();
-    ScopedLock lock(global->mutex);
-    per_thread = global->thread_data_table->Lookup(this, thread_id);
+    ScopedLock lock(process_wide_mutex_);
+    per_thread = thread_data_table_->Lookup(this, thread_id);
   }
   return per_thread;
 }
 
 
-bool Isolate::IsDefaultIsolate() const {
-  return this == global_state.Get().default_isolate;
-}
-
-
 void Isolate::EnsureDefaultIsolate() {
-  GlobalState* const global = global_state.Pointer();
+  ScopedLock lock(process_wide_mutex_);
+  if (default_isolate_ == NULL) {
+    isolate_key_ = Thread::CreateThreadLocalKey();
+    thread_id_key_ = Thread::CreateThreadLocalKey();
+    per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+    thread_data_table_ = new Isolate::ThreadDataTable();
+    default_isolate_ = new Isolate();
+  }
   // Can't use SetIsolateThreadLocals(default_isolate_, NULL) here
-  // because a non-null thread data may be already set.
-  if (Thread::GetThreadLocal(global->isolate_key) == NULL) {
-    Thread::SetThreadLocal(global->isolate_key, global->default_isolate);
+  // because a non-null thread data may already be set.
+  if (Thread::GetThreadLocal(isolate_key_) == NULL) {
+    Thread::SetThreadLocal(isolate_key_, default_isolate_);
   }
 }
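
The hunk above restores the pre-LazyInstance initialization scheme: a
static initializer object forces EnsureDefaultIsolate() to run before
main(), and a process-wide mutex keeps it safe if other threads race in
early. Generic sketch of that shape (std::mutex standing in for V8's
Mutex/ScopedLock):

    #include <mutex>

    static std::mutex process_wide_mutex;
    static int* default_instance = nullptr;  // stands in for default_isolate_

    void EnsureDefault() {
      std::lock_guard<std::mutex> lock(process_wide_mutex);
      if (default_instance == nullptr) default_instance = new int;
    }

    struct Initializer { Initializer() { EnsureDefault(); } };
    static Initializer static_initializer_object;  // runs during static init
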
 
@@ -400,48 +391,32 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 Debugger* Isolate::GetDefaultIsolateDebugger() {
   EnsureDefaultIsolate();
-  return global_state.Pointer()->default_isolate->debugger();
+  return default_isolate_->debugger();
 }
 #endif
 
 
 StackGuard* Isolate::GetDefaultIsolateStackGuard() {
   EnsureDefaultIsolate();
-  return global_state.Pointer()->default_isolate->stack_guard();
-}
-
-
-Thread::LocalStorageKey Isolate::isolate_key() {
-  return global_state.Get().isolate_key;
-}
-
-
-Thread::LocalStorageKey Isolate::thread_id_key() {
-  return global_state.Get().thread_id_key;
-}
-
-
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key() {
-  return global_state.Get().per_isolate_thread_data_key;
+  return default_isolate_->stack_guard();
 }
 
 
 void Isolate::EnterDefaultIsolate() {
   EnsureDefaultIsolate();
-  Isolate* const default_isolate = global_state.Pointer()->default_isolate;
-  ASSERT(default_isolate != NULL);
+  ASSERT(default_isolate_ != NULL);
 
   PerIsolateThreadData* data = CurrentPerIsolateThreadData();
   // If not yet in default isolate - enter it.
-  if (data == NULL || data->isolate() != default_isolate) {
-    default_isolate->Enter();
+  if (data == NULL || data->isolate() != default_isolate_) {
+    default_isolate_->Enter();
   }
 }
 
 
 Isolate* Isolate::GetDefaultIsolateForLocking() {
   EnsureDefaultIsolate();
-  return global_state.Pointer()->default_isolate;
+  return default_isolate_;
 }
 
 
@@ -497,9 +472,6 @@
   for (StackFrameIterator it(this, thread); !it.done(); it.Advance()) {
     it.frame()->Iterate(v);
   }
-
-  // Iterate pointers in live lookup results.
-  thread->top_lookup_result_->Iterate(v);
 }
 
 
@@ -558,18 +530,6 @@
 }
 
 
-void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
-  if (capture_stack_trace_for_uncaught_exceptions_) {
-    // Capture stack trace for a detailed exception message.
-    Handle<String> key = factory()->hidden_stack_trace_symbol();
-    Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
-        stack_trace_for_uncaught_exceptions_frame_limit_,
-        stack_trace_for_uncaught_exceptions_options_);
-    JSObject::SetHiddenProperty(error_object, key, stack_trace);
-  }
-}
-
-
 Handle<JSArray> Isolate::CaptureCurrentStackTrace(
     int frame_limit, StackTrace::StackTraceOptions options) {
   // Ensure no negative values.
@@ -598,7 +558,7 @@
     frame->Summarize(&frames);
     for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
       // Create a JSObject to hold the information for the StackFrame.
-      Handle<JSObject> stack_frame = factory()->NewJSObject(object_function());
+      Handle<JSObject> stackFrame = factory()->NewJSObject(object_function());
 
       Handle<JSFunction> fun = frames[i].function();
       Handle<Script> script(Script::cast(fun->shared()->script()));
@@ -619,24 +579,16 @@
             // tag.
             column_offset += script->column_offset()->value();
           }
-          CHECK_NOT_EMPTY_HANDLE(
-              this,
-              JSObject::SetLocalPropertyIgnoreAttributes(
-                  stack_frame, column_key,
-                  Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
+          SetLocalPropertyNoThrow(stackFrame, column_key,
+                                  Handle<Smi>(Smi::FromInt(column_offset + 1)));
         }
-        CHECK_NOT_EMPTY_HANDLE(
-            this,
-            JSObject::SetLocalPropertyIgnoreAttributes(
-                stack_frame, line_key,
-                Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
+        SetLocalPropertyNoThrow(stackFrame, line_key,
+                                Handle<Smi>(Smi::FromInt(line_number + 1)));
       }
 
       if (options & StackTrace::kScriptName) {
         Handle<Object> script_name(script->name(), this);
-        CHECK_NOT_EMPTY_HANDLE(this,
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   stack_frame, script_key, script_name, NONE));
+        SetLocalPropertyNoThrow(stackFrame, script_key, script_name);
       }
 
       if (options & StackTrace::kScriptNameOrSourceURL) {
@@ -652,10 +604,8 @@
         if (caught_exception) {
           result = factory()->undefined_value();
         }
-        CHECK_NOT_EMPTY_HANDLE(this,
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   stack_frame, script_name_or_source_url_key,
-                                   result, NONE));
+        SetLocalPropertyNoThrow(stackFrame, script_name_or_source_url_key,
+                                result);
       }
 
       if (options & StackTrace::kFunctionName) {
@@ -663,30 +613,23 @@
         if (fun_name->ToBoolean()->IsFalse()) {
           fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
         }
-        CHECK_NOT_EMPTY_HANDLE(this,
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   stack_frame, function_key, fun_name, NONE));
+        SetLocalPropertyNoThrow(stackFrame, function_key, fun_name);
       }
 
       if (options & StackTrace::kIsEval) {
         int type = Smi::cast(script->compilation_type())->value();
         Handle<Object> is_eval = (type == Script::COMPILATION_TYPE_EVAL) ?
             factory()->true_value() : factory()->false_value();
-        CHECK_NOT_EMPTY_HANDLE(this,
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   stack_frame, eval_key, is_eval, NONE));
+        SetLocalPropertyNoThrow(stackFrame, eval_key, is_eval);
       }
 
       if (options & StackTrace::kIsConstructor) {
         Handle<Object> is_constructor = (frames[i].is_constructor()) ?
             factory()->true_value() : factory()->false_value();
-        CHECK_NOT_EMPTY_HANDLE(this,
-                               JSObject::SetLocalPropertyIgnoreAttributes(
-                                   stack_frame, constructor_key,
-                                   is_constructor, NONE));
+        SetLocalPropertyNoThrow(stackFrame, constructor_key, is_constructor);
       }
 
-      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
+      FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
       frames_seen++;
     }
     it.Advance();
@@ -791,12 +734,10 @@
   HandleScope scope;
   Handle<JSObject> receiver_handle(receiver);
   Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
-  { VMState state(this, EXTERNAL);
-    thread_local_top()->failed_access_check_callback_(
-      v8::Utils::ToLocal(receiver_handle),
-      type,
-      v8::Utils::ToLocal(data));
-  }
+  thread_local_top()->failed_access_check_callback_(
+    v8::Utils::ToLocal(receiver_handle),
+    type,
+    v8::Utils::ToLocal(data));
 }
 
 
@@ -1041,7 +982,7 @@
   // Find the top-most try-catch handler.
   StackHandler* handler =
       StackHandler::FromAddress(Isolate::handler(thread_local_top()));
-  while (handler != NULL && !handler->is_catch()) {
+  while (handler != NULL && !handler->is_try_catch()) {
     handler = handler->next();
   }
 
@@ -1067,39 +1008,22 @@
 }
 
 
-bool Isolate::IsErrorObject(Handle<Object> obj) {
-  if (!obj->IsJSObject()) return false;
-
-  String* error_key = *(factory()->LookupAsciiSymbol("$Error"));
-  Object* error_constructor =
-      js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
-
-  for (Object* prototype = *obj; !prototype->IsNull();
-       prototype = prototype->GetPrototype()) {
-    if (!prototype->IsJSObject()) return false;
-    if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-void Isolate::DoThrow(Object* exception, MessageLocation* location) {
+void Isolate::DoThrow(MaybeObject* exception, MessageLocation* location) {
   ASSERT(!has_pending_exception());
 
   HandleScope scope;
-  Handle<Object> exception_handle(exception);
+  Object* exception_object = Smi::FromInt(0);
+  bool is_object = exception->ToObject(&exception_object);
+  Handle<Object> exception_handle(exception_object);
 
   // Determine reporting and whether the exception is caught externally.
   bool catchable_by_javascript = is_catchable_by_javascript(exception);
+  // Only real objects can be caught by JS.
+  ASSERT(!catchable_by_javascript || is_object);
   bool can_be_caught_externally = false;
   bool should_report_exception =
       ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
   bool report_exception = catchable_by_javascript && should_report_exception;
-  bool try_catch_needs_message =
-      can_be_caught_externally && try_catch_handler()->capture_message_;
-  bool bootstrapping = bootstrapper()->IsActive();
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger of exception.
@@ -1108,74 +1032,63 @@
   }
 #endif
 
-  // Generate the message if required.
+  // Generate the message.
+  Handle<Object> message_obj;
+  MessageLocation potential_computed_location;
+  bool try_catch_needs_message =
+      can_be_caught_externally &&
+      try_catch_handler()->capture_message_;
   if (report_exception || try_catch_needs_message) {
-    MessageLocation potential_computed_location;
     if (location == NULL) {
-      // If no location was specified we use a computed one instead.
+      // If no location was specified we use a computed one instead
       ComputeLocation(&potential_computed_location);
       location = &potential_computed_location;
     }
-    // It's not safe to try to make message objects or collect stack traces
-    // while the bootstrapper is active since the infrastructure may not have
-    // been properly initialized.
-    if (!bootstrapping) {
+    if (!bootstrapper()->IsActive()) {
+      // It's not safe to try to make message objects or collect stack
+      // traces while the bootstrapper is active since the infrastructure
+      // may not have been properly initialized.
       Handle<String> stack_trace;
       if (FLAG_trace_exception) stack_trace = StackTraceString();
       Handle<JSArray> stack_trace_object;
-      if (capture_stack_trace_for_uncaught_exceptions_) {
-        if (IsErrorObject(exception_handle)) {
-          // We fetch the stack trace that corresponds to this error object.
-          String* key = heap()->hidden_stack_trace_symbol();
-          Object* stack_property =
-              JSObject::cast(*exception_handle)->GetHiddenProperty(key);
-          // Property lookup may have failed.  In this case it's probably not
-          // a valid Error object.
-          if (stack_property->IsJSArray()) {
-            stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property));
-          }
-        }
-        if (stack_trace_object.is_null()) {
-          // Not an error object, we capture at throw site.
+      if (report_exception && capture_stack_trace_for_uncaught_exceptions_) {
           stack_trace_object = CaptureCurrentStackTrace(
               stack_trace_for_uncaught_exceptions_frame_limit_,
               stack_trace_for_uncaught_exceptions_options_);
-        }
       }
-      Handle<Object> message_obj = MessageHandler::MakeMessageObject(
-          "uncaught_exception",
-          location,
-          HandleVector<Object>(&exception_handle, 1),
-          stack_trace,
+      ASSERT(is_object);  // Can't use the handle unless there's a real object.
+      message_obj = MessageHandler::MakeMessageObject("uncaught_exception",
+          location, HandleVector<Object>(&exception_handle, 1), stack_trace,
           stack_trace_object);
-      thread_local_top()->pending_message_obj_ = *message_obj;
-      if (location != NULL) {
-        thread_local_top()->pending_message_script_ = *location->script();
-        thread_local_top()->pending_message_start_pos_ = location->start_pos();
-        thread_local_top()->pending_message_end_pos_ = location->end_pos();
-      }
-    } else if (location != NULL && !location->script().is_null()) {
-      // We are bootstrapping and caught an error where the location is set
-      // and we have a script for the location.
-      // In this case we could have an extension (or an internal error
-      // somewhere) and we print out the line number at which the error occurred
-      // to the console for easier debugging.
-      int line_number = GetScriptLineNumberSafe(location->script(),
-                                                location->start_pos());
-      OS::PrintError("Extension or internal compilation error at line %d.\n",
-                     line_number);
     }
   }
 
   // Save the message for reporting if the exception remains uncaught.
   thread_local_top()->has_pending_message_ = report_exception;
+  if (!message_obj.is_null()) {
+    thread_local_top()->pending_message_obj_ = *message_obj;
+    if (location != NULL) {
+      thread_local_top()->pending_message_script_ = *location->script();
+      thread_local_top()->pending_message_start_pos_ = location->start_pos();
+      thread_local_top()->pending_message_end_pos_ = location->end_pos();
+    }
+  }
 
   // Do not forget to clean catcher_ if currently thrown exception cannot
   // be caught.  If necessary, ReThrow will update the catcher.
   thread_local_top()->catcher_ = can_be_caught_externally ?
       try_catch_handler() : NULL;
 
-  set_pending_exception(*exception_handle);
+  // NOTE: Notifying the debugger or generating the message
+  // may have caused new exceptions. For now, we just ignore
+  // that and set the pending exception to the original one.
+  if (is_object) {
+    set_pending_exception(*exception_handle);
+  } else {
+    // Failures are not on the heap so they neither need nor work with handles.
+    ASSERT(exception_handle->IsFailure());
+    set_pending_exception(exception);
+  }
 }
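
The reverted DoThrow restores V8 3.6's MaybeObject convention: a thrown value is either a real heap object or a Failure that must bypass handles entirely. A minimal sketch of that check-and-unwrap idiom, using hypothetical stand-in types rather than V8's actual classes:

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins for V8's Object/Failure/MaybeObject.
struct MyObject {};

struct MyMaybeObject {
  MyObject* object;   // set when the value is a real heap object
  bool is_failure;    // set for allocation failures and the like

  // Mirrors MaybeObject::ToObject: returns false for Failures.
  bool ToObject(MyObject** out) {
    if (is_failure) return false;
    *out = object;
    return true;
  }
  bool IsFailure() const { return is_failure; }
};

void DoThrowSketch(MyMaybeObject* exception) {
  MyObject* exception_object = NULL;
  bool is_object = exception->ToObject(&exception_object);
  if (is_object) {
    // Safe to wrap exception_object in a handle and report it to JS.
  } else {
    // Failures are not heap objects; store the raw value directly.
    assert(exception->IsFailure());
  }
}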
 
 
@@ -1211,8 +1124,8 @@
   StackHandler* handler =
       StackHandler::FromAddress(Isolate::handler(thread_local_top()));
   while (handler != NULL && handler->address() < external_handler_address) {
-    ASSERT(!handler->is_catch());
-    if (handler->is_finally()) return false;
+    ASSERT(!handler->is_try_catch());
+    if (handler->is_try_finally()) return false;
 
     handler = handler->next();
   }
@@ -1267,7 +1180,7 @@
   ASSERT(has_pending_exception());
   PropagatePendingExceptionToExternalTryCatch();
 
-  // Always reschedule out of memory exceptions.
+  // Always reschedule out of memory exceptions.
   if (!is_out_of_memory()) {
     bool is_termination_exception =
         pending_exception() == heap_.termination_exception();
@@ -1371,9 +1284,6 @@
   memcpy(to, reinterpret_cast<char*>(thread_local_top()),
          sizeof(ThreadLocalTop));
   InitializeThreadLocal();
-  clear_pending_exception();
-  clear_pending_message();
-  clear_scheduled_exception();
   return to + sizeof(ThreadLocalTop);
 }
 
@@ -1493,18 +1403,14 @@
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
-      inner_pointer_to_code_cache_(NULL),
+      pc_to_code_cache_(NULL),
       write_input_buffer_(NULL),
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
-      fp_stubs_generated_(false),
-      has_installed_extensions_(false),
       string_tracker_(NULL),
       regexp_stack_(NULL),
-      date_cache_(NULL),
-      embedder_data_(NULL),
-      context_exit_happened_(false) {
+      embedder_data_(NULL) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
@@ -1564,8 +1470,8 @@
 
   Deinit();
 
-  { ScopedLock lock(global_state.Pointer()->mutex);
-    global_state.Pointer()->thread_data_table->RemoveAllThreads(this);
+  { ScopedLock lock(process_wide_mutex_);
+    thread_data_table_->RemoveAllThreads(this);
   }
 
   if (!IsDefaultIsolate()) {
@@ -1618,9 +1524,8 @@
 
 void Isolate::SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data) {
-  const GlobalState& global = global_state.Get();
-  Thread::SetThreadLocal(global.isolate_key, isolate);
-  Thread::SetThreadLocal(global.per_isolate_thread_data_key, data);
+  Thread::SetThreadLocal(isolate_key_, isolate);
+  Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
 }
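
SetIsolateThreadLocals stores the current isolate and its per-thread data in thread-local slots keyed by isolate_key_ and per_isolate_thread_data_key_. A rough equivalent using C++11 thread_local in place of V8's Thread::SetThreadLocal/GetThreadLocal (names here are illustrative only):

struct MyIsolate;  // stand-in for v8::internal::Isolate

// One slot per thread; V8 3.6 uses explicit TLS keys instead.
static thread_local MyIsolate* g_current_isolate = nullptr;

static void SetCurrentIsolate(MyIsolate* isolate) {
  g_current_isolate = isolate;
}

static MyIsolate* CurrentIsolate() {
  // Callers are expected to have entered an isolate on this thread.
  return g_current_isolate;
}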
 
 
@@ -1636,9 +1541,6 @@
   delete unicode_cache_;
   unicode_cache_ = NULL;
 
-  delete date_cache_;
-  date_cache_ = NULL;
-
   delete regexp_stack_;
   regexp_stack_ = NULL;
 
@@ -1673,8 +1575,8 @@
   compilation_cache_ = NULL;
   delete bootstrapper_;
   bootstrapper_ = NULL;
-  delete inner_pointer_to_code_cache_;
-  inner_pointer_to_code_cache_ = NULL;
+  delete pc_to_code_cache_;
+  pc_to_code_cache_ = NULL;
   delete write_input_buffer_;
   write_input_buffer_ = NULL;
 
@@ -1708,6 +1610,9 @@
 void Isolate::InitializeThreadLocal() {
   thread_local_top_.isolate_ = this;
   thread_local_top_.Initialize();
+  clear_pending_exception();
+  clear_pending_message();
+  clear_scheduled_exception();
 }
 
 
@@ -1795,7 +1700,7 @@
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
-  inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
+  pc_to_code_cache_ = new PcToCodeCache(this);
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
@@ -1803,13 +1708,12 @@
   stub_cache_ = new StubCache(this);
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
-  date_cache_ = new DateCache();
 
   // Enable logging before setting up the heap
-  logger_->SetUp();
+  logger_->Setup();
 
-  CpuProfiler::SetUp();
-  HeapProfiler::SetUp();
+  CpuProfiler::Setup();
+  HeapProfiler::Setup();
 
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
@@ -1826,10 +1730,10 @@
     stack_guard_.InitThread(lock);
   }
 
-  // SetUp the object heap.
+  // Setup the object heap.
   const bool create_heap_objects = (des == NULL);
-  ASSERT(!heap_.HasBeenSetUp());
-  if (!heap_.SetUp(create_heap_objects)) {
+  ASSERT(!heap_.HasBeenSetup());
+  if (!heap_.Setup(create_heap_objects)) {
     V8::SetFatalError();
     return false;
   }
@@ -1837,7 +1741,7 @@
   InitializeThreadLocal();
 
   bootstrapper_->Initialize(create_heap_objects);
-  builtins_.SetUp(create_heap_objects);
+  builtins_.Setup(create_heap_objects);
 
   // Only preallocate on the first initialization.
   if (FLAG_preallocate_message_memory && preallocated_message_space_ == NULL) {
@@ -1856,19 +1760,15 @@
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  debug_->SetUp(create_heap_objects);
+  debug_->Setup(create_heap_objects);
 #endif
+  stub_cache_->Initialize(create_heap_objects);
 
   // If we are deserializing, read the state into the now-empty heap.
   if (des != NULL) {
     des->Deserialize();
+    stub_cache_->Clear();
   }
-  stub_cache_->Initialize();
-
-  // Finish initialization of ThreadLocal after deserialization is done.
-  clear_pending_exception();
-  clear_pending_message();
-  clear_scheduled_exception();
 
   // Deserializing may put strange things in the root array's copy of the
   // stack guard.
@@ -1876,7 +1776,7 @@
 
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
-  runtime_profiler_->SetUp();
+  runtime_profiler_->Setup();
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
@@ -1887,7 +1787,6 @@
   }
 
   state_ = INITIALIZED;
-  time_millis_at_init_ = OS::TimeCurrentMillis();
   return true;
 }
 
diff --git a/src/isolate.h b/src/isolate.h
index 0c5a54c..2582da6 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,10 +36,8 @@
 #include "contexts.h"
 #include "execution.h"
 #include "frames.h"
-#include "date.h"
 #include "global-handles.h"
 #include "handles.h"
-#include "hashmap.h"
 #include "heap.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
@@ -68,7 +66,7 @@
 class HeapProfiler;
 class InlineRuntimeFunctionsTable;
 class NoAllocationStringAllocator;
-class InnerPointerToCodeCache;
+class PcToCodeCache;
 class PreallocatedMemoryThread;
 class RegExpStack;
 class SaveContext;
@@ -108,28 +106,15 @@
 // of handles to the actual constants.
 typedef ZoneList<Handle<Object> > ZoneObjectList;
 
-#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)            \
-  do {                                                    \
-    Isolate* __isolate__ = (isolate);                     \
-    if (__isolate__->has_scheduled_exception()) {         \
-      return __isolate__->PromoteScheduledException();    \
-    }                                                     \
-  } while (false)
+#define RETURN_IF_SCHEDULED_EXCEPTION(isolate)    \
+  if (isolate->has_scheduled_exception())         \
+      return isolate->PromoteScheduledException()
 
 #define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
-  do {                                                     \
-    if ((call).is_null()) {                                \
-      ASSERT((isolate)->has_pending_exception());          \
-      return (value);                                      \
-    }                                                      \
-  } while (false)
-
-#define CHECK_NOT_EMPTY_HANDLE(isolate, call)     \
-  do {                                            \
-    ASSERT(!(isolate)->has_pending_exception());  \
-    CHECK(!(call).is_null());                     \
-    CHECK(!(isolate)->has_pending_exception());   \
-  } while (false)
+  if (call.is_null()) {                                    \
+    ASSERT(isolate->has_pending_exception());              \
+    return value;                                          \
+  }
 
 #define RETURN_IF_EMPTY_HANDLE(isolate, call)                       \
   RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
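
The do { ... } while (false) wrappers dropped by this revert are the standard trick for making a multi-statement macro behave as a single statement. A small self-contained illustration of the pitfall the wrapper avoids (the BAD_/GOOD_ macros are hypothetical):

// Unwrapped form: the macro's if can capture a following else.
#define BAD_RETURN_IF(cond, value) \
  if (cond) return (value)

// Wrapped form: expands to exactly one statement, so if/else composes.
#define GOOD_RETURN_IF(cond, value) \
  do {                              \
    if (cond) return (value);       \
  } while (false)

int Pick(bool a, bool b) {
  if (a)
    GOOD_RETURN_IF(b, 1);  // with BAD_RETURN_IF, the else below would
  else                     // bind to the macro's internal if instead
    return 2;
  return 0;
}

The reverted single-statement forms worked in practice, but they are fragile in exactly the way shown above.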
@@ -260,7 +245,7 @@
 #endif
 #endif  // USE_SIMULATOR
 
-  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
+  Address js_entry_sp_;  // the stack pointer of the bottom JS entry frame
   Address external_callback_;  // the external callback we're currently in
   StateTag current_vm_state_;
 
@@ -270,9 +255,6 @@
   // Call back function to report unsafe JS accesses.
   v8::FailedAccessCheckCallback failed_access_check_callback_;
 
-  // Head of the list of live LookupResults.
-  LookupResult* top_lookup_result_;
-
   // Whether out of memory exceptions should be ignored.
   bool ignore_out_of_memory_;
 
@@ -282,6 +264,23 @@
   Address try_catch_handler_address_;
 };
 
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
+
+#define ISOLATE_PLATFORM_INIT_LIST(V)                                          \
+  /* VirtualFrame::SpilledScope state */                                       \
+  V(bool, is_virtual_frame_in_spilled_scope, false)                            \
+  /* CodeGenerator::EmitNamedStore state */                                    \
+  V(int, inlined_write_barrier_size, -1)
+
+#if !defined(__arm__) && !defined(__mips__)
+class HashMap;
+#endif
+
+#else
+
+#define ISOLATE_PLATFORM_INIT_LIST(V)
+
+#endif
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
@@ -312,12 +311,15 @@
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
   V(int, suffix_table, (kBMMaxShift + 1))                                      \
+  V(uint32_t, random_seed, 2)                                                  \
   V(uint32_t, private_random_seed, 2)                                          \
   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
 
 typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache;
 
 #define ISOLATE_INIT_LIST(V)                                                   \
+  /* AssertNoZoneAllocation state. */                                          \
+  V(bool, zone_allow_allocation, true)                                         \
   /* SerializerDeserializer state. */                                          \
   V(int, serialize_partial_snapshot_cache_length, 0)                           \
   /* Assembler state. */                                                       \
@@ -345,13 +347,14 @@
   /* Serializer state. */                                                      \
   V(ExternalReferenceTable*, external_reference_table, NULL)                   \
   /* AstNode state. */                                                         \
-  V(int, ast_node_id, 0)                                                       \
+  V(unsigned, ast_node_id, 0)                                                  \
   V(unsigned, ast_node_count, 0)                                               \
   /* SafeStackFrameIterator activations count. */                              \
   V(int, safe_stack_iterator_counter, 0)                                       \
   V(uint64_t, enabled_cpu_features, 0)                                         \
   V(CpuProfiler*, cpu_profiler, NULL)                                          \
   V(HeapProfiler*, heap_profiler, NULL)                                        \
+  ISOLATE_PLATFORM_INIT_LIST(V)                                                \
   ISOLATE_DEBUGGER_INIT_LIST(V)
 
 class Isolate {
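
ISOLATE_INIT_LIST and ISOLATE_PLATFORM_INIT_LIST are X-macro lists: one V(type, name, initial) table is expanded several times to generate fields, accessors, and initialization. A compact sketch of the pattern with a made-up list (the in-class initializers are a C++11 shortcut used here for brevity; the real code applies the initial values in the Isolate constructor):

#define MY_INIT_LIST(V) \
  V(int, counter, 0)    \
  V(bool, flag, true)

class MyIsolate {
 public:
#define FIELD_ACCESSOR(type, name, initial)        \
  type name() const { return name##_; }            \
  void set_##name(type value) { name##_ = value; }
  MY_INIT_LIST(FIELD_ACCESSOR)
#undef FIELD_ACCESSOR

 private:
#define FIELD_DECL(type, name, initial) type name##_ = initial;
  MY_INIT_LIST(FIELD_DECL)
#undef FIELD_DECL
};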
@@ -430,25 +433,19 @@
   // not currently set).
   static PerIsolateThreadData* CurrentPerIsolateThreadData() {
     return reinterpret_cast<PerIsolateThreadData*>(
-        Thread::GetThreadLocal(per_isolate_thread_data_key()));
+        Thread::GetThreadLocal(per_isolate_thread_data_key_));
   }
 
   // Returns the isolate inside which the current thread is running.
   INLINE(static Isolate* Current()) {
-    const Thread::LocalStorageKey key = isolate_key();
     Isolate* isolate = reinterpret_cast<Isolate*>(
-        Thread::GetExistingThreadLocal(key));
-    if (!isolate) {
-      EnsureDefaultIsolate();
-      isolate = reinterpret_cast<Isolate*>(
-          Thread::GetExistingThreadLocal(key));
-    }
+        Thread::GetExistingThreadLocal(isolate_key_));
     ASSERT(isolate != NULL);
     return isolate;
   }
 
   INLINE(static Isolate* UncheckedCurrent()) {
-    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key()));
+    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
   }
 
   // Usually called by Init(), but can be called early e.g. to allow
@@ -470,10 +467,10 @@
   // for legacy API reasons.
   void TearDown();
 
-  bool IsDefaultIsolate() const;
+  bool IsDefaultIsolate() const { return this == default_isolate_; }
 
   // Ensures that process-wide resources and the default isolate have been
-  // allocated. It is only necessary to call this method in rare cases, for
+  // allocated. It is only necessary to call this method in rare cases, for
   // example if you are using V8 from within the body of a static initializer.
   // Safe to call multiple times.
   static void EnsureDefaultIsolate();
@@ -495,12 +492,14 @@
   // Returns the key used to store the pointer to the current isolate.
   // Used internally for V8 threads that do not execute JavaScript but still
   // are part of the domain of an isolate (like the context switcher).
-  static Thread::LocalStorageKey isolate_key();
+  static Thread::LocalStorageKey isolate_key() {
+    return isolate_key_;
+  }
 
   // Returns the key used to store process-wide thread IDs.
-  static Thread::LocalStorageKey thread_id_key();
-
-  static Thread::LocalStorageKey per_isolate_thread_data_key();
+  static Thread::LocalStorageKey thread_id_key() {
+    return thread_id_key_;
+  }
 
   // If a client attempts to create a Locker without specifying an isolate,
   // we assume that the client is using legacy behavior. Set up the current
@@ -621,7 +620,7 @@
   void* formal_count_address() { return &thread_local_top_.formal_count_; }
 
   // Returns the global object of the current context. It could be
-  // a builtin object, or a JS global object.
+  // a builtin object, or a JS global object.
   Handle<GlobalObject> global() {
     return Handle<GlobalObject>(context()->global());
   }
@@ -689,8 +688,6 @@
       int frame_limit,
       StackTrace::StackTraceOptions options);
 
-  void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
-
   // Returns if the top context may access the given global object. If
   // the result is false, the pending exception is guaranteed to be
   // set.
@@ -717,7 +714,7 @@
 
   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
   Failure* PromoteScheduledException();
-  void DoThrow(Object* exception, MessageLocation* location);
+  void DoThrow(MaybeObject* exception, MessageLocation* location);
   // Checks if exception should be reported and finds out if it's
   // caught externally.
   bool ShouldReportException(bool* can_be_caught_externally,
@@ -844,9 +841,7 @@
     return unicode_cache_;
   }
 
-  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
-    return inner_pointer_to_code_cache_;
-  }
+  PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
 
   StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
 
@@ -884,24 +879,12 @@
 
   RuntimeState* runtime_state() { return &runtime_state_; }
 
-  void set_fp_stubs_generated(bool value) {
-    fp_stubs_generated_ = value;
-  }
-
-  bool fp_stubs_generated() { return fp_stubs_generated_; }
-
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
 
   Builtins* builtins() { return &builtins_; }
 
-  void NotifyExtensionInstalled() {
-    has_installed_extensions_ = true;
-  }
-
-  bool has_installed_extensions() { return has_installed_extensions_; }
-
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       regexp_macro_assembler_canonicalize() {
     return &regexp_macro_assembler_canonicalize_;
@@ -929,7 +912,6 @@
   }
 #endif
 
-  inline bool IsDebuggerActive();
   inline bool DebuggerHasBreakPoints();
 
 #ifdef DEBUG
@@ -1005,41 +987,9 @@
   void SetData(void* data) { embedder_data_ = data; }
   void* GetData() { return embedder_data_; }
 
-  LookupResult* top_lookup_result() {
-    return thread_local_top_.top_lookup_result_;
-  }
-  void SetTopLookupResult(LookupResult* top) {
-    thread_local_top_.top_lookup_result_ = top;
-  }
-
-  bool context_exit_happened() {
-    return context_exit_happened_;
-  }
-  void set_context_exit_happened(bool context_exit_happened) {
-    context_exit_happened_ = context_exit_happened;
-  }
-
-  double time_millis_since_init() {
-    return OS::TimeCurrentMillis() - time_millis_at_init_;
-  }
-
-  DateCache* date_cache() {
-    return date_cache_;
-  }
-
-  void set_date_cache(DateCache* date_cache) {
-    if (date_cache != date_cache_) {
-      delete date_cache_;
-    }
-    date_cache_ = date_cache;
-  }
-
  private:
   Isolate();
 
-  friend struct GlobalState;
-  friend struct InitializeGlobalState;
-
   // The per-process lock should be acquired before the ThreadDataTable is
   // modified.
   class ThreadDataTable {
@@ -1078,10 +1028,19 @@
     Isolate* previous_isolate;
     EntryStackItem* previous_item;
 
-   private:
     DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
   };
 
+  // This mutex protects highest_thread_id_, thread_data_table_ and
+  // default_isolate_.
+  static Mutex* process_wide_mutex_;
+
+  static Thread::LocalStorageKey per_isolate_thread_data_key_;
+  static Thread::LocalStorageKey isolate_key_;
+  static Thread::LocalStorageKey thread_id_key_;
+  static Isolate* default_isolate_;
+  static ThreadDataTable* thread_data_table_;
+
   void Deinit();
 
   static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1103,7 +1062,7 @@
   // If one does not yet exist, allocate a new one.
   PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
 
-  // PreInits and returns a default isolate. Needed when a new thread tries
+  // PreInits and returns a default isolate. Needed when a new thread tries
   // to create a Locker for the first time (the lock itself is in the isolate).
   static Isolate* GetDefaultIsolateForLocking();
 
@@ -1134,10 +1093,6 @@
 
   void InitializeDebugger();
 
-  // Traverse prototype chain to find out whether the object is derived from
-  // the Error object.
-  bool IsErrorObject(Handle<Object> obj);
-
   int stack_trace_nesting_level_;
   StringStream* incomplete_message_;
   // The preallocated memory thread singleton.
@@ -1175,16 +1130,14 @@
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
-  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
+  PcToCodeCache* pc_to_code_cache_;
   StringInputBuffer* write_input_buffer_;
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
   RuntimeState runtime_state_;
-  bool fp_stubs_generated_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
-  bool has_installed_extensions_;
   StringTracker* string_tracker_;
   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
@@ -1194,19 +1147,9 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       regexp_macro_assembler_canonicalize_;
   RegExpStack* regexp_stack_;
-
-  DateCache* date_cache_;
-
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
   void* embedder_data_;
 
-  // The garbage collector should be a little more aggressive when it knows
-  // that a context was recently exited.
-  bool context_exit_happened_;
-
-  // Time stamp at initialization.
-  double time_millis_at_init_;
-
 #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
     defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
   bool simulator_initialized_;
@@ -1267,7 +1210,19 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  inline explicit SaveContext(Isolate* isolate);
+  explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+    if (isolate->context() != NULL) {
+      context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+      dummy_ = Handle<Context>(isolate->context());
+#endif
+    }
+    isolate->set_save_context(this);
+
+    // If there is no JS frame under the current C frame, use the value 0.
+    JavaScriptFrameIterator it(isolate);
+    js_sp_ = it.done() ? 0 : it.frame()->sp();
+  }
 
   ~SaveContext() {
     if (context_.is_null()) {
@@ -1285,8 +1240,8 @@
   SaveContext* prev() { return prev_; }
 
   // Returns true if this save context is below a given JavaScript frame.
-  bool IsBelowFrame(JavaScriptFrame* frame) {
-    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
+  bool below(JavaScriptFrame* frame) {
+    return (js_sp_ == 0) || (frame->sp() < js_sp_);
   }
 
  private:
@@ -1295,7 +1250,7 @@
   Handle<Context> dummy_;
 #endif
   SaveContext* prev_;
-  Address c_entry_fp_;
+  Address js_sp_;  // The top JS frame's sp when saving context.
 };
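
The reverted below() compares raw stack pointers: on V8's targets the stack grows downward, so a numerically smaller sp belongs to a newer (inner) frame, and js_sp_ == 0 encodes "no JS frame existed when the context was saved". The predicate in isolation (Address is just an integer address type here):

#include <cstdint>

typedef uintptr_t Address;

// True when the given frame is newer than the JS frame recorded at
// save time, i.e. the saved context lies below it on the stack.
static bool BelowSketch(Address frame_sp, Address saved_js_sp) {
  return saved_js_sp == 0 || frame_sp < saved_js_sp;
}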
 
 
diff --git a/src/json-parser.h b/src/json-parser.h
index d22cd0d..68eab65 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -130,7 +130,7 @@
   // An object literal is a squiggly-braced and comma separated sequence
   // (possibly empty) of key/value pairs, where the key is a JSON string
   // literal, the value is a JSON value, and the two are separated by a colon.
-  // A JSON array doesn't allow numbers and identifiers as keys, like a
+  // A JSON array doesn't allow numbers and identifiers as keys, like a
   // JavaScript array.
   Handle<Object> ParseJsonObject();
 
@@ -165,7 +165,7 @@
 
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
-  isolate_ = source->map()->GetHeap()->isolate();
+  isolate_ = source->map()->isolate();
   FlattenString(source);
   source_ = source;
   source_length_ = source_->length();
@@ -177,7 +177,7 @@
 
   // Set initial position right before the string.
   position_ = -1;
-  // Advance to the first character (possibly EOS)
+  // Advance to the first character (possibly EOS)
   AdvanceSkipWhitespace();
   Handle<Object> result = ParseJsonValue();
   if (result.is_null() || c0_ != kEndOfString) {
@@ -303,12 +303,11 @@
 
       uint32_t index;
       if (key->AsArrayIndex(&index)) {
-        JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+        SetOwnElement(json_object, index, value, kNonStrictMode);
       } else if (key->Equals(isolate()->heap()->Proto_symbol())) {
         SetPrototype(json_object, value);
       } else {
-        JSObject::SetLocalPropertyIgnoreAttributes(
-            json_object, key, value, NONE);
+        SetLocalPropertyIgnoreAttributes(json_object, key, value, NONE);
       }
     } while (MatchSkipWhiteSpace(','));
     if (c0_ != '}') {
diff --git a/src/json.js b/src/json.js
index ccef445..deba126 100644
--- a/src/json.js
+++ b/src/json.js
@@ -345,4 +345,4 @@
   ));
 }
 
-SetUpJSON();
+SetUpJSON();
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8ccbae4..3ebfbdf 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -68,9 +68,9 @@
                                                Handle<String> flags,
                                                bool* has_pending_exception) {
   // Call the construct code with 2 arguments.
-  Handle<Object> argv[] = { pattern, flags };
-  return Execution::New(constructor, ARRAY_SIZE(argv), argv,
-                        has_pending_exception);
+  Object** argv[2] = { Handle<Object>::cast(pattern).location(),
+                       Handle<Object>::cast(flags).location() };
+  return Execution::New(constructor, 2, argv, has_pending_exception);
 }
 
 
@@ -175,8 +175,7 @@
     case JSRegExp::IRREGEXP: {
       Handle<Object> result =
           IrregexpExec(regexp, subject, index, last_match_info);
-      ASSERT(!result.is_null() ||
-             regexp->GetIsolate()->has_pending_exception());
+      ASSERT(!result.is_null() || Isolate::Current()->has_pending_exception());
       return result;
     }
     default:
@@ -510,16 +509,14 @@
   }
   Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
 
-  IrregexpResult result = IrregexpInterpreter::Match(isolate,
-                                                     byte_codes,
-                                                     subject,
-                                                     register_vector,
-                                                     index);
-  if (result == RE_EXCEPTION) {
-    ASSERT(!isolate->has_pending_exception());
-    isolate->StackOverflow();
+  if (IrregexpInterpreter::Match(isolate,
+                                 byte_codes,
+                                 subject,
+                                 register_vector,
+                                 index)) {
+    return RE_SUCCESS;
   }
-  return result;
+  return RE_FAILURE;
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -528,7 +525,6 @@
                                         Handle<String> subject,
                                         int previous_index,
                                         Handle<JSArray> last_match_info) {
-  Isolate* isolate = jsregexp->GetIsolate();
   ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
 
   // Prepare space for the return values.
@@ -544,11 +540,11 @@
   int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
   if (required_registers < 0) {
     // Compiling failed with an exception.
-    ASSERT(isolate->has_pending_exception());
+    ASSERT(Isolate::Current()->has_pending_exception());
     return Handle<Object>::null();
   }
 
-  OffsetsVector registers(required_registers, isolate);
+  OffsetsVector registers(required_registers);
 
   IrregexpResult res = RegExpImpl::IrregexpExecOnce(
       jsregexp, subject, previous_index, Vector<int>(registers.vector(),
@@ -570,11 +566,11 @@
     return last_match_info;
   }
   if (res == RE_EXCEPTION) {
-    ASSERT(isolate->has_pending_exception());
+    ASSERT(Isolate::Current()->has_pending_exception());
     return Handle<Object>::null();
   }
   ASSERT(res == RE_FAILURE);
-  return isolate->factory()->null_value();
+  return Isolate::Current()->factory()->null_value();
 }
 
 
@@ -706,7 +702,7 @@
 //   the virtualized backtrack stack and some register changes.  When a node is
 //   to be emitted it can flush the Trace or update it.  Flushing the Trace
 //   will emit code to bring the actual state into line with the virtual state.
-//   Avoiding flushing the state can postpone some work (e.g. updates of capture
+//   Avoiding flushing the state can postpone some work (e.g. updates of capture
 //   registers).  Postponing work can save time when executing the regular
 //   expression since it may be found that the work never has to be done as a
 //   failure to match can occur.  In addition it is much faster to jump to a
@@ -1444,7 +1440,7 @@
   if (ascii) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUtf16CodeUnit;
+    char_mask = String::kMaxUC16CharCode;
   }
   uc16 exor = c1 ^ c2;
   // Check whether exor has only one bit set.
@@ -1546,7 +1542,7 @@
   if (ascii) {
     max_char = String::kMaxAsciiCharCode;
   } else {
-    max_char = String::kMaxUtf16CodeUnit;
+    max_char = String::kMaxUC16CharCode;
   }
 
   Label success;
@@ -1642,7 +1638,7 @@
         macro_assembler->CheckCharacterLT(from, on_failure);
       }
     }
-    if (to != String::kMaxUtf16CodeUnit) {
+    if (to != String::kMaxUC16CharCode) {
       if (cc->is_negated()) {
         macro_assembler->CheckCharacterLT(to + 1, on_failure);
       } else {
@@ -1835,7 +1831,7 @@
   if (asc) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUtf16CodeUnit;
+    char_mask = String::kMaxUC16CharCode;
   }
   mask_ = 0;
   value_ = 0;
@@ -1887,7 +1883,7 @@
     if (compiler->ascii()) {
       char_mask = String::kMaxAsciiCharCode;
     } else {
-      char_mask = String::kMaxUtf16CodeUnit;
+      char_mask = String::kMaxUC16CharCode;
     }
     if ((mask & char_mask) == char_mask) need_mask = false;
     mask &= char_mask;
@@ -1939,7 +1935,7 @@
   if (compiler->ascii()) {
     char_mask = String::kMaxAsciiCharCode;
   } else {
-    char_mask = String::kMaxUtf16CodeUnit;
+    char_mask = String::kMaxUC16CharCode;
   }
   for (int k = 0; k < elms_->length(); k++) {
     TextElement elm = elms_->at(k);
@@ -2638,7 +2634,7 @@
     TextElement elm = elms_->at(i);
     if (elm.type == TextElement::CHAR_CLASS) {
       RegExpCharacterClass* cc = elm.data.u_char_class;
-      // None of the standard character classes is different in the case
+      // None of the standard character classes is different in the case
       // independent case and it slows us down if we don't know that.
       if (cc->is_standard()) continue;
       ZoneList<CharacterRange>* ranges = cc->ranges();
@@ -3599,20 +3595,22 @@
 // -------------------------------------------------------------------
 // Tree to graph conversion
 
-static const uc16 kSpaceRanges[] = { 0x0009, 0x000D, 0x0020, 0x0020, 0x00A0,
-    0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A, 0x2028, 0x2029,
-    0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000, 0xFEFF, 0xFEFF };
-static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
+static const int kSpaceRangeCount = 20;
+static const int kSpaceRangeAsciiCount = 4;
+static const uc16 kSpaceRanges[kSpaceRangeCount] = { 0x0009, 0x000D, 0x0020,
+    0x0020, 0x00A0, 0x00A0, 0x1680, 0x1680, 0x180E, 0x180E, 0x2000, 0x200A,
+    0x2028, 0x2029, 0x202F, 0x202F, 0x205F, 0x205F, 0x3000, 0x3000 };
 
-static const uc16 kWordRanges[] = { '0', '9', 'A', 'Z', '_', '_', 'a', 'z' };
-static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
+static const int kWordRangeCount = 8;
+static const uc16 kWordRanges[kWordRangeCount] = { '0', '9', 'A', 'Z', '_',
+    '_', 'a', 'z' };
 
-static const uc16 kDigitRanges[] = { '0', '9' };
-static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
+static const int kDigitRangeCount = 2;
+static const uc16 kDigitRanges[kDigitRangeCount] = { '0', '9' };
 
-static const uc16 kLineTerminatorRanges[] = { 0x000A, 0x000A, 0x000D, 0x000D,
-    0x2028, 0x2029 };
-static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
+static const int kLineTerminatorRangeCount = 6;
+static const uc16 kLineTerminatorRanges[kLineTerminatorRangeCount] = { 0x000A,
+    0x000A, 0x000D, 0x000D, 0x2028, 0x2029 };
 
 RegExpNode* RegExpAtom::ToNode(RegExpCompiler* compiler,
                                RegExpNode* on_success) {
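
The removed lines sized these tables with ARRAY_SIZE so the counts could never drift from the initializers; the revert hardcodes the counts again (note the reverted kSpaceRanges also loses the 0xFEFF BOM pair). The compile-time idiom behind ARRAY_SIZE, sketched here (V8's real macro typically adds a guard against pointer arguments):

// Element count = total bytes / element bytes, evaluated at compile time.
#define ARRAY_SIZE_SKETCH(a) (static_cast<int>(sizeof(a) / sizeof((a)[0])))

static const unsigned short kDigits[] = { '0', '9' };
static const int kDigitCount = ARRAY_SIZE_SKETCH(kDigits);  // == 2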
@@ -4079,7 +4077,7 @@
                             int elmc,
                             ZoneList<CharacterRange>* ranges) {
   ASSERT(elmv[0] != 0x0000);
-  ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
+  ASSERT(elmv[elmc-1] != String::kMaxUC16CharCode);
   uc16 last = 0x0000;
   for (int i = 0; i < elmc; i += 2) {
     ASSERT(last <= elmv[i] - 1);
@@ -4087,7 +4085,7 @@
     ranges->Add(CharacterRange(last, elmv[i] - 1));
     last = elmv[i + 1] + 1;
   }
-  ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit));
+  ranges->Add(CharacterRange(last, String::kMaxUC16CharCode));
 }
 
 
@@ -4633,8 +4631,8 @@
     from = range.to();
     i++;
   }
-  if (from < String::kMaxUtf16CodeUnit) {
-    negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit));
+  if (from < String::kMaxUC16CharCode) {
+    negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
   }
 }
 
@@ -4725,6 +4723,7 @@
 
 
 const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
+const DispatchTable::Entry DispatchTable::Config::kNoValue;
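
The added kNoValue line is the required out-of-class definition for a static const data member of class type; the forward code avoided both the definition and its static initializer by returning a temporary from NoValue(). In miniature:

struct Entry {
  Entry() : value(0) {}  // user-provided ctor so a const instance
  int value;             // can be default-initialized
};

struct Config {
  static const Entry kNoValue;                       // declaration only
  static const Entry NoValue() { return Entry(); }   // the alternative
};

const Entry Config::kNoValue;  // the single definition with storage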
 
 
 void DispatchTable::AddRange(CharacterRange full_range, int value) {
@@ -4797,7 +4796,7 @@
       entry->AddValue(value);
       // Bail out if the last interval ended at 0xFFFF since otherwise
       // adding 1 will wrap around to 0.
-      if (entry->to() == String::kMaxUtf16CodeUnit)
+      if (entry->to() == String::kMaxUC16CharCode)
         break;
       ASSERT(entry->to() + 1 > current.from());
       current.set_from(entry->to() + 1);
@@ -5117,7 +5116,7 @@
         int new_length = length + 1;
         if (length > 0) {
           if (ranges->at(0).from() == 0) new_length--;
-          if (ranges->at(length - 1).to() == String::kMaxUtf16CodeUnit) {
+          if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
             new_length--;
           }
         }
@@ -5207,14 +5206,14 @@
     if (last < range.from())
       AddRange(CharacterRange(last, range.from() - 1));
     if (range.to() >= last) {
-      if (range.to() == String::kMaxUtf16CodeUnit) {
+      if (range.to() == String::kMaxUC16CharCode) {
         return;
       } else {
         last = range.to() + 1;
       }
     }
   }
-  AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
+  AddRange(CharacterRange(last, String::kMaxUC16CharCode));
 }
 
 
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 8875de9..54297a4 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,17 +29,14 @@
 #define V8_JSREGEXP_H_
 
 #include "allocation.h"
-#include "assembler.h"
 #include "zone-inl.h"
 
 namespace v8 {
 namespace internal {
 
-class NodeVisitor;
-class RegExpCompiler;
+
 class RegExpMacroAssembler;
-class RegExpNode;
-class RegExpTree;
+
 
 class RegExpImpl {
  public:
@@ -391,7 +388,7 @@
     typedef uc16 Key;
     typedef Entry Value;
     static const uc16 kNoKey;
-    static const Entry NoValue() { return Value(); }
+    static const Entry kNoValue;
     static inline int Compare(uc16 a, uc16 b) {
       if (a == b)
         return 0;
@@ -636,7 +633,7 @@
   static const int kNodeIsTooComplexForGreedyLoops = -1;
   virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
   Label* label() { return &label_; }
-  // If non-generic code is generated for a node (i.e. the node is not at the
+  // If non-generic code is generated for a node (i.e. the node is not at the
   // start of the trace) then it cannot be reused.  This variable sets a limit
   // on how often we allow that to happen before we insist on starting a new
   // trace and generating generic code for a node that can be reused by flushing
@@ -1466,12 +1463,12 @@
 
 class OffsetsVector {
  public:
-  inline OffsetsVector(int num_registers, Isolate* isolate)
+  explicit inline OffsetsVector(int num_registers)
       : offsets_vector_length_(num_registers) {
     if (offsets_vector_length_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
       vector_ = NewArray<int>(offsets_vector_length_);
     } else {
-      vector_ = isolate->jsregexp_static_offsets_vector();
+      vector_ = Isolate::Current()->jsregexp_static_offsets_vector();
     }
   }
   inline ~OffsetsVector() {
diff --git a/src/lazy-instance.h b/src/lazy-instance.h
deleted file mode 100644
index 09dfe21..0000000
--- a/src/lazy-instance.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The LazyInstance<Type, Traits> class manages a single instance of Type,
-// which will be lazily created on the first time it's accessed.  This class is
-// useful for places you would normally use a function-level static, but you
-// need to have guaranteed thread-safety.  The Type constructor will only ever
-// be called once, even if two threads are racing to create the object.  Get()
-// and Pointer() will always return the same, completely initialized instance.
-//
-// LazyInstance is completely thread safe, assuming that you create it safely.
-// The class was designed to be POD initialized, so it shouldn't require a
-// static constructor.  It really only makes sense to declare a LazyInstance as
-// a global variable using the LAZY_INSTANCE_INITIALIZER initializer.
-//
-// LazyInstance is similar to Singleton, except it does not have the singleton
-// property.  You can have multiple LazyInstance's of the same type, and each
-// will manage a unique instance.  It also preallocates the space for Type, as
-// to avoid allocating the Type instance on the heap.  This may help with the
-// performance of creating the instance, and reducing heap fragmentation.  This
-// requires that Type be a complete type so we can determine the size. See
-// notes for advanced users below for more explanations.
-//
-// Example usage:
-//   static LazyInstance<MyClass>::type my_instance = LAZY_INSTANCE_INITIALIZER;
-//   void SomeMethod() {
-//     my_instance.Get().SomeMethod();  // MyClass::SomeMethod()
-//
-//     MyClass* ptr = my_instance.Pointer();
-//     ptr->DoDoDo();  // MyClass::DoDoDo
-//   }
-//
-// Additionally you can override the way your instance is constructed by
-// providing your own trait:
-// Example usage:
-//   struct MyCreateTrait {
-//     static void Construct(MyClass* allocated_ptr) {
-//       new (allocated_ptr) MyClass(/* extra parameters... */);
-//     }
-//   };
-//   static LazyInstance<MyClass, MyCreateTrait>::type my_instance =
-//      LAZY_INSTANCE_INITIALIZER;
-//
-// Notes for advanced users:
-// LazyInstance can actually be used in two different ways:
-//
-// - "Static mode" which is the default mode since it is the most efficient
-//   (no extra heap allocation). In this mode, the instance is statically
-//   allocated (stored in the global data section at compile time).
-//   The macro LAZY_STATIC_INSTANCE_INITIALIZER (= LAZY_INSTANCE_INITIALIZER)
-//   must be used to initialize static lazy instances.
-//
-// - "Dynamic mode". In this mode, the instance is dynamically allocated and
-//   constructed (using new) by default. This mode is useful if you have to
-//   deal with some code already allocating the instance for you (e.g.
-//   OS::Mutex() which returns a new private OS-dependent subclass of Mutex).
-//   The macro LAZY_DYNAMIC_INSTANCE_INITIALIZER must be used to initialize
-//   dynamic lazy instances.
-
-#ifndef V8_LAZY_INSTANCE_H_
-#define V8_LAZY_INSTANCE_H_
-
-#include "once.h"
-
-namespace v8 {
-namespace internal {
-
-#define LAZY_STATIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, {} }
-#define LAZY_DYNAMIC_INSTANCE_INITIALIZER { V8_ONCE_INIT, 0 }
-
-// Default to static mode.
-#define LAZY_INSTANCE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-template <typename T>
-struct LeakyInstanceTrait {
-  static void Destroy(T* /* instance */) {}
-};
-
-
-// Traits that define how an instance is allocated and accessed.
-
-template <typename T>
-struct StaticallyAllocatedInstanceTrait {
-  typedef char StorageType[sizeof(T)];
-
-  static T* MutableInstance(StorageType* storage) {
-    return reinterpret_cast<T*>(storage);
-  }
-
-  template <typename ConstructTrait>
-  static void InitStorageUsingTrait(StorageType* storage) {
-    ConstructTrait::Construct(MutableInstance(storage));
-  }
-};
-
-
-template <typename T>
-struct DynamicallyAllocatedInstanceTrait {
-  typedef T* StorageType;
-
-  static T* MutableInstance(StorageType* storage) {
-    return *storage;
-  }
-
-  template <typename CreateTrait>
-  static void InitStorageUsingTrait(StorageType* storage) {
-    *storage = CreateTrait::Create();
-  }
-};
-
-
-template <typename T>
-struct DefaultConstructTrait {
-  // Constructs the provided object which was already allocated.
-  static void Construct(T* allocated_ptr) {
-    new(allocated_ptr) T();
-  }
-};
-
-
-template <typename T>
-struct DefaultCreateTrait {
-  static T* Create() {
-    return new T();
-  }
-};
-
-
-// TODO(pliard): Handle instances destruction (using global destructors).
-template <typename T, typename AllocationTrait, typename CreateTrait,
-          typename DestroyTrait  /* not used yet. */ >
-struct LazyInstanceImpl {
- public:
-  typedef typename AllocationTrait::StorageType StorageType;
-
- private:
-  static void InitInstance(StorageType* storage) {
-    AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
-  }
-
-  void Init() const {
-    CallOnce(&once_, &InitInstance, &storage_);
-  }
-
- public:
-  T* Pointer() {
-    Init();
-    return AllocationTrait::MutableInstance(&storage_);
-  }
-
-  const T& Get() const {
-    Init();
-    return *AllocationTrait::MutableInstance(&storage_);
-  }
-
-  mutable OnceType once_;
-  // Note that the previous field, OnceType, is an AtomicWord which guarantees
-  // the correct alignment of the storage field below.
-  mutable StorageType storage_;
-};
-
-
-template <typename T,
-          typename CreateTrait = DefaultConstructTrait<T>,
-          typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyStaticInstance {
-  typedef LazyInstanceImpl<T, StaticallyAllocatedInstanceTrait<T>, CreateTrait,
-      DestroyTrait> type;
-};
-
-
-template <typename T,
-          typename CreateTrait = DefaultConstructTrait<T>,
-          typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyInstance {
-  // A LazyInstance is a LazyStaticInstance.
-  typedef typename LazyStaticInstance<T, CreateTrait, DestroyTrait>::type type;
-};
-
-
-template <typename T,
-          typename CreateTrait = DefaultConstructTrait<T>,
-          typename DestroyTrait = LeakyInstanceTrait<T> >
-struct LazyDynamicInstance {
-  typedef LazyInstanceImpl<T, DynamicallyAllocatedInstanceTrait<T>, CreateTrait,
-      DestroyTrait> type;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_LAZY_INSTANCE_H_
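
With the whole header deleted by the revert, a condensed standalone sketch of the pattern it implemented may help: construct an object in preallocated static storage, lazily and at most once, even under racing threads. This uses C++11 std::call_once in place of V8's CallOnce, and MyClass is a placeholder:

#include <mutex>
#include <new>

struct MyClass {
  void SomeMethod() {}
};

// "Static mode" in miniature: the storage lives in the data segment
// and POD-initializes, so no static constructor is needed.
static std::once_flag my_once;
alignas(MyClass) static char my_storage[sizeof(MyClass)];

static MyClass* MyInstancePointer() {
  std::call_once(my_once, [] { new (my_storage) MyClass(); });
  return reinterpret_cast<MyClass*>(my_storage);
}

Calling MyInstancePointer()->SomeMethod() corresponds to my_instance.Get().SomeMethod() in the deleted usage comment.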
diff --git a/src/list-inl.h b/src/list-inl.h
index 7c2c83f..80bccc9 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -72,9 +72,9 @@
 template<typename T, class P>
 void List<T, P>::ResizeAddInternal(const T& element) {
   ASSERT(length_ >= capacity_);
-  // Grow the list capacity by 100%, but make sure to let it grow
+  // Grow the list capacity by 50%, but make sure to let it grow
   // even when the capacity is zero (possible initial case).
-  int new_capacity = 1 + 2 * capacity_;
+  int new_capacity = 1 + capacity_ + (capacity_ >> 1);
   // Since the element reference could be an element of the list, copy
   // it out of the old backing storage before resizing.
   T temp = element;
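
For concreteness, the two growth rules diverge quickly; a quick arithmetic check (illustration, not from the source):

// Reverted rule (50% + 1):  0 -> 1 -> 2 -> 4 -> 7 -> 11 -> 17 -> 26 ...
static int GrowHalf(int capacity) { return 1 + capacity + (capacity >> 1); }

// Forward rule (100% + 1):  0 -> 1 -> 3 -> 7 -> 15 -> 31 -> 63 ...
static int GrowDouble(int capacity) { return 1 + 2 * capacity; }

Both keep appends amortized O(1); doubling trades higher peak memory for fewer reallocation copies.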
@@ -216,11 +216,11 @@
     int mid = (low + high) / 2;
     T mid_elem = list[mid];
 
-    if (cmp(&mid_elem, &elem) > 0) {
+    if (mid_elem > elem) {
       high = mid - 1;
       continue;
     }
-    if (cmp(&mid_elem, &elem) < 0) {
+    if (mid_elem < elem) {
       low = mid + 1;
       continue;
     }
@@ -236,7 +236,6 @@
   return SortedListBSearch<T>(list, elem, PointerValueCompare<T>);
 }
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_LIST_INL_H_
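
The reverted loop above compares elements with operator< / operator> directly instead of going through a comparison callback. The same search, self-contained over a plain sorted array (a sketch, not the V8 template):

// Returns the index of elem in a sorted array, or -1 if absent.
template <typename T>
static int SortedBSearchSketch(const T* list, int length, const T& elem) {
  int low = 0;
  int high = length - 1;
  while (low <= high) {
    int mid = (low + high) / 2;
    if (elem < list[mid]) {         // elem lies in the lower half
      high = mid - 1;
    } else if (list[mid] < elem) {  // elem lies in the upper half
      low = mid + 1;
    } else {
      return mid;                   // neither orders first: found it
    }
  }
  return -1;
}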
diff --git a/src/list.h b/src/list.h
index adddea4..0558709 100644
--- a/src/list.h
+++ b/src/list.h
@@ -67,7 +67,7 @@
 
   // Returns a reference to the element at index i.  This reference is
   // not safe to use after operations that can change the list's
-  // backing store (e.g. Add).
+  // backing store (e.g. Add).
   inline T& operator[](int i) const {
     ASSERT(0 <= i);
     ASSERT(i < length_);
@@ -165,11 +165,8 @@
 
 class Map;
 class Code;
-template<typename T> class Handle;
 typedef List<Map*> MapList;
 typedef List<Code*> CodeList;
-typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
 // list. Returns the index of the element of -1 if it was not found.
@@ -179,7 +176,6 @@
 template <typename T>
 int SortedListBSearch(const List<T>& list, T elem);
 
-
 } }  // namespace v8::internal
 
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 4396c73..4661106 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,6 +46,29 @@
 namespace v8 {
 namespace internal {
 
+
+#define DEFINE_OPERAND_CACHE(name, type)                      \
+  name name::cache[name::kNumCachedOperands];                 \
+  void name::SetupCache() {                                   \
+    for (int i = 0; i < kNumCachedOperands; i++) {            \
+      cache[i].ConvertTo(type, i);                            \
+    }                                                         \
+  }                                                           \
+  static bool name##_initialize() {                           \
+    name::SetupCache();                                       \
+    return true;                                              \
+  }                                                           \
+  static bool name##_cache_initialized = name##_initialize();
+
+DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
+DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
+DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
+DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
+DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
+
+#undef DEFINE_OPERAND_CACHE
+
+
 static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
   return a.Value() < b.Value() ? a : b;
 }
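
DEFINE_OPERAND_CACHE relies on a file-scope static whose dynamic initializer runs before main(), forcing each operand cache to be populated at startup. The trick in isolation (hypothetical cache type):

struct OperandCacheSketch {
  static int cache[16];
  static void SetupCache() {
    for (int i = 0; i < 16; i++) cache[i] = i;
  }
};
int OperandCacheSketch::cache[16];

static bool InitOperandCache() {
  OperandCacheSketch::SetupCache();
  return true;
}
// Dynamic initialization of this flag runs SetupCache() before main().
static bool operand_cache_initialized = InitOperandCache();

Initialization order is only guaranteed within a single translation unit, which is the usual hazard of this idiom.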
@@ -87,9 +110,9 @@
 }
 
 
-void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+void UseInterval::SplitAt(LifetimePosition pos) {
   ASSERT(Contains(pos) && pos.Value() != start().Value());
-  UseInterval* after = new(zone) UseInterval(pos, end_);
+  UseInterval* after = new UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
   end_ = pos;
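
UseInterval::SplitAt splices a new node into a singly linked, ordered list of half-open intervals. The same operation on a bare struct (a sketch; note the reverted code uses plain new where the forward code allocated in a Zone):

// Splits [start, end) at pos into [start, pos) + [pos, end).
// The caller guarantees start < pos < end.
struct IntervalSketch {
  int start;
  int end;
  IntervalSketch* next;
};

static void SplitIntervalAt(IntervalSketch* interval, int pos) {
  IntervalSketch* after = new IntervalSketch;
  after->start = pos;
  after->end = interval->end;
  after->next = interval->next;
  interval->next = after;
  interval->end = pos;
}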
@@ -126,11 +149,11 @@
 #endif
 
 
-LiveRange::LiveRange(int id, Zone* zone)
+LiveRange::LiveRange(int id)
     : id_(id),
       spilled_(false),
-      is_double_(false),
       assigned_register_(kInvalidAssignment),
+      assigned_register_kind_(NONE),
       last_interval_(NULL),
       first_interval_(NULL),
       first_pos_(NULL),
@@ -138,39 +161,37 @@
       next_(NULL),
       current_interval_(NULL),
       last_processed_use_(NULL),
-      spill_operand_(new(zone) LOperand()),
-      spill_start_index_(kMaxInt) { }
-
-
-void LiveRange::set_assigned_register(int reg,
-                                      RegisterKind register_kind,
-                                      Zone* zone) {
-  ASSERT(!HasRegisterAssigned() && !IsSpilled());
-  assigned_register_ = reg;
-  is_double_ = (register_kind == DOUBLE_REGISTERS);
-  ConvertOperands(zone);
+      spill_start_index_(kMaxInt) {
+  spill_operand_ = new LUnallocated(LUnallocated::IGNORE);
 }
 
 
-void LiveRange::MakeSpilled(Zone* zone) {
+void LiveRange::set_assigned_register(int reg, RegisterKind register_kind) {
+  ASSERT(!HasRegisterAssigned() && !IsSpilled());
+  assigned_register_ = reg;
+  assigned_register_kind_ = register_kind;
+  ConvertOperands();
+}
+
+
+void LiveRange::MakeSpilled() {
   ASSERT(!IsSpilled());
   ASSERT(TopLevel()->HasAllocatedSpillOperand());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
-  ConvertOperands(zone);
+  ConvertOperands();
 }
 
 
 bool LiveRange::HasAllocatedSpillOperand() const {
-  ASSERT(spill_operand_ != NULL);
-  return !spill_operand_->IsIgnored();
+  return spill_operand_ != NULL && !spill_operand_->IsUnallocated();
 }
 
 
 void LiveRange::SetSpillOperand(LOperand* operand) {
   ASSERT(!operand->IsUnallocated());
   ASSERT(spill_operand_ != NULL);
-  ASSERT(spill_operand_->IsIgnored());
+  ASSERT(spill_operand_->IsUnallocated());
   spill_operand_->ConvertTo(operand->kind(), operand->index());
 }
 
@@ -213,8 +234,7 @@
   // at the current or the immediate next position.
   UsePosition* use_pos = NextRegisterPosition(pos);
   if (use_pos == NULL) return true;
-  return
-      use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
+  return use_pos->pos().Value() > pos.NextInstruction().Value();
 }
 
 
@@ -225,7 +245,7 @@
 }
 
 
-LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+LOperand* LiveRange::CreateAssignedOperand() {
   LOperand* op = NULL;
   if (HasRegisterAssigned()) {
     ASSERT(!IsSpilled());
@@ -239,7 +259,7 @@
     op = TopLevel()->GetSpillOperand();
     ASSERT(!op->IsUnallocated());
   } else {
-    LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
+    LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE);
     unalloc->set_virtual_register(id_);
     op = unalloc;
   }
@@ -271,9 +291,7 @@
 }
 
 
-void LiveRange::SplitAt(LifetimePosition position,
-                        LiveRange* result,
-                        Zone* zone) {
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result) {
   ASSERT(Start().Value() < position.Value());
   ASSERT(result->IsEmpty());
   // Find the last interval that ends before the position. If the
@@ -292,7 +310,7 @@
 
   while (current != NULL) {
     if (current->Contains(position)) {
-      current->SplitAt(position, zone);
+      current->SplitAt(position);
       break;
     }
     UseInterval* next = current->next();
@@ -385,9 +403,7 @@
 }
 
 
-void LiveRange::EnsureInterval(LifetimePosition start,
-                               LifetimePosition end,
-                               Zone* zone) {
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end) {
   LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
                          id_,
                          start.Value(),
@@ -401,7 +417,7 @@
     first_interval_ = first_interval_->next();
   }
 
-  UseInterval* new_interval = new(zone) UseInterval(start, new_end);
+  UseInterval* new_interval = new UseInterval(start, new_end);
   new_interval->next_ = first_interval_;
   first_interval_ = new_interval;
   if (new_interval->next() == NULL) {
@@ -410,22 +426,20 @@
 }
 
 
-void LiveRange::AddUseInterval(LifetimePosition start,
-                               LifetimePosition end,
-                               Zone* zone) {
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end) {
   LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
                          id_,
                          start.Value(),
                          end.Value());
   if (first_interval_ == NULL) {
-    UseInterval* interval = new(zone) UseInterval(start, end);
+    UseInterval* interval = new UseInterval(start, end);
     first_interval_ = interval;
     last_interval_ = interval;
   } else {
     if (end.Value() == first_interval_->start().Value()) {
       first_interval_->set_start(start);
     } else if (end.Value() < first_interval_->start().Value()) {
-      UseInterval* interval = new(zone) UseInterval(start, end);
+      UseInterval* interval = new UseInterval(start, end);
       interval->set_next(first_interval_);
       first_interval_ = interval;
     } else {
@@ -441,12 +455,11 @@
 
 
 UsePosition* LiveRange::AddUsePosition(LifetimePosition pos,
-                                       LOperand* operand,
-                                       Zone* zone) {
+                                       LOperand* operand) {
   LAllocator::TraceAlloc("Add to live range %d use position %d\n",
                          id_,
                          pos.Value());
-  UsePosition* use_pos = new(zone) UsePosition(pos, operand);
+  UsePosition* use_pos = new UsePosition(pos, operand);
   UsePosition* prev = NULL;
   UsePosition* current = first_pos_;
   while (current != NULL && current->pos().Value() < pos.Value()) {
@@ -466,8 +479,8 @@
 }
 
 
-void LiveRange::ConvertOperands(Zone* zone) {
-  LOperand* op = CreateAssignedOperand(zone);
+void LiveRange::ConvertOperands() {
+  LOperand* op = CreateAssignedOperand();
   UsePosition* use_pos = first_pos();
   while (use_pos != NULL) {
     ASSERT(Start().Value() <= use_pos->pos().Value() &&
@@ -531,8 +544,7 @@
 
 
 LAllocator::LAllocator(int num_values, HGraph* graph)
-    : zone_(graph->zone()),
-      chunk_(NULL),
+    : chunk_(NULL),
       live_in_sets_(graph->blocks()->length()),
       live_ranges_(num_values * 2),
       fixed_live_ranges_(NULL),
@@ -543,11 +555,10 @@
       reusable_slots_(8),
       next_virtual_register_(num_values),
       first_artificial_register_(num_values),
-      mode_(GENERAL_REGISTERS),
+      mode_(NONE),
       num_registers_(-1),
       graph_(graph),
-      has_osr_entry_(false),
-      allocation_ok_(true) { }
+      has_osr_entry_(false) {}
 
 
 void LAllocator::InitializeLivenessAnalysis() {
@@ -561,7 +572,7 @@
 BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out = new(zone_) BitVector(next_virtual_register_, zone_);
+  BitVector* live_out = new BitVector(next_virtual_register_);
 
   // Process all successor blocks.
   for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
@@ -599,7 +610,7 @@
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
     LiveRange* range = LiveRangeFor(operand_index);
-    range->AddUseInterval(start, end, zone_);
+    range->AddUseInterval(start, end);
     iterator.Advance();
   }
 }
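
Both hunks above belong to the liveness setup: ComputeLiveOut unions the live-in sets of a block's successors, and every value in that set then gets a use interval spanning the whole block. A compact sketch of the union step, with the CFG reduced to index vectors and the successor-phi handling simplified (V8 adds the specific phi operand this block contributes):

    #include <bitset>
    #include <vector>

    constexpr int kMaxVirtualRegisters = 1 << 10;  // Illustrative bound.
    using LiveSet = std::bitset<kMaxVirtualRegisters>;

    struct Block {
      std::vector<int> successors;       // Indices into the block list.
      std::vector<int> phi_inputs_here;  // vregs this block feeds into phis.
      LiveSet live_in;
    };

    // Live-out of a block is the union of its successors' live-ins, plus
    // the values it routes into successor phis (loop back edges get extra
    // treatment in the real code).
    LiveSet ComputeLiveOut(const Block& block, const std::vector<Block>& blocks) {
      LiveSet live_out;
      for (int succ : block.successors) live_out |= blocks[succ].live_in;
      for (int vreg : block.phi_inputs_here) live_out.set(vreg);
      return live_out;
    }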
@@ -641,9 +652,9 @@
   ASSERT(index < Register::kNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
+    result = new LiveRange(FixedLiveRangeID(index));
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
+    result->set_assigned_register(index, GENERAL_REGISTERS);
     fixed_live_ranges_[index] = result;
   }
   return result;
@@ -654,9 +665,9 @@
   ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
+    result = new LiveRange(FixedDoubleLiveRangeID(index));
     ASSERT(result->IsFixed());
-    result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
+    result->set_assigned_register(index, DOUBLE_REGISTERS);
     fixed_double_live_ranges_[index] = result;
   }
   return result;
@@ -669,7 +680,7 @@
   }
   LiveRange* result = live_ranges_[index];
   if (result == NULL) {
-    result = new(zone_) LiveRange(index, zone_);
+    result = new LiveRange(index);
     live_ranges_[index] = result;
   }
   return result;
@@ -685,7 +696,7 @@
 
 HPhi* LAllocator::LookupPhi(LOperand* operand) const {
   if (!operand->IsUnallocated()) return NULL;
-  int index = LUnallocated::cast(operand)->virtual_register();
+  int index = operand->VirtualRegister();
   HValue* instr = graph_->LookupValue(index);
   if (instr != NULL && instr->IsPhi()) {
     return HPhi::cast(instr);
@@ -715,15 +726,15 @@
 
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction(), zone_);
-    range->AddUsePosition(position.NextInstruction(), NULL, zone_);
+    range->AddUseInterval(position, position.NextInstruction());
+    range->AddUsePosition(position.NextInstruction(), NULL);
   } else {
     range->ShortenTo(position);
   }
 
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
   }
 }
 
@@ -736,9 +747,9 @@
   if (range == NULL) return;
   if (operand->IsUnallocated()) {
     LUnallocated* unalloc_operand = LUnallocated::cast(operand);
-    range->AddUsePosition(position, unalloc_operand, zone_)->set_hint(hint);
+    range->AddUsePosition(position, unalloc_operand)->set_hint(hint);
   }
-  range->AddUseInterval(block_start, position, zone_);
+  range->AddUseInterval(block_start, position);
 }
 
 
@@ -753,8 +764,7 @@
       LMoveOperands cur = move_operands->at(i);
       LOperand* cur_to = cur.destination();
       if (cur_to->IsUnallocated()) {
-        if (LUnallocated::cast(cur_to)->virtual_register() ==
-            LUnallocated::cast(from)->virtual_register()) {
+        if (cur_to->VirtualRegister() == from->VirtualRegister()) {
           move->AddMove(cur.source(), to);
           return;
         }
@@ -775,7 +785,6 @@
       if (i < end) instr = InstructionAt(i + 1);
       if (i > start) prev_instr = InstructionAt(i - 1);
       MeetConstraintsBetween(prev_instr, instr, i);
-      if (!AllocationOk()) return;
     }
   }
 }
@@ -797,11 +806,11 @@
   // Handle fixed output operand.
   if (first != NULL && first->Output() != NULL) {
     LUnallocated* first_output = LUnallocated::cast(first->Output());
-    LiveRange* range = LiveRangeFor(first_output->virtual_register());
+    LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
     bool assigned = false;
     if (first_output->HasFixedPolicy()) {
       LUnallocated* output_copy = first_output->CopyUnconstrained();
-      bool is_tagged = HasTaggedValue(first_output->virtual_register());
+      bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
       AllocateFixed(first_output, gap_index, is_tagged);
 
       // This value is produced on the stack, we never need to spill it.
@@ -832,7 +841,7 @@
       LUnallocated* cur_input = LUnallocated::cast(it.Current());
       if (cur_input->HasFixedPolicy()) {
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
-        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+        bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
       } else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
@@ -841,14 +850,12 @@
         ASSERT(!cur_input->IsUsedAtStart());
 
         LUnallocated* input_copy = cur_input->CopyUnconstrained();
-        cur_input->set_virtual_register(GetVirtualRegister());
-        if (!AllocationOk()) return;
+        cur_input->set_virtual_register(next_virtual_register_++);
 
         if (RequiredRegisterKind(input_copy->virtual_register()) ==
             DOUBLE_REGISTERS) {
           double_artificial_registers_.Add(
-              cur_input->virtual_register() - first_artificial_register_,
-              zone_);
+              cur_input->virtual_register() - first_artificial_register_);
         }
 
         AddConstraintsGapMove(gap_index, input_copy, cur_input);
@@ -861,8 +868,8 @@
     LUnallocated* second_output = LUnallocated::cast(second->Output());
     if (second_output->HasSameAsInputPolicy()) {
       LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
-      int output_vreg = second_output->virtual_register();
-      int input_vreg = cur_input->virtual_register();
+      int output_vreg = second_output->VirtualRegister();
+      int input_vreg = cur_input->VirtualRegister();
 
       LUnallocated* input_copy = cur_input->CopyUnconstrained();
       cur_input->set_virtual_register(second_output->virtual_register());
@@ -917,9 +924,9 @@
           }
         } else {
           if (to->IsUnallocated()) {
-            if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
+            if (live->Contains(to->VirtualRegister())) {
               Define(curr_position, to, from);
-              live->Remove(LUnallocated::cast(to)->virtual_register());
+              live->Remove(to->VirtualRegister());
             } else {
               cur->Eliminate();
               continue;
@@ -930,7 +937,7 @@
         }
         Use(block_start_position, curr_position, from, hint);
         if (from->IsUnallocated()) {
-          live->Add(LUnallocated::cast(from)->virtual_register());
+          live->Add(from->VirtualRegister());
         }
       }
     } else {
@@ -940,9 +947,7 @@
       if (instr != NULL) {
         LOperand* output = instr->Output();
         if (output != NULL) {
-          if (output->IsUnallocated()) {
-            live->Remove(LUnallocated::cast(output)->virtual_register());
-          }
+          if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
           Define(curr_position, output, NULL);
         }
 
@@ -952,8 +957,7 @@
                 output->index() != i) {
               LiveRange* range = FixedLiveRangeFor(i);
               range->AddUseInterval(curr_position,
-                                    curr_position.InstructionEnd(),
-                                    zone_);
+                                    curr_position.InstructionEnd());
             }
           }
         }
@@ -964,8 +968,7 @@
                 output->index() != i) {
               LiveRange* range = FixedDoubleLiveRangeFor(i);
               range->AddUseInterval(curr_position,
-                                    curr_position.InstructionEnd(),
-                                    zone_);
+                                    curr_position.InstructionEnd());
             }
           }
         }
@@ -982,9 +985,7 @@
           }
 
           Use(block_start_position, use_pos, input, NULL);
-          if (input->IsUnallocated()) {
-            live->Add(LUnallocated::cast(input)->virtual_register());
-          }
+          if (input->IsUnallocated()) live->Add(input->VirtualRegister());
         }
 
         for (TempIterator it(instr); !it.Done(); it.Advance()) {
@@ -1013,7 +1014,7 @@
   const ZoneList<HPhi*>* phis = block->phis();
   for (int i = 0; i < phis->length(); ++i) {
     HPhi* phi = phis->at(i);
-    LUnallocated* phi_operand = new(zone_) LUnallocated(LUnallocated::NONE);
+    LUnallocated* phi_operand = new LUnallocated(LUnallocated::NONE);
     phi_operand->set_virtual_register(phi->id());
     for (int j = 0; j < phi->OperandCount(); ++j) {
       HValue* op = phi->OperandAt(j);
@@ -1023,7 +1024,7 @@
         operand = chunk_->DefineConstantOperand(constant);
       } else {
         ASSERT(!op->EmitAtUses());
-        LUnallocated* unalloc = new(zone_) LUnallocated(LUnallocated::ANY);
+        LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
         operand = unalloc;
       }
@@ -1042,13 +1043,11 @@
       // it into a location different from the operand of a live range
       // covering a branch instruction.
       // Thus we need to manually record a pointer.
-      LInstruction* branch =
-          InstructionAt(cur_block->last_instruction_index());
-      if (branch->HasPointerMap()) {
-        if (phi->representation().IsTagged()) {
+      if (phi->representation().IsTagged()) {
+        LInstruction* branch =
+            InstructionAt(cur_block->last_instruction_index());
+        if (branch->HasPointerMap()) {
           branch->pointer_map()->RecordPointer(phi_operand);
-        } else if (!phi->representation().IsDouble()) {
-          branch->pointer_map()->RecordUntagged(phi_operand);
         }
       }
     }
@@ -1062,39 +1061,34 @@
 }
 
 
-bool LAllocator::Allocate(LChunk* chunk) {
+void LAllocator::Allocate(LChunk* chunk) {
   ASSERT(chunk_ == NULL);
   chunk_ = chunk;
   MeetRegisterConstraints();
-  if (!AllocationOk()) return false;
   ResolvePhis();
   BuildLiveRanges();
   AllocateGeneralRegisters();
-  if (!AllocationOk()) return false;
   AllocateDoubleRegisters();
-  if (!AllocationOk()) return false;
   PopulatePointerMaps();
   if (has_osr_entry_) ProcessOsrEntry();
   ConnectRanges();
   ResolveControlFlow();
-  return true;
 }
 
 
 void LAllocator::MeetRegisterConstraints() {
-  HPhase phase("L_Register constraints", chunk_);
+  HPhase phase("Register constraints", chunk_);
   first_artificial_register_ = next_virtual_register_;
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int i = 0; i < blocks->length(); ++i) {
     HBasicBlock* block = blocks->at(i);
     MeetRegisterConstraints(block);
-    if (!AllocationOk()) return;
   }
 }
 
 
 void LAllocator::ResolvePhis() {
-  HPhase phase("L_Resolve phis", chunk_);
+  HPhase phase("Resolve phis", chunk_);
 
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1130,8 +1124,8 @@
   if (cur_cover->IsSpilled()) return;
   ASSERT(pred_cover != NULL && cur_cover != NULL);
   if (pred_cover != cur_cover) {
-    LOperand* pred_op = pred_cover->CreateAssignedOperand(zone_);
-    LOperand* cur_op = cur_cover->CreateAssignedOperand(zone_);
+    LOperand* pred_op = pred_cover->CreateAssignedOperand();
+    LOperand* cur_op = cur_cover->CreateAssignedOperand();
     if (!pred_op->Equals(cur_op)) {
       LGap* gap = NULL;
       if (block->predecessors()->length() == 1) {
@@ -1148,13 +1142,10 @@
         // it into a location different from the operand of a live range
         // covering a branch instruction.
         // Thus we need to manually record a pointer.
-        LInstruction* branch = InstructionAt(pred->last_instruction_index());
-        if (branch->HasPointerMap()) {
-          if (HasTaggedValue(range->id())) {
+        if (HasTaggedValue(range->id())) {
+          LInstruction* branch = InstructionAt(pred->last_instruction_index());
+          if (branch->HasPointerMap()) {
             branch->pointer_map()->RecordPointer(cur_op);
-          } else if (!cur_op->IsDoubleStackSlot() &&
-                     !cur_op->IsDoubleRegister()) {
-            branch->pointer_map()->RemovePointer(cur_op);
           }
         }
       }
@@ -1184,7 +1175,7 @@
 
 
 void LAllocator::ConnectRanges() {
-  HPhase phase("L_Connect ranges", this);
+  HPhase phase("Connect ranges", this);
   for (int i = 0; i < live_ranges()->length(); ++i) {
     LiveRange* first_range = live_ranges()->at(i);
     if (first_range == NULL || first_range->parent() != NULL) continue;
@@ -1203,8 +1194,8 @@
           }
           if (should_insert) {
             LParallelMove* move = GetConnectingParallelMove(pos);
-            LOperand* prev_operand = first_range->CreateAssignedOperand(zone_);
-            LOperand* cur_operand = second_range->CreateAssignedOperand(zone_);
+            LOperand* prev_operand = first_range->CreateAssignedOperand();
+            LOperand* cur_operand = second_range->CreateAssignedOperand();
             move->AddMove(prev_operand, cur_operand);
           }
         }
@@ -1224,7 +1215,7 @@
 
 
 void LAllocator::ResolveControlFlow() {
-  HPhase phase("L_Resolve control flow", this);
+  HPhase phase("Resolve control flow", this);
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
   for (int block_id = 1; block_id < blocks->length(); ++block_id) {
     HBasicBlock* block = blocks->at(block_id);
@@ -1245,7 +1236,7 @@
 
 
 void LAllocator::BuildLiveRanges() {
-  HPhase phase("L_Build live ranges", this);
+  HPhase phase("Build live ranges", this);
   InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
   const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
@@ -1273,8 +1264,7 @@
       LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
       for (int j = 0; j < move->move_operands()->length(); ++j) {
         LOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() &&
-            LUnallocated::cast(to)->virtual_register() == phi->id()) {
+        if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
           hint = move->move_operands()->at(j).source();
           phi_operand = to;
           break;
@@ -1307,7 +1297,7 @@
       while (!iterator.Done()) {
         int operand_index = iterator.Current();
         LiveRange* range = LiveRangeFor(operand_index);
-        range->EnsureInterval(start, end, zone_);
+        range->EnsureInterval(start, end);
         iterator.Advance();
       }
 
@@ -1350,7 +1340,7 @@
 
 
 void LAllocator::PopulatePointerMaps() {
-  HPhase phase("L_Populate pointer maps", this);
+  HPhase phase("Populate pointer maps", this);
   const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
 
   ASSERT(SafePointsAreInOrder());
@@ -1428,7 +1418,7 @@
         TraceAlloc("Pointer in register for range %d (start at %d) "
                    "at safe point %d\n",
                    cur->id(), cur->Start().Value(), safe_point);
-        LOperand* operand = cur->CreateAssignedOperand(zone_);
+        LOperand* operand = cur->CreateAssignedOperand();
         ASSERT(!operand->IsStackSlot());
         map->RecordPointer(operand);
       }
@@ -1469,14 +1459,15 @@
 
 
 void LAllocator::AllocateGeneralRegisters() {
-  HPhase phase("L_Allocate general registers", this);
+  HPhase phase("Allocate general registers", this);
   num_registers_ = Register::kNumAllocatableRegisters;
+  mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
 
 
 void LAllocator::AllocateDoubleRegisters() {
-  HPhase phase("L_Allocate double registers", this);
+  HPhase phase("Allocate double registers", this);
   num_registers_ = DoubleRegister::kNumAllocatableRegisters;
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
@@ -1484,6 +1475,7 @@
 
 
 void LAllocator::AllocateRegisters() {
+  ASSERT(mode_ != NONE);
   ASSERT(unhandled_live_ranges_.is_empty());
 
   for (int i = 0; i < live_ranges_.length(); ++i) {
@@ -1542,7 +1534,6 @@
         // Do not spill live range eagerly if use position that can benefit from
         // the register is too close to the start of live range.
         SpillBetween(current, current->Start(), pos->pos());
-        if (!AllocationOk()) return;
         ASSERT(UnhandledIsSorted());
         continue;
       }
@@ -1573,10 +1564,9 @@
     ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
 
     bool result = TryAllocateFreeReg(current);
-    if (!AllocationOk()) return;
-
-    if (!result) AllocateBlockedReg(current);
-    if (!AllocationOk()) return;
+    if (!result) {
+      AllocateBlockedReg(current);
+    }
 
     if (current->HasRegisterAssigned()) {
       AddToActive(current);
@@ -1590,6 +1580,7 @@
 
 
 const char* LAllocator::RegisterName(int allocation_index) {
+  ASSERT(mode_ != NONE);
   if (mode_ == GENERAL_REGISTERS) {
     return Register::AllocationIndexToString(allocation_index);
   } else {
@@ -1630,6 +1621,29 @@
 }
 
 
+void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
+  operand->set_virtual_register(instr->id());
+}
+
+
+void LAllocator::RecordTemporary(LUnallocated* operand) {
+  ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
+  if (!operand->HasFixedPolicy()) {
+    operand->set_virtual_register(next_virtual_register_++);
+  }
+}
+
+
+void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
+  operand->set_virtual_register(value->id());
+}
+
+
+int LAllocator::max_initial_value_ids() {
+  return LUnallocated::kMaxVirtualRegisters / 32;
+}
+
+
 void LAllocator::AddToActive(LiveRange* range) {
   TraceAlloc("Add live range %d to active\n", range->id());
   active_live_ranges_.Add(range);
@@ -1800,7 +1814,7 @@
         TraceAlloc("Assigning preferred reg %s to live range %d\n",
                    RegisterName(register_index),
                    current->id());
-        current->set_assigned_register(register_index, mode_, zone_);
+        current->set_assigned_register(register_index, mode_);
         return true;
       }
     }
@@ -1824,8 +1838,7 @@
   if (pos.Value() < current->End().Value()) {
     // Register reg is available at the range start but becomes blocked before
     // the range end. Split current at position where it becomes blocked.
-    LiveRange* tail = SplitRangeAt(current, pos);
-    if (!AllocationOk()) return false;
+    LiveRange* tail = SplitAt(current, pos);
     AddToUnhandledSorted(tail);
   }
 
@@ -1836,7 +1849,7 @@
   TraceAlloc("Assigning free reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  current->set_assigned_register(reg, mode_, zone_);
+  current->set_assigned_register(reg, mode_);
 
   return true;
 }
@@ -1926,7 +1939,7 @@
   TraceAlloc("Assigning blocked reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
-  current->set_assigned_register(reg, mode_, zone_);
+  current->set_assigned_register(reg, mode_);
 
   // This register was not free. Thus we need to find and spill
   // parts of active and inactive live regions that use the same register
@@ -1980,7 +1993,7 @@
 }
 
 
-LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
+LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
   ASSERT(!range->IsFixed());
   TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
 
@@ -1991,9 +2004,8 @@
   ASSERT(pos.IsInstructionStart() ||
          !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
 
-  LiveRange* result = LiveRangeFor(GetVirtualRegister());
-  if (!AllocationOk()) return NULL;
-  range->SplitAt(pos, result, zone_);
+  LiveRange* result = LiveRangeFor(next_virtual_register_++);
+  range->SplitAt(pos, result);
   return result;
 }
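
SplitAt, restored above without the Zone parameter, cuts a range's sorted interval list at a position: intervals that end before the position stay with the parent, the rest move to the child range, and an interval that straddles the position is cut in two. The same operation over a plain vector of half-open intervals, as a sketch:

    #include <utility>
    #include <vector>

    using Interval = std::pair<int, int>;  // Half-open [start, end).

    // Split a sorted, disjoint interval list at `pos`. Everything at or
    // after `pos` goes to the returned tail; a straddling interval is cut.
    std::vector<Interval> SplitAt(std::vector<Interval>& intervals, int pos) {
      std::vector<Interval> tail;
      size_t i = 0;
      while (i < intervals.size() && intervals[i].second <= pos) ++i;
      if (i < intervals.size() && intervals[i].first < pos) {
        tail.push_back({pos, intervals[i].second});  // Right half of straddler.
        intervals[i].second = pos;                   // Left half stays behind.
        ++i;
      }
      tail.insert(tail.end(), intervals.begin() + i, intervals.end());
      intervals.erase(intervals.begin() + i, intervals.end());
      return tail;
    }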
 
@@ -2009,7 +2021,7 @@
 
   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
   ASSERT(split_pos.Value() >= start.Value());
-  return SplitRangeAt(range, split_pos);
+  return SplitAt(range, split_pos);
 }
 
 
@@ -2048,8 +2060,7 @@
 
 
 void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
-  LiveRange* second_part = SplitRangeAt(range, pos);
-  if (!AllocationOk()) return;
+  LiveRange* second_part = SplitAt(range, pos);
   Spill(second_part);
 }
 
@@ -2058,8 +2069,7 @@
                               LifetimePosition start,
                               LifetimePosition end) {
   ASSERT(start.Value() < end.Value());
-  LiveRange* second_part = SplitRangeAt(range, start);
-  if (!AllocationOk()) return;
+  LiveRange* second_part = SplitAt(range, start);
 
   if (second_part->Start().Value() < end.Value()) {
     // The split result intersects with [start, end[.
@@ -2092,7 +2102,7 @@
     if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS);
     first->SetSpillOperand(op);
   }
-  range->MakeSpilled(zone_);
+  range->MakeSpilled();
 }
 
 
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index f5ab055..e4e6497 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -146,6 +146,7 @@
 
 
 enum RegisterKind {
+  NONE,
   GENERAL_REGISTERS,
   DOUBLE_REGISTERS
 };
@@ -216,7 +217,7 @@
 
   // Split this interval at the given position without affecting the

   // live range that owns it. The interval must contain the position.
-  void SplitAt(LifetimePosition pos, Zone* zone);
+  void SplitAt(LifetimePosition pos);
 
   // If this interval intersects with other return smallest position
   // that belongs to both of them.
@@ -277,7 +278,7 @@
  public:
   static const int kInvalidAssignment = 0x7fffffff;
 
-  LiveRange(int id, Zone* zone);
+  explicit LiveRange(int id);
 
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
@@ -288,13 +289,11 @@
   int id() const { return id_; }
   bool IsFixed() const { return id_ < 0; }
   bool IsEmpty() const { return first_interval() == NULL; }
-  LOperand* CreateAssignedOperand(Zone* zone);
+  LOperand* CreateAssignedOperand();
   int assigned_register() const { return assigned_register_; }
   int spill_start_index() const { return spill_start_index_; }
-  void set_assigned_register(int reg,
-                             RegisterKind register_kind,
-                             Zone* zone);
-  void MakeSpilled(Zone* zone);
+  void set_assigned_register(int reg, RegisterKind register_kind);
+  void MakeSpilled();
 
   // Returns use position in this live range that follows both start
   // and last processed use position.
@@ -318,9 +317,9 @@
   // the range.
   // All uses following the given position will be moved from this
   // live range to the result live range.
-  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+  void SplitAt(LifetimePosition position, LiveRange* result);
 
-  bool IsDouble() const { return is_double_; }
+  bool IsDouble() const { return assigned_register_kind_ == DOUBLE_REGISTERS; }
   bool HasRegisterAssigned() const {
     return assigned_register_ != kInvalidAssignment;
   }
@@ -357,15 +356,9 @@
   LifetimePosition FirstIntersection(LiveRange* other);
 
   // Add a new interval or a new use position to this live range.
-  void EnsureInterval(LifetimePosition start,
-                      LifetimePosition end,
-                      Zone* zone);
-  void AddUseInterval(LifetimePosition start,
-                      LifetimePosition end,
-                      Zone* zone);
-  UsePosition* AddUsePosition(LifetimePosition pos,
-                              LOperand* operand,
-                              Zone* zone);
+  void EnsureInterval(LifetimePosition start, LifetimePosition end);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end);
+  UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
 
   // Shorten the most recently added interval by setting a new start.
   void ShortenTo(LifetimePosition start);
@@ -377,15 +370,15 @@
 #endif
 
  private:
-  void ConvertOperands(Zone* zone);
+  void ConvertOperands();
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
 
   int id_;
   bool spilled_;
-  bool is_double_;
   int assigned_register_;
+  RegisterKind assigned_register_kind_;
   UseInterval* last_interval_;
   UseInterval* first_interval_;
   UsePosition* first_pos_;
@@ -408,8 +401,8 @@
     return bits_->Contains(value);
   }
 
-  void Add(int value, Zone* zone) {
-    EnsureCapacity(value, zone);
+  void Add(int value) {
+    EnsureCapacity(value);
     bits_->Add(value);
   }
 
@@ -420,11 +413,11 @@
     return bits_ != NULL && bits_->length() > value;
   }
 
-  void EnsureCapacity(int value, Zone* zone) {
+  void EnsureCapacity(int value) {
     if (InBitsRange(value)) return;
     int new_length = bits_ == NULL ? kInitialLength : bits_->length();
     while (new_length <= value) new_length *= 2;
-    BitVector* new_bits = new(zone) BitVector(new_length, zone);
+    BitVector* new_bits = new BitVector(new_length);
     if (bits_ != NULL) new_bits->CopyFrom(*bits_);
     bits_ = new_bits;
   }
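
The EnsureCapacity hunk above keeps the classic grow-by-doubling shape, only switching the backing BitVector from zone to heap allocation. A standalone equivalent for reference:

    #include <vector>

    // Grow-by-doubling bit set mirroring EnsureCapacity above: capacity at
    // least doubles each time, so repeated Adds cost amortized O(1) growth.
    class GrowableBitVector {
     public:
      bool Contains(int value) const {
        return value < static_cast<int>(bits_.size()) && bits_[value];
      }
      void Add(int value) {
        EnsureCapacity(value);
        bits_[value] = true;
      }
     private:
      void EnsureCapacity(int value) {
        if (value < static_cast<int>(bits_.size())) return;
        size_t new_length = bits_.empty() ? kInitialLength : bits_.size();
        while (new_length <= static_cast<size_t>(value)) new_length *= 2;
        bits_.resize(new_length, false);  // Old bits are preserved.
      }
      static const size_t kInitialLength = 1024;
      std::vector<bool> bits_;
    };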
@@ -439,13 +432,24 @@
 
   static void TraceAlloc(const char* msg, ...);
 
+  // Lithium translation support.
+  // Record a use of an input operand in the current instruction.
+  void RecordUse(HValue* value, LUnallocated* operand);
+  // Record the definition of the output operand.
+  void RecordDefinition(HInstruction* instr, LUnallocated* operand);
+  // Record a temporary operand.
+  void RecordTemporary(LUnallocated* operand);
+
   // Checks whether the value of a given virtual register is tagged.
   bool HasTaggedValue(int virtual_register) const;
 
   // Returns the register kind required by the given virtual register.
   RegisterKind RequiredRegisterKind(int virtual_register) const;
 
-  bool Allocate(LChunk* chunk);
+  // Control max function size.
+  static int max_initial_value_ids();
+
+  void Allocate(LChunk* chunk);
 
   const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
   const Vector<LiveRange*>* fixed_live_ranges() const {
@@ -458,15 +462,6 @@
   LChunk* chunk() const { return chunk_; }
   HGraph* graph() const { return graph_; }
 
-  int GetVirtualRegister() {
-    if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
-      allocation_ok_ = false;
-    }
-    return next_virtual_register_++;
-  }
-
-  bool AllocationOk() { return allocation_ok_; }
-
   void MarkAsOsrEntry() {
     // There can be only one.
     ASSERT(!has_osr_entry_);
@@ -539,7 +534,7 @@
   // Otherwise returns the live range that starts at pos and contains
   // all uses from the original range that follow pos. Uses at pos will
   // still be owned by the original range after splitting.
-  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+  LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
 
   // Split the given range in a position from the interval [start, end].
   LiveRange* SplitBetween(LiveRange* range,
@@ -595,8 +590,6 @@
 
   inline LGap* GapAt(int index);
 
-  Zone* zone_;
-
   LChunk* chunk_;
 
   // During liveness analysis keep a mapping from block id to live_in sets
@@ -628,9 +621,6 @@
 
   bool has_osr_entry_;
 
-  // Indicates success or failure during register allocation.
-  bool allocation_ok_;
-
   DISALLOW_COPY_AND_ASSIGN(LAllocator);
 };
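
The deleted GetVirtualRegister/AllocationOk members (and the bool return of Allocate) are the graceful-bailout protocol this revert removes: when virtual registers run out, the allocator flags failure and the compiler can fall back to unoptimized code instead of tripping an assert. The pattern in isolation, as a sketch with illustrative names and limits:

    // Graceful-bailout sketch: record failure instead of crashing, and have
    // every phase check the flag before continuing.
    class IdAllocator {
     public:
      explicit IdAllocator(int max_ids) : next_(0), max_(max_ids), ok_(true) {}

      int GetVirtualRegister() {
        if (next_ >= max_) ok_ = false;  // Out of encodable ids: flag it.
        return next_++;
      }
      bool AllocationOk() const { return ok_; }

     private:
      int next_;
      int max_;
      bool ok_;
    };

    bool RunAllocation(IdAllocator* alloc) {
      // Each phase aborts early once the flag trips; the caller then falls
      // back to the non-optimizing pipeline.
      for (int phase = 0; phase < 3; ++phase) {
        alloc->GetVirtualRegister();
        if (!alloc->AllocationOk()) return false;
      }
      return true;
    }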
 
diff --git a/src/lithium.cc b/src/lithium.cc
index aefd8b6..5410f6f 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -36,7 +36,6 @@
   LUnallocated* unalloc = NULL;
   switch (kind()) {
     case INVALID:
-      stream->Add("(0)");
       break;
     case UNALLOCATED:
       unalloc = LUnallocated::cast(this);
@@ -71,6 +70,9 @@
         case LUnallocated::ANY:
           stream->Add("(-)");
           break;
+        case LUnallocated::IGNORE:
+          stream->Add("(0)");
+          break;
       }
       break;
     case CONSTANT_OPERAND:
@@ -94,32 +96,13 @@
   }
 }
 
-#define DEFINE_OPERAND_CACHE(name, type)                      \
-  name* name::cache = NULL;                                   \
-  void name::SetUpCache() {                                   \
-    if (cache) return;                                        \
-    cache = new name[kNumCachedOperands];                     \
-    for (int i = 0; i < kNumCachedOperands; i++) {            \
-      cache[i].ConvertTo(type, i);                            \
-    }                                                         \
-  }                                                           \
 
-DEFINE_OPERAND_CACHE(LConstantOperand, CONSTANT_OPERAND)
-DEFINE_OPERAND_CACHE(LStackSlot,       STACK_SLOT)
-DEFINE_OPERAND_CACHE(LDoubleStackSlot, DOUBLE_STACK_SLOT)
-DEFINE_OPERAND_CACHE(LRegister,        REGISTER)
-DEFINE_OPERAND_CACHE(LDoubleRegister,  DOUBLE_REGISTER)
-
-#undef DEFINE_OPERAND_CACHE
-
-void LOperand::SetUpCaches() {
-  LConstantOperand::SetUpCache();
-  LStackSlot::SetUpCache();
-  LDoubleStackSlot::SetUpCache();
-  LRegister::SetUpCache();
-  LDoubleRegister::SetUpCache();
+int LOperand::VirtualRegister() {
+  LUnallocated* unalloc = LUnallocated::cast(this);
+  return unalloc->virtual_register();
 }
 
+
 bool LParallelMove::IsRedundant() const {
   for (int i = 0; i < move_operands_.length(); ++i) {
     if (!move_operands_[i].IsRedundant()) return false;
@@ -173,27 +156,6 @@
 }
 
 
-void LPointerMap::RemovePointer(LOperand* op) {
-  // Do not record arguments as pointers.
-  if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  for (int i = 0; i < pointer_operands_.length(); ++i) {
-    if (pointer_operands_[i]->Equals(op)) {
-      pointer_operands_.Remove(i);
-      --i;
-    }
-  }
-}
-
-
-void LPointerMap::RecordUntagged(LOperand* op) {
-  // Do not record arguments as pointers.
-  if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
-  untagged_operands_.Add(op);
-}
-
-
 void LPointerMap::PrintTo(StringStream* stream) {
   stream->Add("{");
   for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -220,7 +182,6 @@
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
       return 3;
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
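
Both the older SetupCache arrays and the newer DEFINE_OPERAND_CACHE macro removed above implement the same flyweight: operands are immutable (kind, index) values, so the first 128 indices of each kind share a single preallocated instance instead of being reallocated at every use. A minimal sketch of the idea; Create and SetupCache mirror the declarations, but the rest is simplified:

    #include <cassert>

    class StackSlot {
     public:
      static StackSlot* Create(int index) {
        assert(index >= 0);
        if (index < kNumCached) {
          SetupCache();
          return &cache_[index];   // Shared flyweight for small indices.
        }
        return new StackSlot(index);  // Rare large indices hit the heap.
      }
      int index() const { return index_; }

     private:
      explicit StackSlot(int index = 0) : index_(index) {}

      static void SetupCache() {
        if (cache_initialized_) return;
        for (int i = 0; i < kNumCached; ++i) cache_[i] = StackSlot(i);
        cache_initialized_ = true;
      }

      static const int kNumCached = 128;
      static StackSlot cache_[kNumCached];
      static bool cache_initialized_;
      int index_;
    };

    StackSlot StackSlot::cache_[StackSlot::kNumCached];
    bool StackSlot::cache_initialized_ = false;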
diff --git a/src/lithium.h b/src/lithium.h
index d1e2e3c..a933f72 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -59,8 +59,8 @@
   bool IsDoubleRegister() const { return kind() == DOUBLE_REGISTER; }
   bool IsArgument() const { return kind() == ARGUMENT; }
   bool IsUnallocated() const { return kind() == UNALLOCATED; }
-  bool IsIgnored() const { return kind() == INVALID; }
   bool Equals(LOperand* other) const { return value_ == other->value_; }
+  int VirtualRegister();
 
   void PrintTo(StringStream* stream);
   void ConvertTo(Kind kind, int index) {
@@ -69,10 +69,6 @@
     ASSERT(this->index() == index);
   }
 
-  // Calls SetUpCache() for each subclass. Don't forget to update this method
-  // if you add a new LOperand subclass.
-  static void SetUpCaches();
-
  protected:
   static const int kKindFieldWidth = 3;
   class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
@@ -93,7 +89,8 @@
     FIXED_SLOT,
     MUST_HAVE_REGISTER,
     WRITABLE_REGISTER,
-    SAME_AS_FIRST_INPUT
+    SAME_AS_FIRST_INPUT,
+    IGNORE
   };
 
   // Lifetime of operand inside the instruction.
@@ -124,9 +121,9 @@
 
   // The superclass has a KindField.  Some policies have a signed fixed
   // index in the upper bits.
-  static const int kPolicyWidth = 3;
+  static const int kPolicyWidth = 4;
   static const int kLifetimeWidth = 1;
-  static const int kVirtualRegisterWidth = 18;
+  static const int kVirtualRegisterWidth = 17;
 
   static const int kPolicyShift = kKindFieldWidth;
   static const int kLifetimeShift = kPolicyShift + kPolicyWidth;
@@ -146,10 +143,12 @@
                         kVirtualRegisterWidth> {
   };
 
-  static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
+  static const int kMaxVirtualRegisters = 1 << (kVirtualRegisterWidth + 1);
   static const int kMaxFixedIndex = 63;
   static const int kMinFixedIndex = -64;
 
+  bool HasIgnorePolicy() const { return policy() == IGNORE; }
+  bool HasNoPolicy() const { return policy() == NONE; }
   bool HasAnyPolicy() const {
     return policy() == ANY;
   }
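
The width changes above are a real trade-off because LUnallocated packs all its metadata into one 32-bit word: 3 bits of kind, then the policy, one lifetime bit, and the virtual register in the upper bits, so widening the policy field by one bit (to fit IGNORE) costs a virtual-register bit. A sketch of the BitField encode/decode machinery using the layout from this hunk:

    #include <cstdint>

    // V8-style BitField sketch: each field owns bits [shift, shift+size)
    // of a single uint32_t word.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    enum Policy { NONE, ANY, FIXED_REGISTER, MUST_HAVE_REGISTER };  // Abridged.

    // Layout per the hunk above: kind(3) | policy(4) | lifetime(1) | vreg(17).
    using KindField     = BitField<int,    0, 3>;
    using PolicyField   = BitField<Policy, 3, 4>;
    using LifetimeField = BitField<int,    7, 1>;
    using VRegField     = BitField<int,    8, 17>;

    uint32_t Encode(Policy p, int vreg) {
      return PolicyField::encode(p) | VRegField::encode(vreg);
    }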
@@ -172,7 +171,7 @@
     return static_cast<int>(value_) >> kFixedIndexShift;
   }
 
-  int virtual_register() const {
+  unsigned virtual_register() const {
     return VirtualRegisterField::decode(value_);
   }
 
@@ -235,7 +234,9 @@
   }
 
   bool IsIgnored() const {
-    return destination_ != NULL && destination_->IsIgnored();
+    return destination_ != NULL &&
+        destination_->IsUnallocated() &&
+        LUnallocated::cast(destination_)->HasIgnorePolicy();
   }
 
   // We clear both operands to indicate move that's been eliminated.
@@ -264,11 +265,11 @@
     return reinterpret_cast<LConstantOperand*>(op);
   }
 
-  static void SetUpCache();
+  static void SetupCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LConstantOperand* cache;
+  static LConstantOperand cache[];
 
   LConstantOperand() : LOperand() { }
   explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
@@ -299,11 +300,11 @@
     return reinterpret_cast<LStackSlot*>(op);
   }
 
-  static void SetUpCache();
+  static void SetupCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LStackSlot* cache;
+  static LStackSlot cache[];
 
   LStackSlot() : LOperand() { }
   explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
@@ -323,11 +324,11 @@
     return reinterpret_cast<LDoubleStackSlot*>(op);
   }
 
-  static void SetUpCache();
+  static void SetupCache();
 
  private:
   static const int kNumCachedOperands = 128;
-  static LDoubleStackSlot* cache;
+  static LDoubleStackSlot cache[];
 
   LDoubleStackSlot() : LOperand() { }
   explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
@@ -347,11 +348,11 @@
     return reinterpret_cast<LRegister*>(op);
   }
 
-  static void SetUpCache();
+  static void SetupCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LRegister* cache;
+  static LRegister cache[];
 
   LRegister() : LOperand() { }
   explicit LRegister(int index) : LOperand(REGISTER, index) { }
@@ -371,11 +372,11 @@
     return reinterpret_cast<LDoubleRegister*>(op);
   }
 
-  static void SetUpCache();
+  static void SetupCache();
 
  private:
   static const int kNumCachedOperands = 16;
-  static LDoubleRegister* cache;
+  static LDoubleRegister cache[];
 
   LDoubleRegister() : LOperand() { }
   explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
@@ -406,18 +407,9 @@
 class LPointerMap: public ZoneObject {
  public:
   explicit LPointerMap(int position)
-      : pointer_operands_(8),
-        untagged_operands_(0),
-        position_(position),
-        lithium_position_(-1) { }
+      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
 
-  const ZoneList<LOperand*>* GetNormalizedOperands() {
-    for (int i = 0; i < untagged_operands_.length(); ++i) {
-      RemovePointer(untagged_operands_[i]);
-    }
-    untagged_operands_.Clear();
-    return &pointer_operands_;
-  }
+  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
   int position() const { return position_; }
   int lithium_position() const { return lithium_position_; }
 
@@ -427,13 +419,10 @@
   }
 
   void RecordPointer(LOperand* op);
-  void RemovePointer(LOperand* op);
-  void RecordUntagged(LOperand* op);
   void PrintTo(StringStream* stream);
 
  private:
   ZoneList<LOperand*> pointer_operands_;
-  ZoneList<LOperand*> untagged_operands_;
   int position_;
   int lithium_position_;
 };
@@ -442,14 +431,12 @@
 class LEnvironment: public ZoneObject {
  public:
   LEnvironment(Handle<JSFunction> closure,
-               FrameType frame_type,
                int ast_id,
                int parameter_count,
                int argument_count,
                int value_count,
                LEnvironment* outer)
       : closure_(closure),
-        frame_type_(frame_type),
         arguments_stack_height_(argument_count),
         deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
         translation_index_(-1),
@@ -457,13 +444,13 @@
         parameter_count_(parameter_count),
         pc_offset_(-1),
         values_(value_count),
-        is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
+        representations_(value_count),
         spilled_registers_(NULL),
         spilled_double_registers_(NULL),
-        outer_(outer) { }
+        outer_(outer) {
+  }
 
   Handle<JSFunction> closure() const { return closure_; }
-  FrameType frame_type() const { return frame_type_; }
   int arguments_stack_height() const { return arguments_stack_height_; }
   int deoptimization_index() const { return deoptimization_index_; }
   int translation_index() const { return translation_index_; }
@@ -479,13 +466,11 @@
 
   void AddValue(LOperand* operand, Representation representation) {
     values_.Add(operand);
-    if (representation.IsTagged()) {
-      is_tagged_.Add(values_.length() - 1);
-    }
+    representations_.Add(representation);
   }
 
   bool HasTaggedValueAt(int index) const {
-    return is_tagged_.Contains(index);
+    return representations_[index].IsTagged();
   }
 
   void Register(int deoptimization_index,
@@ -510,7 +495,6 @@
 
  private:
   Handle<JSFunction> closure_;
-  FrameType frame_type_;
   int arguments_stack_height_;
   int deoptimization_index_;
   int translation_index_;
@@ -518,7 +502,7 @@
   int parameter_count_;
   int pc_offset_;
   ZoneList<LOperand*> values_;
-  BitVector is_tagged_;
+  ZoneList<Representation> representations_;
 
   // Allocation index indexed arrays of spill slot operands for registers
   // that are also in spill slots at an OSR entry.  NULL for environments
@@ -527,6 +511,8 @@
   LOperand** spilled_double_registers_;
 
   LEnvironment* outer_;
+
+  friend class LCodegen;
 };
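
The removed untagged_operands_/GetNormalizedOperands machinery gave pointer maps a retraction step: operands later discovered to hold untagged (raw) values are deleted from the tagged set just before the safepoint is emitted. A sketch of that normalize-then-emit idea, with the containers simplified to std::vector:

    #include <algorithm>
    #include <vector>

    struct Operand {
      int kind;
      int index;
      bool operator==(const Operand& o) const {
        return kind == o.kind && index == o.index;
      }
    };

    // Sketch of a safepoint pointer map: pointers_ lists operands the GC
    // must scan; untagged_ lists operands found to hold raw values, removed
    // before emission (mirrors the deleted GetNormalizedOperands).
    class PointerMap {
     public:
      void RecordPointer(Operand op) { pointers_.push_back(op); }
      void RecordUntagged(Operand op) { untagged_.push_back(op); }

      const std::vector<Operand>& GetNormalizedOperands() {
        for (const Operand& op : untagged_) {
          pointers_.erase(std::remove(pointers_.begin(), pointers_.end(), op),
                          pointers_.end());
        }
        untagged_.clear();
        return pointers_;
      }

     private:
      std::vector<Operand> pointers_;
      std::vector<Operand> untagged_;
    };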
 
 
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index abfb0f6..e05c53c 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -325,10 +325,9 @@
             if (old_node.children[i].live_shared_function_infos) {
               old_node.children[i].live_shared_function_infos.
                   forEach(function (old_child_info) {
-                    %LiveEditReplaceRefToNestedFunction(
-                        old_info.info,
-                        corresponding_child_info,
-                        old_child_info.info);
+                    %LiveEditReplaceRefToNestedFunction(old_info.info,
+                                                        corresponding_child_info,
+                                                        old_child_info.info);
                   });
             }
           }
@@ -382,7 +381,7 @@
           position: break_point_position,
           line: break_point.line(),
           column: break_point.column()
-      };
+      }
       break_point_old_positions.push(old_position_description);
     }
 
@@ -419,7 +418,7 @@
             position: updated_position,
             line: new_location.line,
             column: new_location.column
-        };
+        }
 
         break_point.set(original_script);
 
@@ -429,7 +428,7 @@
           new_positions: new_position_description
           } );
       }
-    };
+    }
   }
 
 
@@ -466,7 +465,7 @@
   }
   PosTranslator.prototype.GetChunks = function() {
     return this.chunks;
-  };
+  }
 
   PosTranslator.prototype.Translate = function(pos, inside_chunk_handler) {
     var array = this.chunks;
@@ -493,18 +492,18 @@
       inside_chunk_handler = PosTranslator.DefaultInsideChunkHandler;
     }
     return inside_chunk_handler(pos, chunk);
-  };
+  }
 
   PosTranslator.DefaultInsideChunkHandler = function(pos, diff_chunk) {
     Assert(false, "Cannot translate position in changed area");
-  };
+  }
 
   PosTranslator.ShiftWithTopInsideChunkHandler =
       function(pos, diff_chunk) {
     // We carelessly do not check whether we stay inside the chunk after
     // translation.
     return pos - diff_chunk.pos1 + diff_chunk.pos2;
-  };
+  }
 
   var FunctionStatus = {
       // No change to function or its inner functions; however its positions
@@ -518,7 +517,7 @@
       CHANGED: "changed",
       // Function is changed but cannot be patched.
       DAMAGED: "damaged"
-  };
+  }
 
   function CodeInfoTreeNode(code_info, children, array_index) {
     this.info = code_info;
@@ -581,19 +580,19 @@
   // children of unchanged functions are ignored.
   function MarkChangedFunctions(code_info_tree, chunks) {
 
-    // A convenient iterator over diff chunks that also translates
+    // A convenient iterator over diff chunks that also translates
     // positions from old to new in a current non-changed part of script.
     var chunk_it = new function() {
       var chunk_index = 0;
       var pos_diff = 0;
-      this.current = function() { return chunks[chunk_index]; };
+      this.current = function() { return chunks[chunk_index]; }
       this.next = function() {
         var chunk = chunks[chunk_index];
         pos_diff = chunk.pos2 + chunk.len2 - (chunk.pos1 + chunk.len1);
         chunk_index++;
-      };
-      this.done = function() { return chunk_index >= chunks.length; };
-      this.TranslatePos = function(pos) { return pos + pos_diff; };
+      }
+      this.done = function() { return chunk_index >= chunks.length; }
+      this.TranslatePos = function(pos) { return pos + pos_diff; }
     };
 
     // A recursive function that processes internals of a function and all its
@@ -947,16 +946,16 @@
       BLOCKED_ON_OTHER_STACK: 3,
       BLOCKED_UNDER_NATIVE_CODE: 4,
       REPLACED_ON_ACTIVE_STACK: 5
-  };
+  }
 
   FunctionPatchabilityStatus.SymbolName = function(code) {
-    var enumeration = FunctionPatchabilityStatus;
-    for (name in enumeration) {
-      if (enumeration[name] == code) {
+    var enum = FunctionPatchabilityStatus;
+    for (name in enum) {
+      if (enum[name] == code) {
         return name;
       }
     }
-  };
+  }
 
 
   // A logical failure in liveedit process. This means that change_log
@@ -969,7 +968,7 @@
 
   Failure.prototype.toString = function() {
     return "LiveEdit Failure: " + this.message;
-  };
+  }
 
   // A testing entry.
   function GetPcFromSourcePos(func, source_pos) {
@@ -1079,5 +1078,5 @@
     PosTranslator: PosTranslator,
     CompareStrings: CompareStrings,
     ApplySingleChunkPatch: ApplySingleChunkPatch
-  };
-};
+  }
+}
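
PosTranslator, whose methods lose their trailing semicolons in this revert, maps old source positions to new ones: each diff chunk (pos1, len1, pos2, len2) shifts every position behind it by (pos2 + len2) - (pos1 + len1), and a position inside a changed chunk has no counterpart. The same lookup in C++ for reference (the JS version binary-searches; a linear scan is enough to show the logic):

    #include <optional>
    #include <vector>

    struct DiffChunk { int pos1, len1, pos2, len2; };  // Old span -> new span.

    // Translate an old-script position to the new script. Chunks must be
    // sorted by pos1. Positions inside a changed chunk have no counterpart;
    // the JS code invokes a handler, here we return nullopt.
    std::optional<int> Translate(const std::vector<DiffChunk>& chunks, int pos) {
      int diff = 0;
      for (const DiffChunk& c : chunks) {
        if (pos < c.pos1) break;                         // Before this chunk.
        if (pos < c.pos1 + c.len1) return std::nullopt;  // Inside changed area.
        diff = (c.pos2 + c.len2) - (c.pos1 + c.len1);    // pos2 is absolute.
      }
      return pos + diff;
    }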
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 9c5294a..d44c2fc 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -53,8 +53,8 @@
   // Ignore return value from SetElement. It can only be a failure if there
   // are element setters causing exceptions and the debugger context has none
   // of these.
-  Handle<Object> no_failure =
-      JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
+  Handle<Object> no_failure;
+  no_failure = SetElement(object, index, value, kNonStrictMode);
   ASSERT(!no_failure.is_null());
   USE(no_failure);
 }
@@ -602,8 +602,7 @@
   // Build AST.
   CompilationInfo info(script);
   info.MarkAsGlobal();
-  // Parse and don't allow skipping lazy functions.
-  if (ParserApi::Parse(&info, kNoParsingFlags)) {
+  if (ParserApi::Parse(&info)) {
     // Compile the code.
     LiveEditFunctionTracker tracker(info.isolate(), info.function());
     if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -798,7 +797,7 @@
     HandleScope scope;
     FunctionInfoWrapper info = FunctionInfoWrapper::Create();
     info.SetInitialProperties(fun->name(), fun->start_position(),
-                              fun->end_position(), fun->parameter_count(),
+                              fun->end_position(), fun->num_parameters(),
                               current_parent_index_);
     current_parent_index_ = len_;
     SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -856,20 +855,38 @@
       return HEAP->undefined_value();
     }
     do {
-      ZoneList<Variable*> stack_list(outer_scope->StackLocalCount());
-      ZoneList<Variable*> context_list(outer_scope->ContextLocalCount());
-      outer_scope->CollectStackAndContextLocals(&stack_list, &context_list);
-      context_list.Sort(&Variable::CompareIndex);
+      ZoneList<Variable*> list(10);
+      outer_scope->CollectUsedVariables(&list);
+      int j = 0;
+      for (int i = 0; i < list.length(); i++) {
+        Variable* var1 = list[i];
+        if (var1->IsContextSlot()) {
+          if (j != i) {
+            list[j] = var1;
+          }
+          j++;
+        }
+      }
 
-      for (int i = 0; i < context_list.length(); i++) {
+      // Sort it.
+      for (int k = 1; k < j; k++) {
+        int l = k;
+        for (int m = k + 1; m < j; m++) {
+          if (list[l]->index() > list[m]->index()) {
+            l = m;
+          }
+        }
+        list[k] = list[l];
+      }
+      for (int i = 0; i < j; i++) {
         SetElementNonStrict(scope_info_list,
                             scope_info_length,
-                            context_list[i]->name());
+                            list[i]->name());
         scope_info_length++;
         SetElementNonStrict(
             scope_info_list,
             scope_info_length,
-            Handle<Smi>(Smi::FromInt(context_list[i]->index())));
+            Handle<Smi>(Smi::FromInt(list[i]->index())));
         scope_info_length++;
       }
       SetElementNonStrict(scope_info_list,
@@ -983,7 +1000,6 @@
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!HEAP->InNewSpace(substitution));
 
-  HeapIterator iterator;
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
@@ -1000,6 +1016,7 @@
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
+  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
@@ -1084,14 +1101,12 @@
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
-  HEAP->EnsureHeapIsIterable();
-
   if (IsJSFunctionCode(shared_info->code())) {
     Handle<Code> code = compile_info_wrapper.GetFunctionCode();
     ReplaceCodeObject(shared_info->code(), *code);
     Handle<Object> code_scope_info =  compile_info_wrapper.GetCodeScopeInfo();
     if (code_scope_info->IsFixedArray()) {
-      shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
+      shared_info->set_scope_info(SerializedScopeInfo::cast(*code_scope_info));
     }
   }
 
@@ -1228,7 +1243,7 @@
       V8::FatalProcessOutOfMemory("RelocInfoBuffer::GrowBuffer");
     }
 
-    // Set up new buffer.
+    // Setup new buffer.
     byte* new_buffer = NewArray<byte>(new_buffer_size);
 
     // Copy the data.
@@ -1256,8 +1271,7 @@
 
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
-static Handle<Code> PatchPositionsInCode(
-    Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(Handle<Code> code,
     Handle<JSArray> position_change_array) {
 
   RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1272,7 +1286,7 @@
         int new_position = TranslatePosition(position,
                                              position_change_array);
         if (position != new_position) {
-          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
+          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
           buffer_writer.Write(&info_copy);
           continue;
         }
@@ -1319,8 +1333,6 @@
   info->set_end_position(new_function_end);
   info->set_function_token_position(new_function_token_pos);
 
-  HEAP->EnsureHeapIsIterable();
-
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
     Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
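
ReplaceCodeObject above works by brute force: with allocation disabled, it walks every heap object and rewrites each pointer slot that still references the original Code object. The visitor shape in isolation, as a sketch with the heap reduced to a flat list of slots:

    #include <vector>

    struct Object {};  // Stand-in for a heap object payload.

    // Every pointer slot in every heap object is offered to the visitor,
    // which redirects slots that still point at `original`.
    class ReplacingVisitor {
     public:
      ReplacingVisitor(Object* original, Object* substitution)
          : original_(original), substitution_(substitution) {}

      void VisitPointer(Object** slot) {
        if (*slot == original_) *slot = substitution_;
      }

     private:
      Object* original_;
      Object* substitution_;
    };

    // Caller side: iterate all slots with allocation disabled so the object
    // graph cannot move underneath the walk.
    void ReplaceEverywhere(std::vector<Object*>& heap_slots,
                           Object* original, Object* substitution) {
      ReplacingVisitor visitor(original, substitution);
      for (Object*& slot : heap_slots) visitor.VisitPointer(&slot);
    }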
diff --git a/src/liveobjectlist-inl.h b/src/liveobjectlist-inl.h
index 2bc2296..f742de3 100644
--- a/src/liveobjectlist-inl.h
+++ b/src/liveobjectlist-inl.h
@@ -59,7 +59,7 @@
 }
 
 
-void LiveObjectList::ProcessNonLive(HeapObject* obj) {
+void LiveObjectList::ProcessNonLive(HeapObject *obj) {
   // Only do work if we have at least one list to process.
   if (last()) DoProcessNonLive(obj);
 }
@@ -93,7 +93,7 @@
 template <typename T>
 inline LiveObjectList::Element*
 LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
-  LiveObjectList* lol = last();
+  LiveObjectList *lol = last();
   while (lol != NULL) {
     Element* elements = lol->elements_;
     for (int i = 0; i < lol->obj_count_; i++) {
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 1aabc59..957c051 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -165,7 +165,7 @@
 }
 
 
-bool IsOfType(LiveObjectType type, HeapObject* obj) {
+bool IsOfType(LiveObjectType type, HeapObject *obj) {
   // Note: there are types that are more general (e.g. JSObject) that would
   // have passed the Is##type_() test for more specialized types (e.g.
   // JSFunction).  If we find a more specialized match but we're looking for
@@ -211,7 +211,7 @@
 }
 
 
-static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
+static bool InSpace(AllocationSpace space, HeapObject *heap_obj) {
   Heap* heap = ISOLATE->heap();
   if (space != LO_SPACE) {
     return heap->InSpace(heap_obj, space);
@@ -462,7 +462,7 @@
   char prev_ch = 0;
   while (*dst != '\0') {
     char ch = *src++;
-    // We will treat non-ASCII chars as '?'.
+    // We will treat non-ascii chars as '?'.
     if ((ch & 0x80) != 0) {
       ch = '?';
     }
@@ -498,7 +498,7 @@
                  length);
 
   } else if (obj->IsString()) {
-    String* str = String::cast(obj);
+    String *str = String::cast(obj);
     // Only grab up to 160 chars in case they are double byte.
     // We'll only dump 80 of them after we compact them.
     const int kMaxCharToDump = 80;
@@ -842,7 +842,7 @@
   bool found_root_;
   bool found_weak_root_;
 
-  LolFilter* filter_;
+  LolFilter *filter_;
 };
 
 
@@ -857,8 +857,8 @@
 // A summary writer for filling in a summary of lol lists and diffs.
 class LolSummaryWriter: public SummaryWriter {
  public:
-  LolSummaryWriter(LiveObjectList* older_lol,
-                   LiveObjectList* newer_lol)
+  LolSummaryWriter(LiveObjectList *older_lol,
+                   LiveObjectList *newer_lol)
       : older_(older_lol), newer_(newer_lol) {
   }
 
@@ -944,7 +944,7 @@
 int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
   int size = 0;
   int count = 0;
-  LiveObjectList* lol = this;
+  LiveObjectList *lol = this;
   do {
     // Only compute total size if requested i.e. when size_p is not null.
     if (size_p != NULL) {
@@ -1085,7 +1085,7 @@
 static int CountHeapObjects() {
   int count = 0;
   // Iterate over all the heap spaces and count the number of objects.
-  HeapIterator iterator;
+  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     count++;
@@ -1122,7 +1122,7 @@
   // allocation, and we need allocate below.
   {
     // Iterate over all the heap spaces and add the objects.
-    HeapIterator iterator;
+    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
     HeapObject* heap_obj = NULL;
     bool failed = false;
     while (!failed && (heap_obj = iterator.next()) != NULL) {
@@ -1183,7 +1183,7 @@
 // only time we'll actually delete the lol is when we Reset() or if the lol is
 // invisible, and its element count reaches 0.
 bool LiveObjectList::Delete(int id) {
-  LiveObjectList* lol = last();
+  LiveObjectList *lol = last();
   while (lol != NULL) {
     if (lol->id() == id) {
       break;
@@ -1246,8 +1246,8 @@
     newer_id = temp;
   }
 
-  LiveObjectList* newer_lol = FindLolForId(newer_id, last());
-  LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
+  LiveObjectList *newer_lol = FindLolForId(newer_id, last());
+  LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
 
   // If the id is defined, and we can't find a LOL for it, then we have an
   // invalid id.
@@ -1336,9 +1336,7 @@
   // Allocate the JSArray of the elements.
   Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
   if (elements->IsFailure()) return Object::cast(*elements);
-
-  maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
-  if (maybe_result->IsFailure()) return maybe_result;
+  Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
 
   // Set body.elements.
   Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1365,8 +1363,8 @@
     newer_id = temp;
   }
 
-  LiveObjectList* newer_lol = FindLolForId(newer_id, last());
-  LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
+  LiveObjectList *newer_lol = FindLolForId(newer_id, last());
+  LiveObjectList *older_lol = FindLolForId(older_id, newer_lol);
 
   // If the id is defined, and we can't find a LOL for it, then we have an
   // invalid id.
@@ -1464,9 +1462,7 @@
   Handle<JSObject> summary_obj =
     factory->NewJSObject(isolate->array_function());
   if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-
-  maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
-  if (maybe_result->IsFailure()) return maybe_result;
+  Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
 
   // Create the body object.
   Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1593,9 +1589,7 @@
 
   // Return the result as a JS array.
   Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-
-  maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
-  if (maybe_result->IsFailure()) return maybe_result;
+  Handle<JSArray>::cast(lols)->SetContent(*list);
 
   Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
@@ -1626,7 +1620,7 @@
 
 // Deletes all captured lols.
 void LiveObjectList::Reset() {
-  LiveObjectList* lol = last();
+  LiveObjectList *lol = last();
   // Just delete the last.  Each lol will delete its prev automatically.
   delete lol;
 
@@ -1715,8 +1709,8 @@
 
 inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
                                    LolFilter* filter,
-                                   LiveObjectSummary* summary,
-                                   void (*SetRootFound)(LiveObjectSummary* s),
+                                   LiveObjectSummary *summary,
+                                   void (*SetRootFound)(LiveObjectSummary *s),
                                    int start,
                                    int dump_limit,
                                    int* total_count,
@@ -1762,12 +1756,12 @@
 }
 
 
-inline void SetFoundRoot(LiveObjectSummary* summary) {
+inline void SetFoundRoot(LiveObjectSummary *summary) {
   summary->set_found_root();
 }
 
 
-inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
+inline void SetFoundWeakRoot(LiveObjectSummary *summary) {
   summary->set_found_weak_root();
 }
 
@@ -1779,7 +1773,7 @@
                                  int dump_limit,
                                  int* total_count,
                                  LolFilter* filter,
-                                 LiveObjectSummary* summary,
+                                 LiveObjectSummary *summary,
                                  JSFunction* arguments_function,
                                  Handle<Object> error) {
   HandleScope scope;
@@ -2267,7 +2261,7 @@
 }
 
 
-void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
+void LiveObjectList::DoProcessNonLive(HeapObject *obj) {
   // We should only be called if we have at least one lol to search.
   ASSERT(last() != NULL);
   Element* element = last()->Find(obj);
@@ -2284,7 +2278,7 @@
     int count = lol->obj_count_;
     for (int i = 0; i < count; i++) {
       HeapObject** p = &elements[i].obj_;
-      v->VisitPointer(reinterpret_cast<Object** >(p));
+      v->VisitPointer(reinterpret_cast<Object **>(p));
     }
     lol = lol->prev_;
   }
@@ -2389,11 +2383,11 @@
   PurgeDuplicates();
 
   // After the GC, sweep away all free'd Elements and compact.
-  LiveObjectList* prev = NULL;
-  LiveObjectList* next = NULL;
+  LiveObjectList *prev = NULL;
+  LiveObjectList *next = NULL;
 
   // Iterating from the youngest lol to the oldest lol.
-  for (LiveObjectList* lol = last(); lol; lol = prev) {
+  for (LiveObjectList *lol = last(); lol; lol = prev) {
     Element* elements = lol->elements_;
     prev = lol->prev();  // Save the prev.
 
@@ -2446,7 +2440,7 @@
       const int kMaxUnusedSpace = 64;
       if (diff > kMaxUnusedSpace) {  // Threshold for shrinking.
         // Shrink the list.
-        Element* new_elements = NewArray<Element>(new_count);
+        Element *new_elements = NewArray<Element>(new_count);
         memcpy(new_elements, elements, new_count * sizeof(Element));
 
         DeleteArray<Element>(elements);
@@ -2513,7 +2507,7 @@
   OS::Print("  Start verify ...\n");
   OS::Print("  Verifying ...");
   Flush();
-  HeapIterator iterator;
+  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
   HeapObject* heap_obj = NULL;
   while ((heap_obj = iterator.next()) != NULL) {
     number_of_heap_objects++;
@@ -2619,7 +2613,7 @@
     HeapObject* heap_obj = it.Obj();
     if (heap->InFromSpace(heap_obj)) {
       OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
-                i++, heap_obj, Heap::new_space()->FromSpaceStart());
+                i++, heap_obj, heap->new_space()->FromSpaceLow());
     }
   }
 }
diff --git a/src/liveobjectlist.h b/src/liveobjectlist.h
index 1aa9196..65470d7 100644
--- a/src/liveobjectlist.h
+++ b/src/liveobjectlist.h
@@ -77,7 +77,7 @@
   inline static void GCEpilogue();
   inline static void GCPrologue();
   inline static void IterateElements(ObjectVisitor* v);
-  inline static void ProcessNonLive(HeapObject* obj);
+  inline static void ProcessNonLive(HeapObject *obj);
   inline static void UpdateReferencesForScavengeGC();
 
   // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
@@ -125,7 +125,7 @@
   static void GCEpiloguePrivate();
   static void IterateElementsPrivate(ObjectVisitor* v);
 
-  static void DoProcessNonLive(HeapObject* obj);
+  static void DoProcessNonLive(HeapObject *obj);
 
   static int CompareElement(const Element* a, const Element* b);
 
@@ -138,7 +138,7 @@
                           int dump_limit,
                           int* total_count,
                           LolFilter* filter,
-                          LiveObjectSummary* summary,
+                          LiveObjectSummary *summary,
                           JSFunction* arguments_function,
                           Handle<Object> error);
 
@@ -151,7 +151,7 @@
                                        bool is_tracking_roots);
 
   static bool NeedLOLProcessing() { return (last() != NULL); }
-  static void NullifyNonLivePointer(HeapObject** p) {
+  static void NullifyNonLivePointer(HeapObject **p) {
     // Mask out the low bit that marks this as a heap object.  We'll use this
     // cleared bit as an indicator that this pointer needs to be collected.
     //
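
The comment above leans on pointer alignment: heap objects are at least word-aligned, so the lowest address bit is normally zero and can be borrowed as an in-band flag, then masked off before the pointer is dereferenced again. A small sketch of that trick (the helper names are made up for illustration):

    #include <cassert>
    #include <cstdint>

    // Word-aligned pointers have zero low bits, so one bit can carry a flag.
    inline uintptr_t SetLowBit(void* p) {
      return reinterpret_cast<uintptr_t>(p) | 1u;
    }
    inline bool LowBitIsSet(uintptr_t v) { return (v & 1u) != 0; }
    inline void* ClearLowBit(uintptr_t v) {
      return reinterpret_cast<void*>(v & ~uintptr_t{1});
    }

    int main() {
      int x = 7;                         // int storage is suitably aligned
      uintptr_t tagged = SetLowBit(&x);  // flag the entry in place
      assert(LowBitIsSet(tagged));
      assert(*static_cast<int*>(ClearLowBit(tagged)) == 7);
      return 0;
    }
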
@@ -202,7 +202,7 @@
   int id_;
   int capacity_;
   int obj_count_;
-  Element* elements_;
+  Element *elements_;
 
   // Statics for managing all the lists.
   static uint32_t next_element_id_;
diff --git a/src/log.cc b/src/log.cc
index 21d64df..3d66b5f 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -35,7 +35,6 @@
 #include "global-handles.h"
 #include "log.h"
 #include "macro-assembler.h"
-#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
 #include "string-stream.h"
@@ -462,20 +461,18 @@
       utf8_pos_ += utf8_length;
       return;
     }
-    int uc16_length = Min(str->length(), kUtf16BufferSize);
-    String::WriteToFlat(str, utf16_buffer, 0, uc16_length);
-    int previous = unibrow::Utf16::kNoPreviousCharacter;
+    int uc16_length = Min(str->length(), kUc16BufferSize);
+    String::WriteToFlat(str, uc16_buffer_, 0, uc16_length);
     for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
-      uc16 c = utf16_buffer[i];
+      uc16 c = uc16_buffer_[i];
       if (c <= String::kMaxAsciiCharCodeU) {
         utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
       } else {
-        int char_length = unibrow::Utf8::Length(c, previous);
+        int char_length = unibrow::Utf8::Length(c);
         if (utf8_pos_ + char_length > kUtf8BufferSize) break;
-        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c, previous);
+        unibrow::Utf8::Encode(utf8_buffer_ + utf8_pos_, c);
         utf8_pos_ += char_length;
       }
-      previous = c;
     }
   }
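
The hunk above swaps the surrogate-aware converter (which tracked the previous UTF-16 unit so a lead/trail surrogate pair could be emitted as one 4-byte UTF-8 sequence) for the 3.6 version that encodes each uc16 unit independently. For reference, a self-contained sketch of per-unit BMP encoding in the spirit of the restored code (not the actual unibrow implementation):

    #include <cstdint>
    #include <cstdio>

    // Encode one BMP code point into UTF-8; returns bytes written.
    int EncodeUtf8(uint16_t c, char* buf) {
      if (c <= 0x7F) {                    // 1 byte: 0xxxxxxx
        buf[0] = static_cast<char>(c);
        return 1;
      }
      if (c <= 0x7FF) {                   // 2 bytes: 110xxxxx 10xxxxxx
        buf[0] = static_cast<char>(0xC0 | (c >> 6));
        buf[1] = static_cast<char>(0x80 | (c & 0x3F));
        return 2;
      }
      buf[0] = static_cast<char>(0xE0 | (c >> 12));        // 3 bytes
      buf[1] = static_cast<char>(0x80 | ((c >> 6) & 0x3F));
      buf[2] = static_cast<char>(0x80 | (c & 0x3F));
      return 3;
    }

    int main() {
      char buf[4];
      int n = EncodeUtf8(0x20AC, buf);  // U+20AC EURO SIGN -> E2 82 AC
      for (int i = 0; i < n; ++i)
        std::printf("%02X ", static_cast<unsigned>(static_cast<unsigned char>(buf[i])));
      std::printf("\n");
      return 0;
    }
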
 
@@ -507,11 +504,11 @@
 
  private:
   static const int kUtf8BufferSize = 512;
-  static const int kUtf16BufferSize = 128;
+  static const int kUc16BufferSize = 128;
 
   int utf8_pos_;
   char utf8_buffer_[kUtf8BufferSize];
-  uc16 utf16_buffer[kUtf16BufferSize];
+  uc16 uc16_buffer_[kUc16BufferSize];
 };
 
 
@@ -1359,12 +1356,12 @@
 
 static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
                                       Handle<Code>* code_objects) {
-  HeapIterator iterator;
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
 
   // Iterate the heap to find shared function info objects and record
   // the unoptimized code for them.
+  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1453,8 +1450,6 @@
   const char arch[] = "x64";
 #elif V8_TARGET_ARCH_ARM
   const char arch[] = "arm";
-#elif V8_TARGET_ARCH_MIPS
-  const char arch[] = "mips";
 #else
   const char arch[] = "unknown";
 #endif
@@ -1524,10 +1519,8 @@
 
 
 void Logger::LogCodeObjects() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "Logger::LogCodeObjects");
-  HeapIterator iterator;
   AssertNoAllocation no_alloc;
+  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
   }
@@ -1580,8 +1573,6 @@
 
 
 void Logger::LogCompiledFunctions() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "Logger::LogCompiledFunctions");
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1600,10 +1591,9 @@
 
 
 void Logger::LogAccessorCallbacks() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "Logger::LogAccessorCallbacks");
-  HeapIterator iterator;
   AssertNoAllocation no_alloc;
+  HeapIterator iterator;
+  i::Isolate* isolate = ISOLATE;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsAccessorInfo()) continue;
     AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1611,17 +1601,17 @@
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     if (getter_entry != 0) {
-      PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
+      PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
-      PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
+      PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
     }
   }
 }
 
 
-bool Logger::SetUp() {
+bool Logger::Setup() {
   // Tests and EnsureInitialize() can call this twice in a row. It's harmless.
   if (is_initialized_) return true;
   is_initialized_ = true;
@@ -1714,9 +1704,9 @@
 
 
 void Logger::EnableSlidingStateWindow() {
-  // If the ticker is NULL, Logger::SetUp has not been called yet.  In
+  // If the ticker is NULL, Logger::Setup has not been called yet.  In
   // that case, we set the sliding_state_window flag so that the
-  // sliding window computation will be started when Logger::SetUp is
+  // sliding window computation will be started when Logger::Setup is
   // called.
   if (ticker_ == NULL) {
     FLAG_sliding_state_window = true;
@@ -1729,14 +1719,13 @@
   }
 }
 
-// Protects the state below.
-static LazyMutex active_samplers_mutex = LAZY_MUTEX_INITIALIZER;
 
+Mutex* SamplerRegistry::mutex_ = OS::CreateMutex();
 List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
 
 
 bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
-  ScopedLock lock(active_samplers_mutex.Pointer());
+  ScopedLock lock(mutex_);
   for (int i = 0;
        ActiveSamplersExist() && i < active_samplers_->length();
        ++i) {
@@ -1763,7 +1752,7 @@
 
 void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(active_samplers_mutex.Pointer());
+  ScopedLock lock(mutex_);
   if (active_samplers_ == NULL) {
     active_samplers_ = new List<Sampler*>;
   } else {
@@ -1775,7 +1764,7 @@
 
 void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
   ASSERT(sampler->IsActive());
-  ScopedLock lock(active_samplers_mutex.Pointer());
+  ScopedLock lock(mutex_);
   ASSERT(active_samplers_ != NULL);
   bool removed = active_samplers_->RemoveElement(sampler);
   ASSERT(removed);
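
Note on the mutex change above: the removed lines created the registry lock lazily on first use, while the restored code initializes it via OS::CreateMutex() during static initialization. The usual portable way to get first-use construction without a global constructor is a function-local static; a sketch of that pattern, with std::mutex standing in for the platform mutex:

    #include <mutex>

    // A function-local static is constructed the first time control passes
    // through it (thread-safely in C++11 and later).
    std::mutex& ActiveSamplersMutex() {
      static std::mutex mu;
      return mu;
    }

    int counter = 0;

    void Increment() {
      std::lock_guard<std::mutex> lock(ActiveSamplersMutex());
      ++counter;
    }

    int main() {
      Increment();
      return counter == 1 ? 0 : 1;
    }
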
diff --git a/src/log.h b/src/log.h
index 1297387..50358ce 100644
--- a/src/log.h
+++ b/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_LOG_H_
 
 #include "allocation.h"
-#include "objects.h"
 #include "platform.h"
 #include "log-utils.h"
 
@@ -71,6 +70,7 @@
 // tick profiler requires code events, so --prof implies --log-code.
 
 // Forward declarations.
+class HashMap;
 class LogMessageBuilder;
 class Profiler;
 class Semaphore;
@@ -149,14 +149,14 @@
 #undef DECLARE_ENUM
 
   // Acquires resources for logging if the right flags are set.
-  bool SetUp();
+  bool Setup();
 
   void EnsureTickerStarted();
   void EnsureTickerStopped();
 
   Sampler* sampler();
 
-  // Frees resources acquired in SetUp.
+  // Frees resources acquired in Setup.
   // When a temporary file is used for the log, returns its stream descriptor,
   // leaving the file open.
   FILE* TearDown();
@@ -410,7 +410,7 @@
   NameMap* address_to_name_map_;
 
   // Guards against multiple calls to TearDown() that can happen in some tests.
-  // 'true' between SetUp() and TearDown().
+  // 'true' between Setup() and TearDown().
   bool is_initialized_;
 
   // Support for 'incremental addresses' in compressed logs:
@@ -454,6 +454,7 @@
     return active_samplers_ != NULL && !active_samplers_->is_empty();
   }
 
+  static Mutex* mutex_;  // Protects the state below.
   static List<Sampler*>* active_samplers_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 11e2217..30838bd 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,6 +36,27 @@
 };
 
 
+enum CodeLocation {
+  IN_JAVASCRIPT,
+  IN_JS_ENTRY,
+  IN_C_ENTRY
+};
+
+
+enum HandlerType {
+  TRY_CATCH_HANDLER,
+  TRY_FINALLY_HANDLER,
+  JS_ENTRY_HANDLER
+};
+
+
+// Types of uncatchable exceptions.
+enum UncatchableExceptionType {
+  OUT_OF_MEMORY,
+  TERMINATION
+};
+
+
 // Invalid depth in prototype chain.
 const int kInvalidProtoDepth = -1;
 
@@ -72,63 +93,6 @@
 namespace v8 {
 namespace internal {
 
-class FrameScope {
- public:
-  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
-      : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
-    masm->set_has_frame(true);
-    if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
-      masm->EnterFrame(type);
-    }
-  }
-
-  ~FrameScope() {
-    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
-      masm_->LeaveFrame(type_);
-    }
-    masm_->set_has_frame(old_has_frame_);
-  }
-
-  // Normally we generate the leave-frame code when this object goes
-  // out of scope.  Sometimes we may need to generate the code somewhere else
-  // in addition.  Calling this will achieve that, but the object stays in
-  // scope, the MacroAssembler is still marked as being in a frame scope, and
-  // the code will be generated again when it goes out of scope.
-  void GenerateLeaveFrame() {
-    masm_->LeaveFrame(type_);
-  }
-
- private:
-  MacroAssembler* masm_;
-  StackFrame::Type type_;
-  bool old_has_frame_;
-};
-
-
-class AllowExternalCallThatCantCauseGC: public FrameScope {
- public:
-  explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
-      : FrameScope(masm, StackFrame::NONE) { }
-};
-
-
-class NoCurrentFrameScope {
- public:
-  explicit NoCurrentFrameScope(MacroAssembler* masm)
-      : masm_(masm), saved_(masm->has_frame()) {
-    masm->set_has_frame(false);
-  }
-
-  ~NoCurrentFrameScope() {
-    masm_->set_has_frame(saved_);
-  }
-
- private:
-  MacroAssembler* masm_;
-  bool saved_;
-};
-
-
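
The FrameScope family deleted above is plain RAII: enter the frame state in the constructor, restore the previous state in the destructor, so every exit path (including early returns) is covered. A stripped-down sketch of the same shape; the Machine struct is a stand-in, and the real class additionally emits EnterFrame/LeaveFrame code through the MacroAssembler:

    #include <cstdio>

    struct Machine { bool has_frame = false; };

    // Flip state on entry, restore on exit, whatever path leaves the scope.
    class FrameScopeSketch {
     public:
      explicit FrameScopeSketch(Machine* m) : m_(m), old_(m->has_frame) {
        m_->has_frame = true;
        std::puts("enter frame");
      }
      ~FrameScopeSketch() {
        m_->has_frame = old_;
        std::puts("leave frame");
      }
     private:
      Machine* m_;
      bool old_;
    };

    int main() {
      Machine m;
      {
        FrameScopeSketch scope(&m);
        // ... code generation that requires a frame ...
      }
      return m.has_frame ? 1 : 0;  // state restored: returns 0
    }
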
 // Support for "structured" code comments.
 #ifdef DEBUG
 
diff --git a/src/macros.py b/src/macros.py
index 93287ae..7a493ca 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -26,7 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 # Dictionary that is passed as defines for js2c.py.
-# Used for defines that must be defined for all native JS files.
+# Used for defines that must be defined for all native js files.
 
 const NONE        = 0;
 const READ_ONLY   = 1;
@@ -82,6 +82,8 @@
 const kMaxYear  = 1000000;
 const kMinMonth = -10000000;
 const kMaxMonth = 10000000;
+const kMinDate  = -100000000;
+const kMaxDate  = 100000000;
 
 # Native cache ids.
 const STRING_TO_REGEXP_CACHE_ID = 0;
@@ -101,9 +103,6 @@
 macro IS_ARRAY(arg)             = (%_IsArray(arg));
 macro IS_FUNCTION(arg)          = (%_IsFunction(arg));
 macro IS_REGEXP(arg)            = (%_IsRegExp(arg));
-macro IS_SET(arg)               = (%_ClassOf(arg) === 'Set');
-macro IS_MAP(arg)               = (%_ClassOf(arg) === 'Map');
-macro IS_WEAKMAP(arg)           = (%_ClassOf(arg) === 'WeakMap');
 macro IS_DATE(arg)              = (%_ClassOf(arg) === 'Date');
 macro IS_NUMBER_WRAPPER(arg)    = (%_ClassOf(arg) === 'Number');
 macro IS_STRING_WRAPPER(arg)    = (%_ClassOf(arg) === 'String');
@@ -129,11 +128,6 @@
 # we cannot handle those anyway.
 macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
 
-# Indices in bound function info retrieved by %BoundFunctionGetBindings(...).
-const kBoundFunctionIndex = 0;
-const kBoundThisIndex = 1;
-const kBoundArgumentsStartIndex = 2;
-
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
@@ -164,36 +158,16 @@
 
 # Gets the value of a Date object. If arg is not a Date object
 # a type error is thrown.
-macro CHECK_DATE(arg) = if (%_ClassOf(arg) !== 'Date') ThrowDateTypeError();
-macro LOCAL_DATE_VALUE(arg) = (%_DateField(arg, 0) + %_DateField(arg, 21));
-macro UTC_DATE_VALUE(arg)    = (%_DateField(arg, 0));
-
-macro LOCAL_YEAR(arg)        = (%_DateField(arg, 1));
-macro LOCAL_MONTH(arg)       = (%_DateField(arg, 2));
-macro LOCAL_DAY(arg)         = (%_DateField(arg, 3));
-macro LOCAL_WEEKDAY(arg)     = (%_DateField(arg, 4));
-macro LOCAL_HOUR(arg)        = (%_DateField(arg, 5));
-macro LOCAL_MIN(arg)         = (%_DateField(arg, 6));
-macro LOCAL_SEC(arg)         = (%_DateField(arg, 7));
-macro LOCAL_MS(arg)          = (%_DateField(arg, 8));
-macro LOCAL_DAYS(arg)        = (%_DateField(arg, 9));
-macro LOCAL_TIME_IN_DAY(arg) = (%_DateField(arg, 10));
-
-macro UTC_YEAR(arg)        = (%_DateField(arg, 11));
-macro UTC_MONTH(arg)       = (%_DateField(arg, 12));
-macro UTC_DAY(arg)         = (%_DateField(arg, 13));
-macro UTC_WEEKDAY(arg)     = (%_DateField(arg, 14));
-macro UTC_HOUR(arg)        = (%_DateField(arg, 15));
-macro UTC_MIN(arg)         = (%_DateField(arg, 16));
-macro UTC_SEC(arg)         = (%_DateField(arg, 17));
-macro UTC_MS(arg)          = (%_DateField(arg, 18));
-macro UTC_DAYS(arg)        = (%_DateField(arg, 19));
-macro UTC_TIME_IN_DAY(arg) = (%_DateField(arg, 20));
-
-macro TIMEZONE_OFFSET(arg)   = (%_DateField(arg, 21));
-
-macro SET_UTC_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 1));
-macro SET_LOCAL_DATE_VALUE(arg, value) = (%DateSetValue(arg, value, 0));
+macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
+macro DAY(time) = ($floor(time / 86400000));
+macro NAN_OR_DATE_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : DateFromTime(time));
+macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
+macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
+macro NAN_OR_MIN_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MIN_FROM_TIME(time));
+macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
+macro NAN_OR_SEC_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : SEC_FROM_TIME(time));
+macro MS_FROM_TIME(time) = (Modulo(time, 1000));
+macro NAN_OR_MS_FROM_TIME(time) = (NUMBER_IS_NAN(time) ? time : MS_FROM_TIME(time));
 
 # Last input and last subject of regexp matches.
 macro LAST_SUBJECT(array) = ((array)[1]);
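
The restored date macros above recover each component with integer division and modulus on milliseconds since the epoch: 86400000 ms per day, 3600000 per hour, 60000 per minute, 1000 per second. A quick check of that arithmetic in C++, using a non-negative timestamp (the JS macros use a floored Modulo so negative times work as well):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t t = 1234567890123;  // ms since epoch
      int64_t day = t / 86400000;                        // DAY(time)
      int hour = static_cast<int>((t / 3600000) % 24);   // HOUR_FROM_TIME
      int min  = static_cast<int>((t / 60000) % 60);     // MIN_FROM_TIME
      int sec  = static_cast<int>((t / 1000) % 60);      // SEC_FROM_TIME
      int ms   = static_cast<int>(t % 1000);             // MS_FROM_TIME
      // Prints: day 14288 23:31:30.123 (i.e. 2009-02-13 23:31:30.123 UTC)
      std::printf("day %lld %02d:%02d:%02d.%03d\n",
                  static_cast<long long>(day), hour, min, sec, ms);
      return 0;
    }
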
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
deleted file mode 100644
index 43f6b89..0000000
--- a/src/mark-compact-inl.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
-
-#include "isolate.h"
-#include "memory.h"
-#include "mark-compact.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-MarkBit Marking::MarkBitFrom(Address addr) {
-  MemoryChunk* p = MemoryChunk::FromAddress(addr);
-  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
-                                         p->ContainsOnlyData());
-}
-
-
-void MarkCompactCollector::SetFlags(int flags) {
-  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
-  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
-  abort_incremental_marking_ =
-      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
-}
-
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  if (!mark_bit.Get()) {
-    mark_bit.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    ProcessNewlyMarkedObject(obj);
-  }
-}
-
-
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
-  MarkBit mark = Marking::MarkBitFrom(object);
-  bool old_mark = mark.Get();
-  if (!old_mark) SetMark(object, mark);
-  return old_mark;
-}
-
-
-void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
-  if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
-}
-
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(!mark_bit.Get());
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  mark_bit.Set();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-  if (obj->IsMap()) {
-    heap_->ClearCacheOnMap(Map::cast(obj));
-  }
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
-  ASSERT(obj->IsHeapObject());
-  HeapObject* heap_object = HeapObject::cast(obj);
-  return Marking::MarkBitFrom(heap_object).Get();
-}
-
-
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
-                                      Object** slot,
-                                      Object* object) {
-  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  if (object_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            object_page->slots_buffer_address(),
-                            slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictEvacuationCandidate(object_page);
-    }
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_INL_H_
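
The deleted header's MarkObject/SetMark helpers boil down to a test-and-set on a mark bitmap plus live-byte accounting, which is what keeps an object from being visited or counted twice. A toy version over an indexed heap, illustrative only (the real code maps object addresses to per-page bitmaps):

    #include <bitset>
    #include <cstdio>
    #include <vector>

    struct Obj { int size; };

    int main() {
      std::vector<Obj> heap = {{16}, {32}, {24}};
      std::bitset<3> mark_bits;  // one mark bit per object
      int live_bytes = 0;

      auto mark = [&](int i) {
        if (!mark_bits.test(i)) {      // only act on unmarked (white) objects
          mark_bits.set(i);
          live_bytes += heap[i].size;  // live-byte accounting
        }
      };

      mark(0);
      mark(2);
      mark(0);  // already marked: no double counting
      std::printf("live bytes: %d\n", live_bytes);  // 40
      return 0;
    }
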
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index dde172d..9b0d5fc 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,31 +27,20 @@
 
 #include "v8.h"
 
-#include "code-stubs.h"
 #include "compilation-cache.h"
-#include "deoptimizer.h"
 #include "execution.h"
+#include "heap-profiler.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
-#include "heap-profiler.h"
 #include "ic-inl.h"
-#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "objects-visiting.h"
-#include "objects-visiting-inl.h"
 #include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-
-const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
-const char* Marking::kImpossibleBitPattern = "01";
-
-
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
@@ -59,616 +48,70 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      sweep_precisely_(false),
-      reduce_memory_footprint_(false),
-      abort_incremental_marking_(false),
-      compacting_(false),
-      was_marked_incrementally_(false),
-      collect_maps_(FLAG_collect_maps),
-      flush_monomorphic_ics_(false),
+      force_compaction_(false),
+      compacting_collection_(false),
+      compact_on_next_gc_(false),
+      previous_marked_count_(0),
       tracer_(NULL),
-      migration_slots_buffer_(NULL),
+#ifdef DEBUG
+      live_young_objects_size_(0),
+      live_old_pointer_objects_size_(0),
+      live_old_data_objects_size_(0),
+      live_code_objects_size_(0),
+      live_map_objects_size_(0),
+      live_cell_objects_size_(0),
+      live_lo_objects_size_(0),
+      live_bytes_(0),
+#endif
       heap_(NULL),
       code_flusher_(NULL),
       encountered_weak_maps_(NULL) { }
 
 
-#ifdef DEBUG
-class VerifyMarkingVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
-      }
-    }
-  }
-};
-
-
-static void VerifyMarking(Address bottom, Address top) {
-  VerifyMarkingVisitor visitor;
-  HeapObject* object;
-  Address next_object_must_be_here_or_later = bottom;
-
-  for (Address current = bottom;
-       current < top;
-       current += kPointerSize) {
-    object = HeapObject::FromAddress(current);
-    if (MarkCompactCollector::IsMarked(object)) {
-      ASSERT(current >= next_object_must_be_here_or_later);
-      object->Iterate(&visitor);
-      next_object_must_be_here_or_later = current + object->Size();
-    }
-  }
-}
-
-
-static void VerifyMarking(NewSpace* space) {
-  Address end = space->top();
-  NewSpacePageIterator it(space->bottom(), end);
-  // The bottom position is at the start of its page. Allows us to use
-  // page->area_start() as start of range on all pages.
-  ASSERT_EQ(space->bottom(),
-            NewSpacePage::FromAddress(space->bottom())->area_start());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    Address limit = it.has_next() ? page->area_end() : end;
-    ASSERT(limit == end || !page->Contains(end));
-    VerifyMarking(page->area_start(), limit);
-  }
-}
-
-
-static void VerifyMarking(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    VerifyMarking(p->area_start(), p->area_end());
-  }
-}
-
-
-static void VerifyMarking(Heap* heap) {
-  VerifyMarking(heap->old_pointer_space());
-  VerifyMarking(heap->old_data_space());
-  VerifyMarking(heap->code_space());
-  VerifyMarking(heap->cell_space());
-  VerifyMarking(heap->map_space());
-  VerifyMarking(heap->new_space());
-
-  VerifyMarkingVisitor visitor;
-
-  LargeObjectIterator it(heap->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    if (MarkCompactCollector::IsMarked(obj)) {
-      obj->Iterate(&visitor);
-    }
-  }
-
-  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-}
-
-
-class VerifyEvacuationVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
-      }
-    }
-  }
-};
-
-
-static void VerifyEvacuation(Address bottom, Address top) {
-  VerifyEvacuationVisitor visitor;
-  HeapObject* object;
-  Address next_object_must_be_here_or_later = bottom;
-
-  for (Address current = bottom;
-       current < top;
-       current += kPointerSize) {
-    object = HeapObject::FromAddress(current);
-    if (MarkCompactCollector::IsMarked(object)) {
-      ASSERT(current >= next_object_must_be_here_or_later);
-      object->Iterate(&visitor);
-      next_object_must_be_here_or_later = current + object->Size();
-    }
-  }
-}
-
-
-static void VerifyEvacuation(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-  VerifyEvacuationVisitor visitor;
-
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    Address current = page->area_start();
-    Address limit = it.has_next() ? page->area_end() : space->top();
-    ASSERT(limit == space->top() || !page->Contains(space->top()));
-    while (current < limit) {
-      HeapObject* object = HeapObject::FromAddress(current);
-      object->Iterate(&visitor);
-      current += object->Size();
-    }
-  }
-}
-
-
-static void VerifyEvacuation(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (p->IsEvacuationCandidate()) continue;
-    VerifyEvacuation(p->area_start(), p->area_end());
-  }
-}
-
-
-static void VerifyEvacuation(Heap* heap) {
-  VerifyEvacuation(heap->old_pointer_space());
-  VerifyEvacuation(heap->old_data_space());
-  VerifyEvacuation(heap->code_space());
-  VerifyEvacuation(heap->cell_space());
-  VerifyEvacuation(heap->map_space());
-  VerifyEvacuation(heap->new_space());
-
-  VerifyEvacuationVisitor visitor;
-  heap->IterateStrongRoots(&visitor, VISIT_ALL);
-}
-#endif
-
-
-void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
-  p->MarkEvacuationCandidate();
-  evacuation_candidates_.Add(p);
-}
-
-
-static void TraceFragmentation(PagedSpace* space) {
-  int number_of_pages = space->CountTotalPages();
-  intptr_t reserved = (number_of_pages * space->AreaSize());
-  intptr_t free = reserved - space->SizeOfObjects();
-  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
-         AllocationSpaceName(space->identity()),
-         number_of_pages,
-         static_cast<int>(free),
-         static_cast<double>(free) * 100 / reserved);
-}
-
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
-  if (!compacting_) {
-    ASSERT(evacuation_candidates_.length() == 0);
-
-    CollectEvacuationCandidates(heap()->old_pointer_space());
-    CollectEvacuationCandidates(heap()->old_data_space());
-
-    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
-      CollectEvacuationCandidates(heap()->code_space());
-    } else if (FLAG_trace_fragmentation) {
-      TraceFragmentation(heap()->code_space());
-    }
-
-    if (FLAG_trace_fragmentation) {
-      TraceFragmentation(heap()->map_space());
-      TraceFragmentation(heap()->cell_space());
-    }
-
-    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
-
-    compacting_ = evacuation_candidates_.length() > 0;
-  }
-
-  return compacting_;
-}
-
-
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);
   ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
 
-  MarkLiveObjects();
-  ASSERT(heap_->incremental_marking()->IsStopped());
+  // Prepare has selected whether to compact the old generation or not.
+  // Tell the tracer.
+  if (IsCompacting()) tracer_->set_is_compacting();
 
-  if (collect_maps_) ClearNonLiveTransitions();
+  MarkLiveObjects();
+
+  if (FLAG_collect_maps) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
-#ifdef DEBUG
-  if (FLAG_verify_heap) {
-    VerifyMarking(heap_);
+  SweepLargeObjectSpace();
+
+  if (IsCompacting()) {
+    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
+    EncodeForwardingAddresses();
+
+    heap()->MarkMapPointersAsEncoded(true);
+    UpdatePointers();
+    heap()->MarkMapPointersAsEncoded(false);
+    heap()->isolate()->pc_to_code_cache()->Flush();
+
+    RelocateObjects();
+  } else {
+    SweepSpaces();
+    heap()->isolate()->pc_to_code_cache()->Flush();
   }
-#endif
-
-  SweepSpaces();
-
-  if (!collect_maps_) ReattachInitialMaps();
-
-  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
 
   Finish();
 
+  // Save the count of marked objects remaining after the collection and
+  // null out the GC tracer.
+  previous_marked_count_ = tracer_->marked_count();
+  ASSERT(previous_marked_count_ == 0);
   tracer_ = NULL;
 }
 
 
-#ifdef DEBUG
-void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    CHECK(p->markbits()->IsClean());
-    CHECK_EQ(0, p->LiveBytes());
-  }
-}
-
-void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    CHECK(p->markbits()->IsClean());
-    CHECK_EQ(0, p->LiveBytes());
-  }
-}
-
-void MarkCompactCollector::VerifyMarkbitsAreClean() {
-  VerifyMarkbitsAreClean(heap_->old_pointer_space());
-  VerifyMarkbitsAreClean(heap_->old_data_space());
-  VerifyMarkbitsAreClean(heap_->code_space());
-  VerifyMarkbitsAreClean(heap_->cell_space());
-  VerifyMarkbitsAreClean(heap_->map_space());
-  VerifyMarkbitsAreClean(heap_->new_space());
-
-  LargeObjectIterator it(heap_->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    ASSERT(Marking::IsWhite(mark_bit));
-  }
-}
-#endif
-
-
-static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-static void ClearMarkbitsInNewSpace(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-void MarkCompactCollector::ClearMarkbits() {
-  ClearMarkbitsInPagedSpace(heap_->code_space());
-  ClearMarkbitsInPagedSpace(heap_->map_space());
-  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
-  ClearMarkbitsInPagedSpace(heap_->old_data_space());
-  ClearMarkbitsInPagedSpace(heap_->cell_space());
-  ClearMarkbitsInNewSpace(heap_->new_space());
-
-  LargeObjectIterator it(heap_->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    mark_bit.Clear();
-    mark_bit.Next().Clear();
-  }
-}
-
-
-bool Marking::TransferMark(Address old_start, Address new_start) {
-  // This is only used when resizing an object.
-  ASSERT(MemoryChunk::FromAddress(old_start) ==
-         MemoryChunk::FromAddress(new_start));
-
-  // If the mark doesn't move, we don't check the color of the object.
-  // It doesn't matter whether the object is black, since it hasn't changed
-  // size, so the adjustment to the live data count will be zero anyway.
-  if (old_start == new_start) return false;
-
-  MarkBit new_mark_bit = MarkBitFrom(new_start);
-  MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
-  ObjectColor old_color = Color(old_mark_bit);
-#endif
-
-  if (Marking::IsBlack(old_mark_bit)) {
-    old_mark_bit.Clear();
-    ASSERT(IsWhite(old_mark_bit));
-    Marking::MarkBlack(new_mark_bit);
-    return true;
-  } else if (Marking::IsGrey(old_mark_bit)) {
-    ASSERT(heap_->incremental_marking()->IsMarking());
-    old_mark_bit.Clear();
-    old_mark_bit.Next().Clear();
-    ASSERT(IsWhite(old_mark_bit));
-    heap_->incremental_marking()->WhiteToGreyAndPush(
-        HeapObject::FromAddress(new_start), new_mark_bit);
-    heap_->incremental_marking()->RestartIfNotMarking();
-  }
-
-#ifdef DEBUG
-  ObjectColor new_color = Color(new_mark_bit);
-  ASSERT(new_color == old_color);
-#endif
-
-  return false;
-}
-
-
-const char* AllocationSpaceName(AllocationSpace space) {
-  switch (space) {
-    case NEW_SPACE: return "NEW_SPACE";
-    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
-    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
-    case CODE_SPACE: return "CODE_SPACE";
-    case MAP_SPACE: return "MAP_SPACE";
-    case CELL_SPACE: return "CELL_SPACE";
-    case LO_SPACE: return "LO_SPACE";
-    default:
-      UNREACHABLE();
-  }
-
-  return NULL;
-}
-
-
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them.  Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
-  // If page was not swept then there are no free list items on it.
-  if (!p->WasSwept()) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("%p [%s]: %d bytes live (unswept)\n",
-             reinterpret_cast<void*>(p),
-             AllocationSpaceName(space->identity()),
-             p->LiveBytes());
-    }
-    return 0;
-  }
-
-  FreeList::SizeStats sizes;
-  space->CountFreeListItems(p, &sizes);
-
-  intptr_t ratio;
-  intptr_t ratio_threshold;
-  intptr_t area_size = space->AreaSize();
-  if (space->identity() == CODE_SPACE) {
-    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-        area_size;
-    ratio_threshold = 10;
-  } else {
-    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-        area_size;
-    ratio_threshold = 15;
-  }
-
-  if (FLAG_trace_fragmentation) {
-    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-           reinterpret_cast<void*>(p),
-           AllocationSpaceName(space->identity()),
-           static_cast<int>(sizes.small_size_),
-           static_cast<double>(sizes.small_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.medium_size_),
-           static_cast<double>(sizes.medium_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.large_size_),
-           static_cast<double>(sizes.large_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.huge_size_),
-           static_cast<double>(sizes.huge_size_ * 100) /
-           area_size,
-           (ratio > ratio_threshold) ? "[fragmented]" : "");
-  }
-
-  if (FLAG_always_compact && sizes.Total() != area_size) {
-    return 1;
-  }
-
-  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
-
-  return static_cast<int>(ratio - ratio_threshold);
-}
-
-
-void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
-  ASSERT(space->identity() == OLD_POINTER_SPACE ||
-         space->identity() == OLD_DATA_SPACE ||
-         space->identity() == CODE_SPACE);
-
-  int number_of_pages = space->CountTotalPages();
-
-  const int kMaxMaxEvacuationCandidates = 1000;
-  int max_evacuation_candidates = Min(
-    kMaxMaxEvacuationCandidates,
-    static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
-
-  if (FLAG_stress_compaction || FLAG_always_compact) {
-    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
-  }
-
-  class Candidate {
-   public:
-    Candidate() : fragmentation_(0), page_(NULL) { }
-    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
-
-    int fragmentation() { return fragmentation_; }
-    Page* page() { return page_; }
-
-   private:
-    int fragmentation_;
-    Page* page_;
-  };
-
-  enum CompactionMode {
-    COMPACT_FREE_LISTS,
-    REDUCE_MEMORY_FOOTPRINT
-  };
-
-  CompactionMode mode = COMPACT_FREE_LISTS;
-
-  intptr_t reserved = number_of_pages * space->AreaSize();
-  intptr_t over_reserved = reserved - space->SizeOfObjects();
-  static const intptr_t kFreenessThreshold = 50;
-
-  if (over_reserved >= 2 * space->AreaSize() &&
-      reduce_memory_footprint_) {
-    mode = REDUCE_MEMORY_FOOTPRINT;
-
-    // We expect that empty pages are easier to compact so slightly bump the
-    // limit.
-    max_evacuation_candidates += 2;
-
-    if (FLAG_trace_fragmentation) {
-      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
-             static_cast<double>(over_reserved) / MB,
-             static_cast<int>(kFreenessThreshold));
-    }
-  }
-
-  intptr_t estimated_release = 0;
-
-  Candidate candidates[kMaxMaxEvacuationCandidates];
-
-  int count = 0;
-  int fragmentation = 0;
-  Candidate* least = NULL;
-
-  PageIterator it(space);
-  if (it.has_next()) it.next();  // Never compact the first page.
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    p->ClearEvacuationCandidate();
-
-    if (FLAG_stress_compaction) {
-      int counter = space->heap()->ms_count();
-      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
-      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
-    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
-      // Don't try to release too many pages.
-      if (estimated_release >= ((over_reserved * 3) / 4)) {
-        continue;
-      }
-
-      intptr_t free_bytes = 0;
-
-      if (!p->WasSwept()) {
-        free_bytes = (p->area_size() - p->LiveBytes());
-      } else {
-        FreeList::SizeStats sizes;
-        space->CountFreeListItems(p, &sizes);
-        free_bytes = sizes.Total();
-      }
-
-      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
-
-      if (free_pct >= kFreenessThreshold) {
-        estimated_release += 2 * p->area_size() - free_bytes;
-        fragmentation = free_pct;
-      } else {
-        fragmentation = 0;
-      }
-
-      if (FLAG_trace_fragmentation) {
-        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
-               reinterpret_cast<void*>(p),
-               AllocationSpaceName(space->identity()),
-               static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / p->area_size(),
-               (fragmentation > 0) ? "[fragmented]" : "");
-      }
-    } else {
-      fragmentation = FreeListFragmentation(space, p);
-    }
-
-    if (fragmentation != 0) {
-      if (count < max_evacuation_candidates) {
-        candidates[count++] = Candidate(fragmentation, p);
-      } else {
-        if (least == NULL) {
-          for (int i = 0; i < max_evacuation_candidates; i++) {
-            if (least == NULL ||
-                candidates[i].fragmentation() < least->fragmentation()) {
-              least = candidates + i;
-            }
-          }
-        }
-        if (least->fragmentation() < fragmentation) {
-          *least = Candidate(fragmentation, p);
-          least = NULL;
-        }
-      }
-    }
-  }
-
-  for (int i = 0; i < count; i++) {
-    AddEvacuationCandidate(candidates[i].page());
-  }
-
-  if (count > 0 && FLAG_trace_fragmentation) {
-    PrintF("Collected %d evacuation candidates for space %s\n",
-           count,
-           AllocationSpaceName(space->identity()));
-  }
-}
-
-
-void MarkCompactCollector::AbortCompaction() {
-  if (compacting_) {
-    int npages = evacuation_candidates_.length();
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-      p->ClearEvacuationCandidate();
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-    compacting_ = false;
-    evacuation_candidates_.Rewind(0);
-    invalidated_code_.Rewind(0);
-  }
-  ASSERT_EQ(0, evacuation_candidates_.length());
-}
-
-
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
-  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
-
-  // Disable collection of maps if incremental marking is enabled.
-  // Map collection algorithm relies on a special map transition tree traversal
-  // order which is not implemented for incremental marking.
-  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
-
-  // Monomorphic ICs are preserved when possible, but need to be flushed
-  // when they might be keeping a Context alive, or when the heap is about
-  // to be serialized.
-  flush_monomorphic_ics_ =
-      heap()->isolate()->context_exit_happened() || Serializer::enabled();
-
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
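
Among the code removed in the hunk above, CollectEvacuationCandidates keeps a bounded set of the most fragmented pages: fill the candidate array first, then replace the least fragmented entry whenever a better page turns up. The selection logic in isolation, with plain fragmentation scores standing in for Page* candidates:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      const int kMax = 3;  // cap on evacuation candidates
      int fragmentation[] = {5, 40, 12, 60, 7, 33};
      std::vector<int> candidates;

      for (int f : fragmentation) {
        if (f == 0) continue;  // not worth defragmenting
        if (static_cast<int>(candidates.size()) < kMax) {
          candidates.push_back(f);
        } else {
          auto least = std::min_element(candidates.begin(), candidates.end());
          if (*least < f) *least = f;  // evict the weakest candidate
        }
      }
      for (int f : candidates) std::printf("%d ", f);  // 60 40 33
      std::printf("\n");
      return 0;
    }
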
@@ -677,10 +120,16 @@
   ASSERT(state_ == IDLE);
   state_ = PREPARE_GC;
 #endif
+  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
 
-  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+  compacting_collection_ =
+      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
+  compact_on_next_gc_ = false;
 
-  if (collect_maps_) CreateBackPointers();
+  if (FLAG_never_compact) compacting_collection_ = false;
+  if (!heap()->map_space()->MapPointersEncodable())
+      compacting_collection_ = false;
+  if (FLAG_collect_maps) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -688,31 +137,21 @@
   }
 #endif
 
-  // Clear marking bits if incremental marking is aborted.
-  if (was_marked_incrementally_ && abort_incremental_marking_) {
-    heap()->incremental_marking()->Abort();
-    ClearMarkbits();
-    AbortCompaction();
-    was_marked_incrementally_ = false;
-  }
-
-  // Don't start compaction if we are in the middle of incremental
-  // marking cycle. We did not collect any slots.
-  if (!FLAG_never_compact && !was_marked_incrementally_) {
-    StartCompaction(NON_INCREMENTAL_COMPACTION);
-  }
-
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->PrepareForMarkCompact();
+       space != NULL; space = spaces.next()) {
+    space->PrepareForMarkCompact(compacting_collection_);
   }
 
 #ifdef DEBUG
-  if (!was_marked_incrementally_ && FLAG_verify_heap) {
-    VerifyMarkbitsAreClean();
-  }
+  live_bytes_ = 0;
+  live_young_objects_size_ = 0;
+  live_old_pointer_objects_size_ = 0;
+  live_old_data_objects_size_ = 0;
+  live_code_objects_size_ = 0;
+  live_map_objects_size_ = 0;
+  live_cell_objects_size_ = 0;
+  live_lo_objects_size_ = 0;
 #endif
 }
 
@@ -729,6 +168,31 @@
   heap()->isolate()->stub_cache()->Clear();
 
   heap()->external_string_table_.CleanUp();
+
+  // If we've just compacted old space there's no reason to check the
+  // fragmentation limit. Just return.
+  if (HasCompacted()) return;
+
+  // We compact the old generation on the next GC if it has gotten too
+  // fragmented (ie, we could recover an expected amount of space by
+  // reclaiming the waste and free list blocks).
+  static const int kFragmentationLimit = 15;        // Percent.
+  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
+  intptr_t old_gen_recoverable = 0;
+  intptr_t old_gen_used = 0;
+
+  OldSpaces spaces;
+  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+    old_gen_recoverable += space->Waste() + space->AvailableFree();
+    old_gen_used += space->Size();
+  }
+
+  int old_gen_fragmentation =
+      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
+  if (old_gen_fragmentation > kFragmentationLimit &&
+      old_gen_recoverable > kFragmentationAllowed) {
+    compact_on_next_gc_ = true;
+  }
 }
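
To make the restored heuristic concrete: compaction is scheduled only when both the relative and the absolute test pass. For example, 8 MB recoverable out of 40 MB used is 20%, which clears the 15% limit and the 1 MB floor, so compact_on_next_gc_ would be set. As a sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kFragmentationLimit = 15;              // percent
      const int64_t kFragmentationAllowed = 1 << 20;   // 1 MB
      int64_t old_gen_used = 40ll << 20;               // 40 MB
      int64_t old_gen_recoverable = 8ll << 20;         // 8 MB

      int fragmentation =
          static_cast<int>(old_gen_recoverable * 100 / old_gen_used);
      bool compact_on_next_gc = fragmentation > kFragmentationLimit &&
                                old_gen_recoverable > kFragmentationAllowed;
      std::printf("%d%% -> compact next GC: %s\n",
                  fragmentation, compact_on_next_gc ? "yes" : "no");
      return 0;
    }
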
 
 
@@ -773,7 +237,8 @@
   }
 
   void AddCandidate(JSFunction* function) {
-    ASSERT(function->code() == function->shared()->code());
+    ASSERT(function->unchecked_code() ==
+           function->unchecked_shared()->unchecked_code());
 
     SetNextCandidate(function, jsfunction_candidates_head_);
     jsfunction_candidates_head_ = function;
@@ -793,26 +258,16 @@
     while (candidate != NULL) {
       next_candidate = GetNextCandidate(candidate);
 
-      SharedFunctionInfo* shared = candidate->shared();
+      SharedFunctionInfo* shared = candidate->unchecked_shared();
 
-      Code* code = shared->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
+      Code* code = shared->unchecked_code();
+      if (!code->IsMarked()) {
         shared->set_code(lazy_compile);
         candidate->set_code(lazy_compile);
       } else {
-        candidate->set_code(shared->code());
+        candidate->set_code(shared->unchecked_code());
       }
 
-      // We are in the middle of a GC cycle so the write barrier in the code
-      // setter did not record the slot update and we have to do that manually.
-      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
-      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
-      isolate_->heap()->mark_compact_collector()->
-          RecordCodeEntrySlot(slot, target);
-
-      RecordSharedFunctionInfoCodeSlot(shared);
-
       candidate = next_candidate;
     }
 
@@ -829,27 +284,17 @@
       next_candidate = GetNextCandidate(candidate);
       SetNextCandidate(candidate, NULL);
 
-      Code* code = candidate->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
+      Code* code = candidate->unchecked_code();
+      if (!code->IsMarked()) {
         candidate->set_code(lazy_compile);
       }
 
-      RecordSharedFunctionInfoCodeSlot(candidate);
-
       candidate = next_candidate;
     }
 
     shared_function_info_candidates_head_ = NULL;
   }
 
-  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
-    Object** slot = HeapObject::RawField(shared,
-                                         SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(slot, slot, HeapObject::cast(*slot));
-  }
-
   static JSFunction** GetNextCandidateField(JSFunction* candidate) {
     return reinterpret_cast<JSFunction**>(
         candidate->address() + JSFunction::kCodeEntryOffset);
@@ -866,19 +311,18 @@
 
   static SharedFunctionInfo** GetNextCandidateField(
       SharedFunctionInfo* candidate) {
-    Code* code = candidate->code();
+    Code* code = candidate->unchecked_code();
     return reinterpret_cast<SharedFunctionInfo**>(
-        code->address() + Code::kGCMetadataOffset);
+        code->address() + Code::kNextCodeFlushingCandidateOffset);
   }
 
   static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    return reinterpret_cast<SharedFunctionInfo*>(
-        candidate->code()->gc_metadata());
+    return *GetNextCandidateField(candidate);
   }
 
   static void SetNextCandidate(SharedFunctionInfo* candidate,
                                SharedFunctionInfo* next_candidate) {
-    candidate->code()->set_gc_metadata(next_candidate);
+    *GetNextCandidateField(candidate) = next_candidate;
   }
 
   Isolate* isolate_;
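
The candidate lists above are threaded through fields of the objects themselves (the code-entry slot of a JSFunction, a slot inside the Code object), so enqueuing a flushing candidate allocates nothing, which matters while the collector is running. The shape of that intrusive list, with an explicit stand-in field instead of a reused object slot:

    #include <cstdio>
    #include <initializer_list>

    struct Candidate {
      const char* name;
      Candidate* next_link = nullptr;  // stand-in for the reused field
    };

    int main() {
      Candidate a{"a"}, b{"b"}, c{"c"};
      Candidate* head = nullptr;

      for (Candidate* f : {&a, &b, &c}) {  // AddCandidate: push at head
        f->next_link = head;
        head = f;
      }
      for (Candidate* f = head; f != nullptr; f = f->next_link)
        std::printf("%s ", f->name);       // c b a
      std::printf("\n");
      return 0;
    }
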
@@ -903,7 +347,7 @@
   // it in place to its left substring.  Return the updated value.
   //
   // Here we assume that if we change *p, we replace it with a heap object
-  // (i.e., the left substring of a cons string is always a heap object).
+  // (ie, the left substring of a cons string is always a heap object).
   //
   // The check performed is:
   //   object->IsConsString() && !object->IsSymbol() &&
@@ -911,14 +355,14 @@
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  if (!FLAG_clever_optimizations) return object;
-  Map* map = object->map();
-  InstanceType type = map->instance_type();
+  MapWord map_word = object->map_word();
+  map_word.ClearMark();
+  InstanceType type = map_word.ToMap()->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
-  Heap* heap = map->GetHeap();
-  if (second != heap->empty_string()) {
+  Heap* heap = map_word.ToMap()->heap();
+  if (second != heap->raw_unchecked_empty_string()) {
     return object;
   }
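
ShortCircuitConsString exploits the fact that a cons (concatenation) string whose right half is the empty string is equivalent to its left half, so the marker retargets the slot to the left substring and lets the wrapper die unreferenced. A toy model of the retargeting; real cons strings are tagged heap objects, not this struct:

    #include <cstdio>
    #include <string>

    struct Str {
      std::string flat;      // payload if this is a flat string
      Str* first = nullptr;  // set only on cons strings
      Str* second = nullptr;
      bool IsCons() const { return first != nullptr; }
    };

    void ShortCircuit(Str** p) {
      Str* s = *p;
      if (s->IsCons() && !s->second->IsCons() && s->second->flat.empty())
        *p = s->first;  // retarget the slot; the cons cell becomes garbage
    }

    int main() {
      Str left{"hello"}, empty{""};
      Str cons{"", &left, &empty};
      Str* slot = &cons;
      ShortCircuit(&slot);
      std::printf("%s\n", slot->flat.c_str());  // hello
      return 0;
    }
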
 
@@ -960,12 +404,14 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
-    table_.Register(kVisitGlobalContext, &VisitGlobalContext);
-
     table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
 
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticMarkingVisitor,
+                                      Context::MarkCompactBodyDescriptor,
+                                      void>::Visit);
+
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
-    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
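
The table_.Register calls above build a function-pointer table indexed by visitor id, so the marker dispatches on an object's kind with one indexed load instead of a switch, and pointer-free kinds get a visitor that scans nothing. A minimal analogue of that dispatch:

    #include <cstdio>

    enum Kind { kFixedArray, kByteArray, kKindCount };

    using VisitFn = void (*)(const char*);
    VisitFn table[kKindCount];  // one visitor per object kind

    void VisitPointers(const char* name) { std::printf("scan %s\n", name); }
    void VisitData(const char* name)     { std::printf("skip %s\n", name); }

    int main() {
      table[kFixedArray] = &VisitPointers;  // contains pointers: must scan
      table[kByteArray]  = &VisitData;      // raw data: nothing to mark
      table[kFixedArray]("fixed array");
      table[kByteArray]("byte array");
      return 0;
    }
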
 
@@ -1010,7 +456,7 @@
   }
 
   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+    MarkObjectByPointer(heap, p);
   }
 
   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -1020,43 +466,29 @@
       if (VisitUnmarkedObjects(heap, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
-    MarkCompactCollector* collector = heap->mark_compact_collector();
-    for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
+  }
+
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
+      IC::Clear(rinfo->pc());
+      // Please note targets for cleared inline caches do not have to be
+      // marked since they are contained in HEAP->non_monomorphic_cache().
+    } else {
+      heap->mark_compact_collector()->MarkObject(code);
     }
   }
 
   static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
-    JSGlobalPropertyCell* cell =
-        JSGlobalPropertyCell::cast(rinfo->target_cell());
-    MarkBit mark = Marking::MarkBitFrom(cell);
-    heap->mark_compact_collector()->MarkObject(cell, mark);
-  }
-
-  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    // TODO(mstarzinger): We do not short-circuit cons strings here, verify
-    // that there can be no such embedded pointers and add assertion here.
-    HeapObject* object = HeapObject::cast(rinfo->target_object());
-    heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
-    MarkBit mark = Marking::MarkBitFrom(object);
-    heap->mark_compact_collector()->MarkObject(object, mark);
-  }
-
-  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
-        && (target->ic_state() == MEGAMORPHIC ||
-            heap->mark_compact_collector()->flush_monomorphic_ics_ ||
-            target->ic_age() != heap->global_ic_age())) {
-      IC::Clear(rinfo->pc());
-      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* cell = rinfo->target_cell();
+    Object* old_cell = cell;
+    VisitPointer(heap, &cell);
+    if (cell != old_cell) {
+      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
     }
-    MarkBit code_mark = Marking::MarkBitFrom(target);
-    heap->mark_compact_collector()->MarkObject(target, code_mark);
-    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
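
Both this rewritten VisitGlobalPropertyCell and the VisitCodeEntry further down use the same pattern: run the generic pointer visitor on a local copy of the slot, then write the result back (with whatever encoding the slot needs) only if the visitor changed it. A hedged sketch of the pattern with toy types:

#include <cassert>

struct Obj { Obj* replacement = nullptr; };

// Toy visitor: may rewrite the pointer (here, following a replacement),
// the way VisitPointer can short-circuit a cons string.
void VisitPointer(Obj** p) {
  if ((*p)->replacement != nullptr) *p = (*p)->replacement;
}

int main() {
  Obj flattened;
  Obj original;
  original.replacement = &flattened;

  Obj* slot = &original;   // the cell/entry being visited
  Obj* copy = slot;        // read the slot into a local
  Obj* old = copy;
  VisitPointer(&copy);     // the visitor works on the local copy
  if (copy != old) {
    slot = copy;           // write back only when it actually changed
  }
  assert(slot == &flattened);
  return 0;
}
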
 
   static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -1064,21 +496,17 @@
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
-    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    MarkBit code_mark = Marking::MarkBitFrom(target);
-    heap->mark_compact_collector()->MarkObject(target, code_mark);
-    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    heap->mark_compact_collector()->MarkObject(code);
   }
 
   // Mark object pointed to by p.
-  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
-                                         Object** anchor_slot,
-                                         Object** p)) {
+  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    collector->RecordSlot(anchor_slot, p, object);
-    MarkBit mark = Marking::MarkBitFrom(object);
-    collector->MarkObject(object, mark);
+    if (!object->IsMarked()) {
+      heap->mark_compact_collector()->MarkUnmarkedObject(object);
+    }
   }
 
 
@@ -1087,15 +515,12 @@
                                          HeapObject* obj)) {
 #ifdef DEBUG
     ASSERT(Isolate::Current()->heap()->Contains(obj));
-    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
+    ASSERT(!obj->IsMarked());
 #endif
     Map* map = obj->map();
-    Heap* heap = obj->GetHeap();
-    MarkBit mark = Marking::MarkBitFrom(obj);
-    heap->mark_compact_collector()->SetMark(obj, mark);
+    collector->SetMark(obj);
     // Mark the map pointer and the body.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    heap->mark_compact_collector()->MarkObject(map, map_mark);
+    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
     IterateBody(map, obj);
   }
 
@@ -1111,19 +536,15 @@
     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
-      Object* o = *p;
-      if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
-      HeapObject* obj = HeapObject::cast(o);
-      MarkBit mark = Marking::MarkBitFrom(obj);
-      if (mark.Get()) continue;
+      if (!(*p)->IsHeapObject()) continue;
+      HeapObject* obj = HeapObject::cast(*p);
+      if (obj->IsMarked()) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
   }
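
VisitUnmarkedObjects above reports failure when the C stack is nearly exhausted, and the caller falls back to plain marking so tracing can resume later from the marking stack. A standalone sketch of that guard, with an explicit depth budget standing in for V8's real stack limit check:

#include <cassert>
#include <vector>

struct Obj {
  bool marked = false;
  std::vector<Obj*> children;
};

const int kMaxDepth = 64;  // assumed stand-in for the C stack limit check

// Returns false when the depth budget runs out, like VisitUnmarkedObjects.
bool VisitUnmarked(Obj* obj, int depth) {
  if (depth > kMaxDepth) return false;  // close to a stack overflow
  if (obj->marked) return true;
  obj->marked = true;
  for (Obj* child : obj->children) {
    if (!VisitUnmarked(child, depth + 1)) return false;
  }
  return true;
}

void VisitPointer(Obj* obj, std::vector<Obj*>* marking_stack) {
  if (VisitUnmarked(obj, 0)) return;
  // Fallback: just mark and queue; the body is traced later when the
  // marking stack is drained.
  obj->marked = true;
  marking_stack->push_back(obj);
}

int main() {
  Obj parent, child;
  parent.children.push_back(&child);
  std::vector<Obj*> marking_stack;
  VisitPointer(&parent, &marking_stack);
  assert(parent.marked && child.marked);
  return 0;
}
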
 
   static inline void VisitExternalReference(Address* p) { }
-  static inline void VisitExternalReference(RelocInfo* rinfo) { }
   static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
 
  private:
@@ -1146,7 +567,7 @@
                               void> StructObjectVisitor;
 
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
-    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
 
     // Enqueue weak map in linked list of encountered weak maps.
@@ -1157,40 +578,25 @@
     // Skip visiting the backing hash table containing the mappings.
     int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->GetHeap(),
+        map->heap(),
         object,
         JSWeakMap::BodyDescriptor::kStartOffset,
         JSWeakMap::kTableOffset);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->GetHeap(),
+        map->heap(),
         object,
         JSWeakMap::kTableOffset + kPointerSize,
         object_size);
 
     // Mark the backing hash table without pushing it on the marking stack.
-    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
-    ASSERT(!MarkCompactCollector::IsMarked(table));
-    collector->SetMark(table, Marking::MarkBitFrom(table));
-    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
-    ASSERT(MarkCompactCollector::IsMarked(table->map()));
+    ASSERT(!weak_map->unchecked_table()->IsMarked());
+    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
+    collector->SetMark(weak_map->unchecked_table());
   }
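
The weak-map treatment above hinges on one trick: the backing table is marked so the store itself survives, but it is never pushed on the marking stack, so the entries it holds are not traced through and stay weak. A standalone sketch with toy types, not V8's:

#include <cassert>
#include <vector>

struct Obj { bool marked = false; };
struct Table { bool marked = false; std::vector<Obj*> entries; };
struct WeakMap { bool marked = false; Table* table = nullptr; };

void VisitJSWeakMap(WeakMap* wm, std::vector<Obj*>* marking_stack) {
  wm->marked = true;
  wm->table->marked = true;  // SetMark without Push: contents stay untraced
  (void)marking_stack;       // entries deliberately not pushed
}

int main() {
  Obj value;
  Table table;
  table.entries.push_back(&value);
  WeakMap wm;
  wm.table = &table;
  std::vector<Obj*> stack;
  VisitJSWeakMap(&wm, &stack);
  assert(table.marked && !value.marked);  // table alive, entry still weak
  return 0;
}
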
 
   static void VisitCode(Map* map, HeapObject* object) {
-    Heap* heap = map->GetHeap();
-    Code* code = reinterpret_cast<Code*>(object);
-    if (FLAG_cleanup_code_caches_at_gc) {
-      Object* raw_info = code->type_feedback_info();
-      if (raw_info->IsTypeFeedbackInfo()) {
-        TypeFeedbackCells* type_feedback_cells =
-            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
-        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
-          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
-          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
-          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
-        }
-      }
-    }
-    code->CodeIterateBody<StaticMarkingVisitor>(heap);
+    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
+        map->heap());
   }
 
   // Code flushing support.
@@ -1202,19 +608,19 @@
   static const int kRegExpCodeThreshold = 5;
 
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-    Object* undefined = heap->undefined_value();
+    Object* undefined = heap->raw_unchecked_undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
 
 
   inline static bool IsCompiled(JSFunction* function) {
-    return function->code() !=
+    return function->unchecked_code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
   inline static bool IsCompiled(SharedFunctionInfo* function) {
-    return function->code() !=
+    return function->unchecked_code() !=
         function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
   }
 
@@ -1223,16 +629,13 @@
 
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    MarkBit code_mark = Marking::MarkBitFrom(function->code());
-    if (code_mark.Get()) {
-      if (!Marking::MarkBitFrom(shared_info).Get()) {
-        shared_info->set_code_age(0);
-      }
+    if (function->unchecked_code()->IsMarked()) {
+      shared_info->set_code_age(0);
       return false;
     }
 
     // We do not flush code for optimized functions.
-    if (function->code() != shared_info->code()) {
+    if (function->code() != shared_info->unchecked_code()) {
       return false;
     }
 
@@ -1242,9 +645,8 @@
   inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    MarkBit code_mark =
-        Marking::MarkBitFrom(shared_info->code());
-    if (code_mark.Get()) {
+    if (shared_info->unchecked_code()->IsMarked()) {
+      shared_info->set_code_age(0);
       return false;
     }
 
@@ -1256,7 +658,9 @@
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
-    if (function_data->IsFunctionTemplateInfo()) {
+    if (function_data->IsHeapObject() &&
+        (SafeMap(function_data)->instance_type() ==
+         FUNCTION_TEMPLATE_INFO_TYPE)) {
       return false;
     }
 
@@ -1297,9 +701,40 @@
     return true;
   }
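
IsCompiled and IsFlushable above hinge on one invariant: a flushed function's code slot points back at the shared lazy-compile builtin, so calling the function simply recompiles it on demand. A minimal sketch of that invariant, with illustrative names rather than V8's:

#include <cassert>

struct Code { const char* kind; };

Code lazy_compile{"lazy"};  // stand-in for Builtins::kLazyCompile

struct Function {
  Code* code = &lazy_compile;
  bool IsCompiled() const { return code != &lazy_compile; }
};

int main() {
  Code compiled{"full"};
  Function f;
  f.code = &compiled;
  assert(f.IsCompiled());
  f.code = &lazy_compile;   // "flushing" drops the compiled code
  assert(!f.IsCompiled());  // the next call recompiles lazily
  return 0;
}
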
 
+
+  static inline Map* SafeMap(Object* obj) {
+    MapWord map_word = HeapObject::cast(obj)->map_word();
+    map_word.ClearMark();
+    map_word.ClearOverflow();
+    return map_word.ToMap();
+  }
+
+
+  static inline bool IsJSBuiltinsObject(Object* obj) {
+    return obj->IsHeapObject() &&
+        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
+  }
+
+
   static inline bool IsValidNotBuiltinContext(Object* ctx) {
-    return ctx->IsContext() &&
-        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
+    if (!ctx->IsHeapObject()) return false;
+
+    Map* map = SafeMap(ctx);
+    Heap* heap = map->heap();
+    if (!(map == heap->raw_unchecked_function_context_map() ||
+          map == heap->raw_unchecked_catch_context_map() ||
+          map == heap->raw_unchecked_with_context_map() ||
+          map == heap->raw_unchecked_global_context_map())) {
+      return false;
+    }
+
+    Context* context = reinterpret_cast<Context*>(ctx);
+
+    if (IsJSBuiltinsObject(context->global())) {
+      return false;
+    }
+
+    return true;
   }
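
SafeMap works because, in this V8 generation, the mark and overflow bits live in the low bits of the object's map word, so any reader active during marking must strip them before using the pointer. A standalone sketch of the bit trick (the exact bit layout here is assumed for illustration):

#include <cassert>
#include <cstdint>

struct Map { int instance_type; };

const uintptr_t kMarkBit = 1;      // assumed bit positions for the sketch
const uintptr_t kOverflowBit = 2;

struct MapWord {
  uintptr_t value;
  void ClearMark() { value &= ~kMarkBit; }
  void ClearOverflow() { value &= ~kOverflowBit; }
  Map* ToMap() { return reinterpret_cast<Map*>(value); }
};

int main() {
  Map map{7};
  MapWord w{reinterpret_cast<uintptr_t>(&map) | kMarkBit | kOverflowBit};
  w.ClearMark();
  w.ClearOverflow();
  assert(w.ToMap()->instance_type == 7);  // pointer is usable again
  return 0;
}
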
 
 
@@ -1319,29 +754,17 @@
                                           bool is_ascii) {
     // Make sure that the fixed array is in fact initialized on the RegExp.
     // We could potentially trigger a GC when initializing the RegExp.
-    if (HeapObject::cast(re->data())->map()->instance_type() !=
-            FIXED_ARRAY_TYPE) return;
+    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
 
     // Make sure this is a RegExp that actually contains code.
     if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
 
     Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
-    if (!code->IsSmi() &&
-        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
+    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
       // Save a copy that can be reinstated if we need the code again.
       re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                              code,
                              heap);
-
-      // Saving a copy might create a pointer into compaction candidate
-      // that was not observed by marker.  This might happen if JSRegExp data
-      // was marked through the compilation cache before marker reached JSRegExp
-      // object.
-      FixedArray* data = FixedArray::cast(re->data());
-      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
-      heap->mark_compact_collector()->
-          RecordSlot(slot, slot, code);
-
       // Set a number in the 0-255 range to guarantee no smi overflow.
       re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                              Smi::FromInt(heap->sweep_generation() & 0xff),
@@ -1373,14 +796,14 @@
   // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
   // we flush the code.
   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->GetHeap();
+    Heap* heap = map->heap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSRegExpFields(map, object);
       return;
     }
     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
-    // Flush code or set age on both ASCII and two byte code.
+    // Flush code or set age on both ASCII and two byte code.
     UpdateRegExpCodeAgeAndFlush(heap, re, true);
     UpdateRegExpCodeAgeAndFlush(heap, re, false);
     // Visit the fields of the RegExp, including the updated FixedArray.
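
UpdateRegExpCodeAgeAndFlush stamps the code slot with sweep_generation() & 0xff, so any age comparison has to survive the counter wrapping at 256. A hedged sketch of such a modulo-256 age test (only the masking and the threshold come from the code above; the comparison itself is illustrative):

#include <cassert>

const int kRegExpCodeThreshold = 5;

// Age of a stamp under modulo-256 arithmetic.
int AgeOf(int sweep_generation, int stamp) {
  return (sweep_generation - stamp) & 0xff;
}

bool ShouldFlush(int sweep_generation, int stamp) {
  return AgeOf(sweep_generation, stamp) > kRegExpCodeThreshold;
}

int main() {
  int stamp = 250;                  // stamped at sweep generation 250
  assert(!ShouldFlush(252, stamp)); // age 2: keep the saved code
  assert(AgeOf(2, stamp) == 8);     // generation counter wrapped at 256
  assert(ShouldFlush(2, stamp));    // age 8 > threshold: flush
  return 0;
}
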
@@ -1390,7 +813,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
-    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitSharedFunctionInfoGeneric(map, object);
       return;
@@ -1401,7 +824,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
       Map* map, HeapObject* object, bool known_flush_code_candidate) {
-    Heap* heap = map->GetHeap();
+    Heap* heap = map->heap();
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -1418,30 +841,18 @@
 
 
   static void VisitCodeEntry(Heap* heap, Address entry_address) {
-    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
-    MarkBit mark = Marking::MarkBitFrom(code);
-    heap->mark_compact_collector()->MarkObject(code, mark);
-    heap->mark_compact_collector()->
-        RecordCodeEntrySlot(entry_address, code);
-  }
-
-  static void VisitGlobalContext(Map* map, HeapObject* object) {
-    FixedBodyVisitor<StaticMarkingVisitor,
-                     Context::MarkCompactBodyDescriptor,
-                     void>::Visit(map, object);
-
-    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
-    for (int idx = Context::FIRST_WEAK_SLOT;
-         idx < Context::GLOBAL_CONTEXT_SLOTS;
-         ++idx) {
-      Object** slot =
-          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
-      collector->RecordSlot(slot, slot, *slot);
+    Object* code = Code::GetObjectFromEntryAddress(entry_address);
+    Object* old_code = code;
+    VisitPointer(heap, &code);
+    if (code != old_code) {
+      Memory::Address_at(entry_address) =
+          reinterpret_cast<Code*>(code)->entry();
     }
   }
 
+
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->GetHeap();
+    Heap* heap = map->heap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSFunction(map, object);
@@ -1456,12 +867,10 @@
     }
 
     if (!flush_code_candidate) {
-      Code* code = jsfunction->shared()->code();
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      collector->MarkObject(code, code_mark);
+      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
 
-      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        collector->MarkInlinedFunctionsCode(jsfunction->code());
+      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
       }
     }
 
@@ -1485,11 +894,12 @@
   static inline void VisitJSFunctionFields(Map* map,
                                            JSFunction* object,
                                            bool flush_code_candidate) {
-    Heap* heap = map->GetHeap();
+    Heap* heap = map->heap();
+    MarkCompactCollector* collector = heap->mark_compact_collector();
 
     VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
+                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1499,39 +909,29 @@
       // Visit shared function info to avoid double-checking its
       // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
-      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
-      if (!shared_info_mark.Get()) {
+      if (!shared_info->IsMarked()) {
         Map* shared_info_map = shared_info->map();
-        MarkBit shared_info_map_mark =
-            Marking::MarkBitFrom(shared_info_map);
-        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
-        heap->mark_compact_collector()->MarkObject(shared_info_map,
-                                                   shared_info_map_mark);
+        collector->SetMark(shared_info);
+        collector->MarkObject(shared_info_map);
         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                    shared_info,
                                                    true);
       }
     }
 
-    VisitPointers(
-        heap,
-        HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object,
-                             JSFunction::kNonWeakFieldsEndOffset));
+    VisitPointers(heap,
+                  SLOT_ADDR(object,
+                            JSFunction::kCodeEntryOffset + kPointerSize),
+                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
 
     // Don't visit the next function list field as it is a weak reference.
-    Object** next_function =
-        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
-    heap->mark_compact_collector()->RecordSlot(
-        next_function, next_function, *next_function);
   }
 
   static inline void VisitJSRegExpFields(Map* map,
                                          HeapObject* object) {
     int last_property_offset =
         JSRegExp::kSize + kPointerSize * map->inobject_properties();
-    VisitPointers(map->GetHeap(),
+    VisitPointers(map->heap(),
                   SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                   SLOT_ADDR(object, last_property_offset));
   }
@@ -1607,10 +1007,8 @@
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      MarkBit shared_mark = Marking::MarkBitFrom(shared);
-      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
-      collector_->MarkObject(shared->code(), code_mark);
-      collector_->MarkObject(shared, shared_mark);
+      collector_->MarkObject(shared->unchecked_code());
+      collector_->MarkObject(shared);
     }
   }
 
@@ -1624,17 +1022,16 @@
  // of its code and non-optimized version of all inlined functions.
   // This is required to support bailing out from inlined code.
   DeoptimizationInputData* data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
+      reinterpret_cast<DeoptimizationInputData*>(
+          code->unchecked_deoptimization_data());
 
-  FixedArray* literals = data->LiteralArray();
+  FixedArray* literals = data->UncheckedLiteralArray();
 
   for (int i = 0, count = data->InlinedFunctionCount()->value();
        i < count;
        i++) {
-    JSFunction* inlined = JSFunction::cast(literals->get(i));
-    Code* inlined_code = inlined->shared()->code();
-    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
-    MarkObject(inlined_code, inlined_code_mark);
+    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
+    MarkObject(inlined->unchecked_shared()->unchecked_code());
   }
 }
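
MarkInlinedFunctionsCode above exists because bailing out of optimized code lands in the unoptimized code of whichever function was inlined, so that code must stay alive as long as the optimized code does. A toy sketch of the relationship, with the literals array reduced to a plain vector:

#include <cassert>
#include <vector>

struct Code { bool marked = false; };
struct Function { Code unoptimized; };

struct OptimizedCode {
  Code self;
  std::vector<Function*> inlined;  // stand-in for the literals array
};

// Keep every inlined function's unoptimized code alive for bailouts.
void MarkInlinedFunctionsCode(OptimizedCode* code) {
  for (Function* f : code->inlined) f->unoptimized.marked = true;
}

int main() {
  Function a, b;
  OptimizedCode opt;
  opt.inlined = {&a, &b};
  MarkInlinedFunctionsCode(&opt);
  assert(a.unoptimized.marked && b.unoptimized.marked);
  return 0;
}
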
 
@@ -1648,8 +1045,7 @@
     // actual optimized code object.
     StackFrame* frame = it.frame();
     Code* code = frame->unchecked_code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
-    MarkObject(code, code_mark);
+    MarkObject(code);
     if (frame->is_optimized()) {
       MarkInlinedFunctionsCode(frame->LookupCode());
     }
@@ -1660,8 +1056,7 @@
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
-  // TODO(1609) Currently incremental marker does not support code flushing.
-  if (!FLAG_flush_code || was_marked_incrementally_) {
+  if (!FLAG_flush_code) {
     EnableCodeFlushing(false);
     return;
   }
@@ -1673,14 +1068,11 @@
     return;
   }
 #endif
-
   EnableCodeFlushing(true);
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  HeapObject* descriptor_array = heap()->empty_descriptor_array();
-  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
-  MarkObject(descriptor_array, descriptor_array_mark);
+  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
@@ -1697,7 +1089,7 @@
   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
-  ProcessMarkingDeque();
+  ProcessMarkingStack();
 }
 
 
@@ -1721,21 +1113,19 @@
 
     // Replace flat cons strings in place.
     HeapObject* object = ShortCircuitConsString(p);
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) return;
+    if (object->IsMarked()) return;
 
     Map* map = object->map();
     // Mark the object.
-    collector_->SetMark(object, mark_bit);
+    collector_->SetMark(object);
 
     // Mark the map pointer and body, and push them on the marking stack.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    collector_->MarkObject(map, map_mark);
+    collector_->MarkObject(map);
     StaticMarkingVisitor::IterateBody(map, object);
 
     // Mark all the objects reachable from the map and body.  May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingDeque();
+    collector_->EmptyMarkingStack();
   }
 
   MarkCompactCollector* collector_;
@@ -1751,19 +1141,17 @@
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
-      Object* o = *p;
-      if (o->IsHeapObject() &&
-          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
 
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        if (o->IsExternalString()) {
+        if ((*p)->IsExternalString()) {
           heap_->FinalizeExternalString(String::cast(*p));
         }
-        // Set the entry to the_hole_value (as deleted).
-        *p = heap_->the_hole_value();
+        // Set the entry to null_value (as deleted).
+        *p = heap_->raw_unchecked_null_value();
         pointers_removed_++;
       }
     }
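
The cleaner replaces each dead entry with a deletion sentinel (null_value in this V8 generation, the_hole_value in the reverted one) and counts removals so ElementsRemoved can fix up the table afterwards. A standalone sketch with toy types:

#include <cassert>
#include <vector>

struct Symbol { bool marked = false; };

Symbol kNullSentinel;  // stand-in for heap->raw_unchecked_null_value()

int CleanSymbolTable(std::vector<Symbol*>& slots) {
  int removed = 0;
  for (Symbol*& slot : slots) {
    if (slot == &kNullSentinel || slot->marked) continue;
    // An external symbol would release its external payload here.
    slot = &kNullSentinel;  // overwrite the entry as deleted
    ++removed;
  }
  return removed;
}

int main() {
  Symbol live, dead;
  live.marked = true;
  std::vector<Symbol*> table = {&live, &dead};
  assert(CleanSymbolTable(table) == 1);
  assert(table[1] == &kNullSentinel);
  return 0;
}
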
@@ -1784,7 +1172,8 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+    MapWord first_word = HeapObject::cast(object)->map_word();
+    if (first_word.IsMarked()) {
       return object;
     } else {
       return NULL;
@@ -1793,24 +1182,28 @@
 };
 
 
-void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
-  ASSERT(IsMarked(object));
+void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
+  ASSERT(!object->IsMarked());
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
-    heap_->ClearCacheOnMap(map);
+    if (FLAG_cleanup_code_caches_at_gc) {
+      map->ClearCodeCache(heap());
+    }
+    SetMark(map);
 
     // When map collection is enabled we have to mark through map's transitions
     // in a special way to make transition links weak.
     // Only maps for subclasses of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
       MarkMapContents(map);
     } else {
-      marking_deque_.PushBlack(map);
+      marking_stack_.Push(map);
     }
   } else {
-    marking_deque_.PushBlack(object);
+    SetMark(object);
+    marking_stack_.Push(object);
   }
 }
 
@@ -1819,17 +1212,12 @@
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->prototype_transitions();
-  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
-  if (!mark.Get()) {
-    mark.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
-                                          prototype_transitions->Size());
-  }
+  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
+  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
 
-  Object** raw_descriptor_array_slot =
-      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
-  Object* raw_descriptor_array = *raw_descriptor_array_slot;
+  Object* raw_descriptor_array =
+      *HeapObject::RawField(map,
+                            Map::kInstanceDescriptorsOrBitField3Offset);
   if (!raw_descriptor_array->IsSmi()) {
     MarkDescriptorArray(
         reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1843,36 +1231,24 @@
 
   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
 
-  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
-}
-
-
-void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
-                                                int offset) {
-  Object** slot = HeapObject::RawField(accessors, offset);
-  HeapObject* accessor = HeapObject::cast(*slot);
-  if (accessor->IsMap()) return;
-  RecordSlot(slot, slot, accessor);
-  MarkObjectAndPush(accessor);
+  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
 }
 
 
 void MarkCompactCollector::MarkDescriptorArray(
     DescriptorArray* descriptors) {
-  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
-  if (descriptors_mark.Get()) return;
+  if (descriptors->IsMarked()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != heap()->empty_descriptor_array());
-  SetMark(descriptors, descriptors_mark);
+  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
+  SetMark(descriptors);
 
   FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->IsHeapObject());
-  ASSERT(!IsMarked(contents));
+  ASSERT(!contents->IsMarked());
   ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  MarkBit contents_mark = Marking::MarkBitFrom(contents);
-  SetMark(contents, contents_mark);
+  SetMark(contents);
   // Contents contains (value, details) pairs.  If the details say that the type
   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1882,54 +1258,27 @@
     // If the pair (value, details) at index i, i+1 is not
     // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-
-    Object** slot = contents->data_start() + i;
-    if (!(*slot)->IsHeapObject()) continue;
-    HeapObject* value = HeapObject::cast(*slot);
-
-    RecordSlot(slot, slot, *slot);
-
-    switch (details.type()) {
-      case NORMAL:
-      case FIELD:
-      case CONSTANT_FUNCTION:
-      case HANDLER:
-      case INTERCEPTOR:
-        MarkObjectAndPush(value);
-        break;
-      case CALLBACKS:
-        if (!value->IsAccessorPair()) {
-          MarkObjectAndPush(value);
-        } else if (!MarkObjectWithoutPush(value)) {
-          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
-          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
-        }
-        break;
-      case ELEMENTS_TRANSITION:
-        // For maps with multiple elements transitions, the transition maps are
-        // stored in a FixedArray. Keep the fixed array alive but not the maps
-        // that it refers to.
-        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
-        break;
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-      case NULL_DESCRIPTOR:
-        break;
+    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
+      if (object->IsHeapObject() && !object->IsMarked()) {
+        SetMark(object);
+        marking_stack_.Push(object);
+      }
     }
   }
   // The DescriptorArray descriptors contains a pointer to its contents array,
   // but the contents array is already marked.
-  marking_deque_.PushBlack(descriptors);
+  marking_stack_.Push(descriptors);
 }
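
The loop above is what keeps map transitions weak: descriptor values are marked only when their details describe a real property, so transition targets die unless something else keeps them alive, and ClearNonLiveTransitions can prune them later. A toy sketch (the enum and the cutoff are illustrative stand-ins for V8's property types):

#include <cassert>
#include <vector>

enum PropertyType { NORMAL, FIELD, CALLBACKS, MAP_TRANSITION };

struct Obj { bool marked = false; };
struct Descriptor { Obj* value; PropertyType type; };

// Assumed stand-in for FIRST_PHANTOM_PROPERTY_TYPE: everything from
// MAP_TRANSITION on is "phantom" and must not keep its value alive.
const PropertyType kFirstPhantomType = MAP_TRANSITION;

void MarkDescriptors(std::vector<Descriptor>& contents) {
  for (Descriptor& d : contents) {
    if (d.type < kFirstPhantomType) d.value->marked = true;
  }
}

int main() {
  Obj field_value, transition_map;
  std::vector<Descriptor> contents = {
      {&field_value, FIELD}, {&transition_map, MAP_TRANSITION}};
  MarkDescriptors(contents);
  assert(field_value.marked);
  assert(!transition_map.marked);  // the transition stays weak
  return 0;
}
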
 
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.Next();
-       next_object != NULL; next_object = iterator.Next()) {
-    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
+  for (HeapObject* next_object = iterator.next();
+       next_object != NULL; next_object = iterator.next()) {
+    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
       Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
       if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
         map->CreateBackPointers();
       } else {
@@ -1940,126 +1289,54 @@
 }
 
 
-// Fill the marking stack with overflowed objects returned by the given
-// iterator.  Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
-                                            MarkingDeque* marking_deque,
-                                            T* it) {
-  // The caller should ensure that the marking stack is initially not full,
-  // so that we don't waste effort pointlessly scanning for objects.
-  ASSERT(!marking_deque->IsFull());
-
-  Map* filler_map = heap->one_pointer_filler_map();
-  for (HeapObject* object = it->Next();
-       object != NULL;
-       object = it->Next()) {
-    MarkBit markbit = Marking::MarkBitFrom(object);
-    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
-      Marking::GreyToBlack(markbit);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
-    }
-  }
+static int OverflowObjectSize(HeapObject* obj) {
+  // Recover the normal map pointer; it might be marked as live and
+  // overflowed.
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  map_word.ClearOverflow();
+  return obj->SizeFromMap(map_word.ToMap());
 }
 
 
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
+class OverflowedObjectsScanner : public AllStatic {
+ public:
+  // Fill the marking stack with overflowed objects returned by the given
+  // iterator.  Stop when the marking stack is filled or the end of the space
+  // is reached, whichever comes first.
+  template<class T>
+  static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
+                                           T* it) {
+    // The caller should ensure that the marking stack is initially not full,
+    // so that we don't waste effort pointlessly scanning for objects.
+    ASSERT(!collector->marking_stack_.is_full());
 
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-
-  int last_cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->area_end())));
-
-  Address cell_base = p->area_start();
-  int cell_index = Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(cell_base)));
-
-
-  for (;
-       cell_index < last_cell_index;
-       cell_index++, cell_base += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
-        Bitmap::IndexToCell(
-            Bitmap::CellAlignIndex(
-                p->AddressToMarkbitIndex(cell_base))));
-
-    const MarkBit::CellType current_cell = cells[cell_index];
-    if (current_cell == 0) continue;
-
-    const MarkBit::CellType next_cell = cells[cell_index + 1];
-    MarkBit::CellType grey_objects = current_cell &
-        ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
-
-    int offset = 0;
-    while (grey_objects != 0) {
-      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
-      grey_objects >>= trailing_zeros;
-      offset += trailing_zeros;
-      MarkBit markbit(&cells[cell_index], 1 << offset, false);
-      ASSERT(Marking::IsGrey(markbit));
-      Marking::GreyToBlack(markbit);
-      Address addr = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(addr);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
-      offset += 2;
-      grey_objects >>= 2;
-    }
-
-    grey_objects >>= (Bitmap::kBitsPerCell - 1);
-  }
-}
-
-
-static void DiscoverGreyObjectsInSpace(Heap* heap,
-                                       MarkingDeque* marking_deque,
-                                       PagedSpace* space) {
-  if (!space->was_swept_conservatively()) {
-    HeapObjectIterator it(space);
-    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
-  } else {
-    PageIterator it(space);
-    while (it.has_next()) {
-      Page* p = it.next();
-      DiscoverGreyObjectsOnPage(marking_deque, p);
-      if (marking_deque->IsFull()) return;
+    for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
+      if (object->IsOverflowed()) {
+        object->ClearOverflow();
+        ASSERT(object->IsMarked());
+        ASSERT(HEAP->Contains(object));
+        collector->marking_stack_.Push(object);
+        if (collector->marking_stack_.is_full()) return;
+      }
     }
   }
-}
+};
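
OverflowObjectSize and OverflowedObjectsScanner are the recovery path for a full marking stack: an object that does not fit is flagged overflowed in place, and a linear heap scan later rediscovers it. A self-contained sketch of a bounded marking stack with that overflow protocol:

#include <cassert>
#include <vector>

struct Obj { bool marked = false, overflowed = false; };

class MarkingStack {
 public:
  explicit MarkingStack(size_t capacity) : capacity_(capacity) {}
  void Push(Obj* o) {
    if (stack_.size() == capacity_) {  // no room: flag in place instead
      o->overflowed = true;
      overflowed_ = true;
      return;
    }
    stack_.push_back(o);
  }
  Obj* Pop() { Obj* o = stack_.back(); stack_.pop_back(); return o; }
  bool is_empty() const { return stack_.empty(); }
  bool is_full() const { return stack_.size() == capacity_; }
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }
 private:
  size_t capacity_;
  bool overflowed_ = false;
  std::vector<Obj*> stack_;
};

// Like ScanOverflowedObjects per space: linearly rediscover flagged
// objects, stopping early if the stack fills again.
void Refill(MarkingStack& stack, std::vector<Obj>& heap) {
  for (Obj& o : heap) {
    if (o.overflowed) {
      o.overflowed = false;
      stack.Push(&o);
      if (stack.is_full()) return;
    }
  }
  stack.clear_overflowed();
}

int main() {
  std::vector<Obj> heap(4);
  MarkingStack stack(2);
  for (Obj& o : heap) { o.marked = true; stack.Push(&o); }
  assert(stack.overflowed());                  // two objects did not fit
  while (!stack.is_empty()) stack.Pop();       // drain what we have
  Refill(stack, heap);                         // rediscover the rest
  assert(!stack.is_empty());
  return 0;
}
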
 
 
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
-  Object* o = *p;
-  if (!o->IsHeapObject()) return false;
-  HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = Marking::MarkBitFrom(heap_object);
-  return !mark.Get();
+  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
 }
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap()->symbol_table();
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
   // Mark the symbol table itself.
-  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
-  SetMark(symbol_table, symbol_table_mark);
+  SetMark(symbol_table);
   // Explicitly mark the prefix.
   MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
-  ProcessMarkingDeque();
+  ProcessMarkingStack();
 }
 
 
@@ -2072,9 +1349,9 @@
   MarkSymbolTable();
 
   // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+  while (marking_stack_.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack();
   }
 }
 
@@ -2092,13 +1369,9 @@
     bool group_marked = false;
     for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
-      if (object->IsHeapObject()) {
-        HeapObject* heap_object = HeapObject::cast(object);
-        MarkBit mark = Marking::MarkBitFrom(heap_object);
-        if (mark.Get()) {
-          group_marked = true;
-          break;
-        }
+      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
+        group_marked = true;
+        break;
       }
     }
 
@@ -2107,21 +1380,17 @@
       continue;
     }
 
-    // An object in the group is marked, so mark as grey all white heap
-    // objects in the group.
+    // An object in the group is marked, so mark all heap objects in
+    // the group.
     for (size_t j = 0; j < entry->length_; ++j) {
-      Object* object = *objects[j];
-      if (object->IsHeapObject()) {
-        HeapObject* heap_object = HeapObject::cast(object);
-        MarkBit mark = Marking::MarkBitFrom(heap_object);
-        MarkObject(heap_object, mark);
+      if ((*objects[j])->IsHeapObject()) {
+        MarkObject(HeapObject::cast(*objects[j]));
       }
     }
 
-    // Once the entire group has been colored grey, set the object group
-    // to NULL so it won't be processed again.
+    // Once the entire group has been marked, dispose it because it's
+    // not needed anymore.
     entry->Dispose();
-    object_groups->at(i) = NULL;
   }
   object_groups->Rewind(last);
 }
@@ -2136,7 +1405,7 @@
     ImplicitRefGroup* entry = ref_groups->at(i);
     ASSERT(entry != NULL);
 
-    if (!IsMarked(*entry->parent_)) {
+    if (!(*entry->parent_)->IsMarked()) {
       (*ref_groups)[last++] = entry;
       continue;
     }
@@ -2145,9 +1414,7 @@
     // A parent object is marked, so mark all child heap objects.
     for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
-        HeapObject* child = HeapObject::cast(*children[j]);
-        MarkBit mark = Marking::MarkBitFrom(child);
-        MarkObject(child, mark);
+        MarkObject(HeapObject::cast(*children[j]));
       }
     }
 
@@ -2163,17 +1430,21 @@
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
-  while (!marking_deque_.IsEmpty()) {
-    while (!marking_deque_.IsEmpty()) {
-      HeapObject* object = marking_deque_.Pop();
+void MarkCompactCollector::EmptyMarkingStack() {
+  while (!marking_stack_.is_empty()) {
+    while (!marking_stack_.is_empty()) {
+      HeapObject* object = marking_stack_.Pop();
       ASSERT(object->IsHeapObject());
       ASSERT(heap()->Contains(object));
-      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+      ASSERT(object->IsMarked());
+      ASSERT(!object->IsOverflowed());
 
-      Map* map = object->map();
-      MarkBit map_mark = Marking::MarkBitFrom(map);
-      MarkObject(map, map_mark);
+      // Because the object is marked, we have to recover the original map
+      // pointer and use it to mark the object's body.
+      MapWord map_word = object->map_word();
+      map_word.ClearMark();
+      Map* map = map_word.ToMap();
+      MarkObject(map);
 
       StaticMarkingVisitor::IterateBody(map, object);
     }
@@ -2190,45 +1461,39 @@
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
-  ASSERT(marking_deque_.overflowed());
+void MarkCompactCollector::RefillMarkingStack() {
+  ASSERT(marking_stack_.overflowed());
 
-  SemiSpaceIterator new_it(heap()->new_space());
-  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
-  if (marking_deque_.IsFull()) return;
+  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
+  if (marking_stack_.is_full()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->old_pointer_space());
-  if (marking_deque_.IsFull()) return;
+  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
+                                    &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
+  if (marking_stack_.is_full()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->old_data_space());
-  if (marking_deque_.IsFull()) return;
+  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
+  if (marking_stack_.is_full()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->code_space());
-  if (marking_deque_.IsFull()) return;
+  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
+  if (marking_stack_.is_full()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->map_space());
-  if (marking_deque_.IsFull()) return;
+  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
+  if (marking_stack_.is_full()) return;
 
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->cell_space());
-  if (marking_deque_.IsFull()) return;
+  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
+  if (marking_stack_.is_full()) return;
 
-  LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(heap(),
-                                  &marking_deque_,
-                                  &lo_it);
-  if (marking_deque_.IsFull()) return;
+  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
+  OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
+  if (marking_stack_.is_full()) return;
 
-  marking_deque_.ClearOverflowed();
+  marking_stack_.clear_overflowed();
 }
 
 
@@ -2236,23 +1501,23 @@
 // stack.  Before: the marking stack contains zero or more heap object
 // pointers.  After: the marking stack is empty and there are no overflowed
 // objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
-  EmptyMarkingDeque();
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+void MarkCompactCollector::ProcessMarkingStack() {
+  EmptyMarkingStack();
+  while (marking_stack_.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack();
   }
 }
 
 
 void MarkCompactCollector::ProcessExternalMarking() {
   bool work_to_do = true;
-  ASSERT(marking_deque_.IsEmpty());
+  ASSERT(marking_stack_.is_empty());
   while (work_to_do) {
     MarkObjectGroups();
     MarkImplicitRefGroups();
-    work_to_do = !marking_deque_.IsEmpty();
-    ProcessMarkingDeque();
+    work_to_do = !marking_stack_.is_empty();
+    ProcessMarkingStack();
   }
 }
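
MarkObjectGroups treats each group as all-or-nothing (if any member is live, every member is), and marking one group can make another group live, hence the loop to a fixed point in ProcessExternalMarking. A toy sketch of that fixpoint:

#include <cassert>
#include <vector>

struct Obj { bool marked = false; };
using Group = std::vector<Obj*>;

// One pass over all groups; returns whether anything new was marked.
bool MarkGroups(std::vector<Group>& groups) {
  bool progress = false;
  for (Group& g : groups) {
    bool any_marked = false;
    for (Obj* o : g) any_marked |= o->marked;
    if (!any_marked) continue;
    for (Obj* o : g) {
      if (!o->marked) { o->marked = true; progress = true; }
    }
  }
  return progress;
}

int main() {
  Obj a, b, c;
  a.marked = true;
  std::vector<Group> groups = {{&a, &b}, {&b, &c}};
  while (MarkGroups(groups)) {}  // iterate to a fixed point
  assert(c.marked);              // liveness propagated across groups
  return 0;
}
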
 
@@ -2264,64 +1529,19 @@
   // with the C stack limit check.
   PostponeInterruptsScope postpone(heap()->isolate());
 
-  bool incremental_marking_overflowed = false;
-  IncrementalMarking* incremental_marking = heap_->incremental_marking();
-  if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use grey color to mark overflowed objects so
-    // non-incremental marker can deal with them as if overflow
-    // occured during normal marking.
-    // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy it's overflow state.
-    incremental_marking->Finalize();
-    incremental_marking_overflowed =
-        incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
-  } else {
-    // Abort any pending incremental activities e.g. incremental sweeping.
-    incremental_marking->Abort();
-  }
-
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start,
-                            marking_deque_end);
-  ASSERT(!marking_deque_.overflowed());
+  // The to space contains live objects, the from space is used as a marking
+  // stack.
+  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
+                            heap()->new_space()->FromSpaceHigh());
 
-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
+  ASSERT(!marking_stack_.overflowed());
 
   PrepareForCodeFlushing();
 
-  if (was_marked_incrementally_) {
-    // There is no write barrier on cells so we have to scan them now at the end
-    // of the incremental marking.
-    {
-      HeapObjectIterator cell_iterator(heap()->cell_space());
-      HeapObject* cell;
-      while ((cell = cell_iterator.Next()) != NULL) {
-        ASSERT(cell->IsJSGlobalPropertyCell());
-        if (IsMarked(cell)) {
-          int offset = JSGlobalPropertyCell::kValueOffset;
-          StaticMarkingVisitor::VisitPointer(
-              heap(),
-              reinterpret_cast<Object**>(cell->address() + offset));
-        }
-      }
-    }
-  }
-
   RootMarkingVisitor root_visitor(heap());
   MarkRoots(&root_visitor);
 
@@ -2340,20 +1560,15 @@
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+  while (marking_stack_.overflowed()) {
+    RefillMarkingStack();
+    EmptyMarkingStack();
   }
 
   // Repeat host application specific marking to mark unmarked objects
   // reachable from the weak roots.
   ProcessExternalMarking();
 
-  AfterMarking();
-}
-
-
-void MarkCompactCollector::AfterMarking() {
   // Object literal map caches reference symbols (cache keys) and maps
   // (cache values). At this point still useful maps have already been
   // marked. Mark the keys for the alive values before we process the
@@ -2363,7 +1578,7 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap()->symbol_table();
+  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
   SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -2383,10 +1598,8 @@
     code_flusher_->ProcessCandidates();
   }
 
-  if (!FLAG_watch_ic_patching) {
-    // Clean up dead objects from the runtime profiler.
-    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
-  }
+  // Clean up dead objects from the runtime profiler.
+  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
 }
 
 
@@ -2394,13 +1607,13 @@
   Object* raw_context = heap()->global_contexts_list_;
   while (raw_context != heap()->undefined_value()) {
     Context* context = reinterpret_cast<Context*>(raw_context);
-    if (IsMarked(context)) {
+    if (context->IsMarked()) {
       HeapObject* raw_map_cache =
           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
       // A map cache may be reachable from the stack. In this case
       // it's already transitively marked and it's too late to clean
       // up its parts.
-      if (!IsMarked(raw_map_cache) &&
+      if (!raw_map_cache->IsMarked() &&
           raw_map_cache != heap()->undefined_value()) {
         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
         int existing_elements = map_cache->NumberOfElements();
@@ -2410,16 +1623,17 @@
              i += MapCache::kEntrySize) {
           Object* raw_key = map_cache->get(i);
           if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->the_hole_value()) continue;
+              raw_key == heap()->null_value()) continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
+          if (raw_map->IsHeapObject() &&
+              HeapObject::cast(raw_map)->IsMarked()) {
             ++used_elements;
           } else {
             // Delete useless entries with unmarked maps.
             ASSERT(raw_map->IsMap());
-            map_cache->set_the_hole(i);
-            map_cache->set_the_hole(i + 1);
+            map_cache->set_null_unchecked(heap(), i);
+            map_cache->set_null_unchecked(heap(), i + 1);
           }
         }
         if (used_elements == 0) {
@@ -2429,38 +1643,64 @@
           // extra complexity during GC. We rely on subsequent cache
           // usages (EnsureCapacity) to do this.
           map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
-          MarkObject(map_cache, map_cache_markbit);
+          MarkObject(map_cache);
         }
       }
     }
     // Move to next element in the list.
     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
   }
-  ProcessMarkingDeque();
+  ProcessMarkingStack();
 }
 
 
-void MarkCompactCollector::ReattachInitialMaps() {
-  HeapObjectIterator map_iterator(heap()->map_space());
-  for (HeapObject* obj = map_iterator.Next();
-       obj != NULL;
-       obj = map_iterator.Next()) {
-    if (obj->IsFreeSpace()) continue;
-    Map* map = Map::cast(obj);
-
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
-
-    if (map->attached_to_shared_function_info()) {
-      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
-    }
+#ifdef DEBUG
+void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
+  live_bytes_ += obj->Size();
+  if (heap()->new_space()->Contains(obj)) {
+    live_young_objects_size_ += obj->Size();
+  } else if (heap()->map_space()->Contains(obj)) {
+    ASSERT(obj->IsMap());
+    live_map_objects_size_ += obj->Size();
+  } else if (heap()->cell_space()->Contains(obj)) {
+    ASSERT(obj->IsJSGlobalPropertyCell());
+    live_cell_objects_size_ += obj->Size();
+  } else if (heap()->old_pointer_space()->Contains(obj)) {
+    live_old_pointer_objects_size_ += obj->Size();
+  } else if (heap()->old_data_space()->Contains(obj)) {
+    live_old_data_objects_size_ += obj->Size();
+  } else if (heap()->code_space()->Contains(obj)) {
+    live_code_objects_size_ += obj->Size();
+  } else if (heap()->lo_space()->Contains(obj)) {
+    live_lo_objects_size_ += obj->Size();
+  } else {
+    UNREACHABLE();
   }
 }
+#endif  // DEBUG
+
+
+void MarkCompactCollector::SweepLargeObjectSpace() {
+#ifdef DEBUG
+  ASSERT(state_ == MARK_LIVE_OBJECTS);
+  state_ =
+      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
+#endif
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  heap()->lo_space()->FreeUnmarkedObjects();
+}
+
+
+// Safe to use during marking phase only.
+bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
+  MapWord metamap = object->map_word();
+  metamap.ClearMark();
+  return metamap.ToMap()->instance_type() == MAP_TYPE;
+}
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(heap()->map_space());
+  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -2471,111 +1711,96 @@
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  for (HeapObject* obj = map_iterator.Next();
-       obj != NULL; obj = map_iterator.Next()) {
+  for (HeapObject* obj = map_iterator.next();
+       obj != NULL; obj = map_iterator.next()) {
     Map* map = reinterpret_cast<Map*>(obj);
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    if (map->IsFreeSpace()) continue;
+    if (!map->IsMarked() && map->IsByteArray()) continue;
 
-    ASSERT(map->IsMap());
+    ASSERT(SafeIsMap(map));
     // Only JSObject and subtypes have map transitions and back pointers.
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
-    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
 
-    if (map_mark.Get() &&
-        map->attached_to_shared_function_info()) {
+    if (map->IsMarked() && map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
       // from SharedFunctionInfo during the mark phase.
       // Since it survived the GC, reattach it now.
       map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
     }
 
-    ClearNonLivePrototypeTransitions(map);
-    ClearNonLiveMapTransitions(map, map_mark);
-  }
-}
-
-
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
-  int number_of_transitions = map->NumberOfProtoTransitions();
-  FixedArray* prototype_transitions = map->prototype_transitions();
-
-  int new_number_of_transitions = 0;
-  const int header = Map::kProtoTransitionHeaderSize;
-  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
-  const int map_offset = header + Map::kProtoTransitionMapOffset;
-  const int step = Map::kProtoTransitionElementsPerEntry;
-  for (int i = 0; i < number_of_transitions; i++) {
-    Object* prototype = prototype_transitions->get(proto_offset + i * step);
-    Object* cached_map = prototype_transitions->get(map_offset + i * step);
-    if (IsMarked(prototype) && IsMarked(cached_map)) {
-      int proto_index = proto_offset + new_number_of_transitions * step;
-      int map_index = map_offset + new_number_of_transitions * step;
-      if (new_number_of_transitions != i) {
-        prototype_transitions->set_unchecked(
-            heap_,
-            proto_index,
-            prototype,
-            UPDATE_WRITE_BARRIER);
-        prototype_transitions->set_unchecked(
-            heap_,
-            map_index,
-            cached_map,
-            SKIP_WRITE_BARRIER);
+    // Clear dead prototype transitions.
+    int number_of_transitions = map->NumberOfProtoTransitions();
+    if (number_of_transitions > 0) {
+      FixedArray* prototype_transitions =
+          map->unchecked_prototype_transitions();
+      int new_number_of_transitions = 0;
+      const int header = Map::kProtoTransitionHeaderSize;
+      const int proto_offset =
+          header + Map::kProtoTransitionPrototypeOffset;
+      const int map_offset = header + Map::kProtoTransitionMapOffset;
+      const int step = Map::kProtoTransitionElementsPerEntry;
+      for (int i = 0; i < number_of_transitions; i++) {
+        Object* prototype = prototype_transitions->get(proto_offset + i * step);
+        Object* cached_map = prototype_transitions->get(map_offset + i * step);
+        if (HeapObject::cast(prototype)->IsMarked() &&
+            HeapObject::cast(cached_map)->IsMarked()) {
+          if (new_number_of_transitions != i) {
+            prototype_transitions->set_unchecked(
+                heap_,
+                proto_offset + new_number_of_transitions * step,
+                prototype,
+                UPDATE_WRITE_BARRIER);
+            prototype_transitions->set_unchecked(
+                heap_,
+                map_offset + new_number_of_transitions * step,
+                cached_map,
+                SKIP_WRITE_BARRIER);
+          }
+          new_number_of_transitions++;
+        }
       }
-      Object** slot =
-          HeapObject::RawField(prototype_transitions,
-                               FixedArray::OffsetOfElementAt(proto_index));
-      RecordSlot(slot, slot, prototype);
-      new_number_of_transitions++;
-    }
-  }
 
-  if (new_number_of_transitions != number_of_transitions) {
-    map->SetNumberOfProtoTransitions(new_number_of_transitions);
-  }
-
-  // Fill slots that became free with undefined value.
-  for (int i = new_number_of_transitions * step;
-       i < number_of_transitions * step;
-       i++) {
-    prototype_transitions->set_undefined(heap_, header + i);
-  }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
-                                                      MarkBit map_mark) {
-  // Follow the chain of back pointers to find the prototype.
-  Object* real_prototype = map;
-  while (real_prototype->IsMap()) {
-    real_prototype = Map::cast(real_prototype)->prototype();
-    ASSERT(real_prototype->IsHeapObject());
-  }
-
-  // Follow back pointers, setting them to prototype, clearing map transitions
-  // when necessary.
-  Map* current = map;
-  bool current_is_alive = map_mark.Get();
-  bool on_dead_path = !current_is_alive;
-  while (current->IsMap()) {
-    Object* next = current->prototype();
-    // There should never be a dead map above a live map.
-    ASSERT(on_dead_path || current_is_alive);
-
-    // A live map above a dead map indicates a dead transition. This test will
-    // always be false on the first iteration.
-    if (on_dead_path && current_is_alive) {
-      on_dead_path = false;
-      current->ClearNonLiveTransitions(heap(), real_prototype);
+      // Fill slots that became free with undefined value.
+      Object* undefined = heap()->raw_unchecked_undefined_value();
+      for (int i = new_number_of_transitions * step;
+           i < number_of_transitions * step;
+           i++) {
+        prototype_transitions->set_unchecked(heap_,
+                                             header + i,
+                                             undefined,
+                                             SKIP_WRITE_BARRIER);
+      }
+      map->SetNumberOfProtoTransitions(new_number_of_transitions);
     }
 
-    Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
-    *slot = real_prototype;
-    if (current_is_alive) RecordSlot(slot, slot, real_prototype);
+    // Follow the chain of back pointers to find the prototype.
+    Map* current = map;
+    while (SafeIsMap(current)) {
+      current = reinterpret_cast<Map*>(current->prototype());
+      ASSERT(current->IsHeapObject());
+    }
+    Object* real_prototype = current;
 
-    current = reinterpret_cast<Map*>(next);
-    current_is_alive = Marking::MarkBitFrom(current).Get();
+    // Follow back pointers, setting them to prototype,
+    // clearing map transitions when necessary.
+    current = map;
+    bool on_dead_path = !current->IsMarked();
+    Object* next;
+    while (SafeIsMap(current)) {
+      next = current->prototype();
+      // There should never be a dead map above a live map.
+      ASSERT(on_dead_path || current->IsMarked());
+
+      // A live map above a dead map indicates a dead transition.
+      // This test will always be false on the first iteration.
+      if (on_dead_path && current->IsMarked()) {
+        on_dead_path = false;
+        current->ClearNonLiveTransitions(heap(), real_prototype);
+      }
+      *HeapObject::RawField(current, Map::kPrototypeOffset) =
+          real_prototype;
+      current = reinterpret_cast<Map*>(next);
+    }
   }
 }
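
The restored loop compacts each map's prototype_transitions array in place: live (prototype, cached map) pairs slide toward the front, and the freed tail is overwritten with undefined. A minimal stand-alone sketch of that compaction idiom, with a hypothetical Entry struct and a bool standing in for the mark bits (these names are illustrative, not V8 types):

    #include <vector>

    // Hypothetical stand-in for one prototype transition: V8 stores these
    // as flat (prototype, cached map) pairs in a FixedArray.
    struct Entry {
      int prototype;  // stands in for the prototype object
      int map;        // stands in for the cached map
      bool live;      // stands in for "both objects are marked"
    };

    // Compact live entries toward the front, as in the loop above:
    // survivors keep their relative order, and the tail is cleared.
    static std::size_t CompactTransitions(std::vector<Entry>* entries) {
      std::size_t kept = 0;
      for (std::size_t i = 0; i < entries->size(); i++) {
        if ((*entries)[i].live) {
          if (kept != i) (*entries)[kept] = (*entries)[i];
          kept++;
        }
      }
      // Fill the now-free tail with a sentinel (V8 writes undefined here).
      for (std::size_t i = kept; i < entries->size(); i++) {
        (*entries)[i] = Entry{0, 0, false};
      }
      return kept;  // the new number of transitions
    }
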
 
@@ -2583,13 +1808,13 @@
 void MarkCompactCollector::ProcessWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
+    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+    ObjectHashTable* table = weak_map->unchecked_table();
     for (int i = 0; i < table->Capacity(); i++) {
-      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
         Object* value = table->get(table->EntryToValueIndex(i));
-        StaticMarkingVisitor::VisitPointer(heap(), &value);
+        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
         table->set_unchecked(heap(),
                              table->EntryToValueIndex(i),
                              value,
@@ -2604,12 +1829,12 @@
 void MarkCompactCollector::ClearWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
+    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
-    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+    ObjectHashTable* table = weak_map->unchecked_table();
     for (int i = 0; i < table->Capacity(); i++) {
-      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
-        table->RemoveEntry(i);
+      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+        table->RemoveEntry(i, heap());
       }
     }
     weak_map_obj = weak_map->next();
@@ -2618,97 +1843,316 @@
   set_encountered_weak_maps(Smi::FromInt(0));
 }
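
ProcessWeakMaps and ClearWeakMaps together give weak maps their semantics: a value is kept alive only through a live key, and entries whose key died are dropped. A reduced model of the clearing pass, using std::unordered_map and an explicit set of marked keys in place of V8's ObjectHashTable and mark bits:

    #include <unordered_map>
    #include <unordered_set>

    // Minimal model of ClearWeakMaps: an entry survives only if its key is
    // still marked. The int keys and the 'marked' set are illustrative
    // stand-ins, not V8 types.
    static void ClearDeadEntries(std::unordered_map<int, int>* weak_map,
                                 const std::unordered_set<int>& marked) {
      for (auto it = weak_map->begin(); it != weak_map->end();) {
        if (marked.count(it->first) == 0) {
          it = weak_map->erase(it);  // key is dead: drop the whole entry
        } else {
          ++it;  // key is live (ProcessWeakMaps would have marked the value)
        }
      }
    }
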
 
+// -------------------------------------------------------------------------
+// Phase 2: Encode forwarding addresses.
+// When compacting, forwarding addresses for objects in old space and map
+// space are encoded in their map pointer word (along with an encoding of
+// their map pointers).
+//
+// The exact encoding is described in the comments for class MapWord in
+// objects.h.
+//
+// An address range [start, end) can have both live and non-live objects.
+// Maximal non-live regions are marked so they can be skipped on subsequent
+// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
+// free regions of one-word size (in which case the next word is the start
+// of a live object).  A second distinguished map-pointer encoding is used
+// to mark free regions larger than one word, and the size of the free
+// region (including the first word) is written to the second word of the
+// region.
+//
+// Any valid map page offset must lie in the object area of the page, so map
+// page offsets less than Page::kObjectStartOffset are invalid.  We use a
+// pair of distinguished invalid map encodings (for single word and multiple
+// words) to indicate free regions in the page found during computation of
+// forwarding addresses and skipped over in subsequent sweeps.
+
+
+// Encode a free region, defined by the given start address and size, in the
+// first word or two of the region.
+void EncodeFreeRegion(Address free_start, int free_size) {
+  ASSERT(free_size >= kIntSize);
+  if (free_size == kIntSize) {
+    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
+  } else {
+    ASSERT(free_size >= 2 * kIntSize);
+    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
+    Memory::int_at(free_start + kIntSize) = free_size;
+  }
+
+#ifdef DEBUG
+  // Zap the body of the free region.
+  if (FLAG_enable_slow_asserts) {
+    for (int offset = 2 * kIntSize;
+         offset < free_size;
+         offset += kPointerSize) {
+      Memory::Address_at(free_start + offset) = kZapValue;
+    }
+  }
+#endif
+}
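
The point of the free-region encoding is that a later sweep can skip dead memory in constant time. A minimal stand-alone sketch of the same idea, treating the heap as a plain uint32_t array and using made-up tag values in place of V8's kSingleFreeEncoding/kMultiFreeEncoding:

    #include <cassert>
    #include <cstdint>

    // Placeholder tags; the real encodings live in MarkCompactCollector.
    static const uint32_t kSingleFree = 0xFFFFFFFEu;  // one free word
    static const uint32_t kMultiFree  = 0xFFFFFFFDu;  // two or more words

    static void EncodeFree(uint32_t* heap, int start, int size_in_words) {
      assert(size_in_words >= 1);
      if (size_in_words == 1) {
        heap[start] = kSingleFree;   // the next word starts a live object
      } else {
        heap[start] = kMultiFree;    // second word holds the region size
        heap[start + 1] = static_cast<uint32_t>(size_in_words);
      }
    }

    // A sweeper can then hop over the region in O(1):
    static int SkipFree(const uint32_t* heap, int pos) {
      if (heap[pos] == kSingleFree) return pos + 1;
      if (heap[pos] == kMultiFree)  return pos + static_cast<int>(heap[pos + 1]);
      return pos;  // a live object starts here
    }
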
+
+
+// Try to promote all objects in new space.  Heap numbers and sequential
+// strings are promoted to the old data space, large objects to large object
+// space, and all others to the old pointer space.
+inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
+                                           HeapObject* object,
+                                           int object_size) {
+  MaybeObject* forwarded;
+  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+    forwarded = Failure::Exception();
+  } else {
+    OldSpace* target_space = heap->TargetSpace(object);
+    ASSERT(target_space == heap->old_pointer_space() ||
+           target_space == heap->old_data_space());
+    forwarded = target_space->MCAllocateRaw(object_size);
+  }
+  Object* result;
+  if (!forwarded->ToObject(&result)) {
+    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
+  }
+  return result;
+}
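
The shape of that allocator is "prefer promotion, fall back to the other semispace, which cannot fail". A sketch of the same control flow, with std::optional standing in for MaybeObject and ints standing in for addresses; the stub allocators below are invented for illustration:

    #include <optional>

    static std::optional<int> AllocInOldSpace(int size) {
      return (size <= 64) ? std::optional<int>(1000) : std::nullopt;  // stub
    }
    static int AllocInNewSpace(int /*size*/) { return 2000; }          // stub

    static int ForwardFromNewSpace(int size, int max_paged_object_size) {
      // Objects too large for a page would go to large object space in V8;
      // here we simply skip the promotion attempt for them.
      if (size <= max_paged_object_size) {
        if (std::optional<int> addr = AllocInOldSpace(size)) return *addr;
      }
      // Fall back to the other semispace. This cannot fail, because both
      // semispaces have the same size and objects are swept linearly.
      return AllocInNewSpace(size);
    }
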
+
+
+// Allocation functions for the paged spaces call the space's MCAllocateRaw.
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
+    Heap* heap,
+    HeapObject* ignore,
+    int object_size) {
+  return heap->old_pointer_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
+    Heap* heap,
+    HeapObject* ignore,
+    int object_size) {
+  return heap->old_data_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
+    Heap* heap,
+    HeapObject* ignore,
+    int object_size) {
+  return heap->code_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
+    Heap* heap,
+    HeapObject* ignore,
+    int object_size) {
+  return heap->map_space()->MCAllocateRaw(object_size);
+}
+
+
+MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
+    Heap* heap, HeapObject* ignore, int object_size) {
+  return heap->cell_space()->MCAllocateRaw(object_size);
+}
+
+
+// The forwarding address is encoded at the same offset as the current
+// to-space object, but in from space.
+inline void EncodeForwardingAddressInNewSpace(Heap* heap,
+                                              HeapObject* old_object,
+                                              int object_size,
+                                              Object* new_object,
+                                              int* ignored) {
+  int offset =
+      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
+  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
+      HeapObject::cast(new_object)->address();
+}
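
Because both semispaces have identical layout, the forwarding pointer for an object at offset N in to-space can simply be parked at offset N in from-space, with no tagging needed. A toy model of that mirror-slot scheme, using integer offsets instead of addresses:

    #include <vector>

    // Illustrative stand-in for the two semispaces: from_space holds the
    // forwarding "addresses" at the same offsets the objects occupy in
    // to-space.
    struct SemiSpaces {
      std::vector<int> from_space;
    };

    static void RecordForwarding(SemiSpaces* s, int old_offset,
                                 int new_address) {
      s->from_space[old_offset] = new_address;  // mirror slot in from-space
    }

    static int LookupForwarding(const SemiSpaces& s, int old_offset) {
      return s.from_space[old_offset];
    }
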
+
+
+// The forwarding address is encoded in the map pointer of the object as an
+// offset (in terms of live bytes) from the address of the first live object
+// in the page.
+inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
+                                                HeapObject* old_object,
+                                                int object_size,
+                                                Object* new_object,
+                                                int* offset) {
+  // Record the forwarding address of the first live object if necessary.
+  if (*offset == 0) {
+    Page::FromAddress(old_object->address())->mc_first_forwarded =
+        HeapObject::cast(new_object)->address();
+  }
+
+  MapWord encoding =
+      MapWord::EncodeAddress(old_object->map()->address(), *offset);
+  old_object->set_map_word(encoding);
+  *offset += object_size;
+  ASSERT(*offset <= Page::kObjectAreaSize);
+}
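
In the paged spaces there is no mirror space to borrow, so the forwarding information is packed into the object's own map word: the map's address plus the live-byte offset from the page's first live object. A toy version of that packing, with made-up field widths (the real encoding is defined in objects.h):

    #include <cassert>
    #include <cstdint>

    // Pack a small map index and a live-byte offset into one 32-bit word.
    static uint32_t EncodeMapWord(uint32_t map_index, uint32_t live_offset) {
      assert(map_index < (1u << 12) && live_offset < (1u << 20));
      return (map_index << 20) | live_offset;
    }

    static uint32_t MapIndexOf(uint32_t word)   { return word >> 20; }
    static uint32_t LiveOffsetOf(uint32_t word) {
      return word & ((1u << 20) - 1);
    }

    // The forwarding address is later reconstructed, roughly, as
    //   page->mc_first_forwarded + LiveOffsetOf(object->map_word()).
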
+
+
+// Most non-live objects are ignored.
+inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
+
+
+// Function template that, given a range of addresses (e.g., a semispace or a
+// paged space page), iterates through the objects in the range to clear
+// mark bits and compute and encode forwarding addresses.  As a side effect,
+// maximal free chunks are marked so that they can be skipped on subsequent
+// sweeps.
+//
+// The template parameters are an allocation function, a forwarding address
+// encoding function, and a function to process non-live objects.
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::EncodingFunction Encode,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
+                                             Address start,
+                                             Address end,
+                                             int* offset) {
+  // The start address of the current free region while sweeping the space.
+  // This address is set when a transition from live to non-live objects is
+  // encountered.  A value (an encoding of the 'next free region' pointer)
+  // is written to memory at this address when a transition from non-live to
+  // live objects is encountered.
+  Address free_start = NULL;
+
+  // A flag giving the state of the previously swept object.  Initially true
+  // to ensure that free_start is initialized to a proper address before
+  // trying to write to it.
+  bool is_prev_alive = true;
+
+  int object_size;  // Will be set on each iteration of the loop.
+  for (Address current = start; current < end; current += object_size) {
+    HeapObject* object = HeapObject::FromAddress(current);
+    if (object->IsMarked()) {
+      object->ClearMark();
+      collector->tracer()->decrement_marked_count();
+      object_size = object->Size();
+
+      Object* forwarded =
+          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
+      Encode(collector->heap(), object, object_size, forwarded, offset);
+
+#ifdef DEBUG
+      if (FLAG_gc_verbose) {
+        PrintF("forward %p -> %p.\n", object->address(),
+               HeapObject::cast(forwarded)->address());
+      }
+#endif
+      if (!is_prev_alive) {  // Transition from non-live to live.
+        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
+        is_prev_alive = true;
+      }
+    } else {  // Non-live object.
+      object_size = object->Size();
+      ProcessNonLive(object, collector->heap()->isolate());
+      if (is_prev_alive) {  // Transition from live to non-live.
+        free_start = current;
+        is_prev_alive = false;
+      }
+      LiveObjectList::ProcessNonLive(object);
+    }
+  }
+
+  // If we ended on a free region, mark it.
+  if (!is_prev_alive) {
+    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
+  }
+}
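
Stripped of allocation and encoding, the loop above is a two-state machine over live/dead transitions. The same state machine, reduced to a bool array so the run-detection logic stands alone:

    #include <cstdio>
    #include <vector>

    // Walk the space once and report each maximal run of dead slots, so it
    // could be encoded as a single free region.
    static void FindFreeRegions(const std::vector<bool>& live) {
      bool prev_alive = true;  // ensures free_start is set before first use
      std::size_t free_start = 0;
      for (std::size_t i = 0; i < live.size(); i++) {
        if (live[i]) {
          if (!prev_alive) {  // transition dead -> live: close the region
            std::printf("free region [%zu, %zu)\n", free_start, i);
            prev_alive = true;
          }
        } else if (prev_alive) {  // transition live -> dead: open a region
          free_start = i;
          prev_alive = false;
        }
      }
      if (!prev_alive) {  // the space ended inside a free region
        std::printf("free region [%zu, %zu)\n", free_start, live.size());
      }
    }
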
+
+
+// Functions to encode the forwarding pointers in each compactable space.
+void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
+  int ignored;
+  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
+                                   EncodeForwardingAddressInNewSpace,
+                                   IgnoreNonLiveObject>(
+      this,
+      heap()->new_space()->bottom(),
+      heap()->new_space()->top(),
+      &ignored);
+}
+
+
+template<MarkCompactCollector::AllocationFunction Alloc,
+         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
+void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
+    PagedSpace* space) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    // The offset of each live object in the page from the first live object
+    // in the page.
+    int offset = 0;
+    EncodeForwardingAddressesInRange<Alloc,
+                                     EncodeForwardingAddressInPagedSpace,
+                                     ProcessNonLive>(
+        this,
+        p->ObjectAreaStart(),
+        p->AllocationTop(),
+        &offset);
+  }
+}
+
 
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
-//
 // The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space.  Forwarding address is written directly into
-// first word of object without any encoding.  If object is dead we write
+// promotes them to old space. Forwarding address is written directly into
+// first word of object without any encoding. If object is dead we write
 // NULL as a forwarding address.
-//
-// The second pass updates pointers to new space in all spaces.  It is possible
-// to encounter pointers to dead new space objects during traversal of pointers
-// to new space.  We should clear them to avoid encountering them during next
-// pointer iteration.  This is an issue if the store buffer overflows and we
-// have to scan the entire old space, including dead objects, looking for
-// pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
-                                         Address src,
-                                         int size,
-                                         AllocationSpace dest) {
-  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
-  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
-    Address src_slot = src;
-    Address dst_slot = dst;
-    ASSERT(IsAligned(size, kPointerSize));
-
-    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
-      Object* value = Memory::Object_at(src_slot);
-
-      Memory::Object_at(dst_slot) = value;
-
-      if (heap_->InNewSpace(value)) {
-        heap_->store_buffer()->Mark(dst_slot);
-      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                           &migration_slots_buffer_,
-                           reinterpret_cast<Object**>(dst_slot),
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
-
-      src_slot += kPointerSize;
-      dst_slot += kPointerSize;
-    }
-
-    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
-      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
-      Address code_entry = Memory::Address_at(code_entry_slot);
-
-      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                           &migration_slots_buffer_,
-                           SlotsBuffer::CODE_ENTRY_SLOT,
-                           code_entry_slot,
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
-    }
-  } else if (dest == CODE_SPACE) {
-    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
-    heap()->MoveBlock(dst, src, size);
-    SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                       &migration_slots_buffer_,
-                       SlotsBuffer::RELOCATED_CODE_OBJECT,
-                       dst,
-                       SlotsBuffer::IGNORE_OVERFLOW);
-    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+// The second pass updates pointers to new space in all spaces. It is possible
+// to encounter pointers to dead objects during traversal of dirty regions;
+// we should clear them to avoid encountering them during the next iteration
+// over dirty regions.
+static void MigrateObject(Heap* heap,
+                          Address dst,
+                          Address src,
+                          int size,
+                          bool to_old_space) {
+  if (to_old_space) {
+    heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
   } else {
-    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
-    heap()->MoveBlock(dst, src, size);
+    heap->CopyBlock(dst, src, size);
   }
+
   Memory::Address_at(src) = dst;
 }
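
The whole two-pass scheme hinges on Memory::Address_at(src) = dst: the first word of the stale copy becomes its forwarding address, or NULL for a dead object. A byte-buffer model of that convention, with std::memcpy standing in for the heap's block-copy primitives:

    #include <cstring>

    // Copy the object, then stash the forwarding address in the first word
    // of the stale copy (the equivalent of Memory::Address_at(src) = dst).
    static void Migrate(char* dst, char* src, std::size_t size) {
      std::memcpy(dst, src, size);
      std::memcpy(src, &dst, sizeof(dst));
    }

    // Dead objects get NULL in their header instead.
    static void MarkDead(char* src) {
      char* null_ptr = nullptr;
      std::memcpy(src, &null_ptr, sizeof(null_ptr));
    }
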
 
 
+class StaticPointersToNewGenUpdatingVisitor : public
+  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
+ public:
+  static inline void VisitPointer(Heap* heap, Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    HeapObject* obj = HeapObject::cast(*p);
+    Address old_addr = obj->address();
+
+    if (heap->new_space()->Contains(obj)) {
+      ASSERT(heap->InFromSpace(*p));
+      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
+    }
+  }
+};
+
+
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
+class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
  public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
+  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
 
   void VisitPointer(Object** p) {
-    UpdatePointer(p);
+    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
   }
 
   void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    VisitPointer(&target);
-    rinfo->set_target_object(target);
+    for (Object** p = start; p < end; p++) {
+      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+    }
   }
 
   void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2728,96 +2172,68 @@
     rinfo->set_call_address(Code::cast(target)->instruction_start());
   }
 
-  static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = *slot;
-
-    if (!obj->IsHeapObject()) return;
-
-    HeapObject* heap_obj = HeapObject::cast(obj);
-
-    MapWord map_word = heap_obj->map_word();
-    if (map_word.IsForwardingAddress()) {
-      ASSERT(heap->InFromSpace(heap_obj) ||
-             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
-      HeapObject* target = map_word.ToForwardingAddress();
-      *slot = target;
-      ASSERT(!heap->InFromSpace(target) &&
-             !MarkCompactCollector::IsOnEvacuationCandidate(target));
-    }
-  }
-
  private:
-  inline void UpdatePointer(Object** p) {
-    UpdateSlot(heap_, p);
-  }
-
   Heap* heap_;
 };
 
 
-static void UpdatePointer(HeapObject** p, HeapObject* object) {
-  ASSERT(*p == object);
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It can encounter pointers to dead objects in new space when traversing map
+// space (see comment for MigrateObject).
+static void UpdatePointerToNewGen(HeapObject** p) {
+  if (!(*p)->IsHeapObject()) return;
 
-  Address old_addr = object->address();
+  Address old_addr = (*p)->address();
+  ASSERT(HEAP->InFromSpace(*p));
 
   Address new_addr = Memory::Address_at(old_addr);
 
-  // The new space sweep will overwrite the map word of dead objects
-  // with NULL. In this case we do not need to transfer this entry to
-  // the store buffer which we are rebuilding.
-  if (new_addr != NULL) {
-    *p = HeapObject::FromAddress(new_addr);
+  if (new_addr == NULL) {
+    // We encountered a pointer to a dead object. Clear it so we will
+    // not visit it again during the next iteration of dirty regions.
+    *p = NULL;
   } else {
-    // We have to zap this pointer, because the store buffer may overflow later,
-    // and then we have to scan the entire heap and we don't want to find
-    // spurious newspace pointers in the old space.
-    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
+    *p = HeapObject::FromAddress(new_addr);
   }
 }
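
The matching second-pass update is then a single indirection through the old object's first word, with the NULL case clearing the slot. A sketch that pairs with the Migrate/MarkDead model above:

    #include <cstring>

    // Read the forwarding address out of the old object's first word; NULL
    // there means the object died, so the slot itself is cleared to keep
    // later dirty-region scans from revisiting it.
    static void UpdateSlot(char** slot) {
      if (*slot == nullptr) return;
      char* forwarded;
      std::memcpy(&forwarded, *slot, sizeof(forwarded));
      *slot = forwarded;  // the new address, or NULL for a dead object
    }
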
 
 
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
-                                                         Object** p) {
-  MapWord map_word = HeapObject::cast(*p)->map_word();
-
-  if (map_word.IsForwardingAddress()) {
-    return String::cast(map_word.ToForwardingAddress());
-  }
-
-  return String::cast(*p);
+static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+                                                                 Object** p) {
+  Address old_addr = HeapObject::cast(*p)->address();
+  Address new_addr = Memory::Address_at(old_addr);
+  return String::cast(HeapObject::FromAddress(new_addr));
 }
 
 
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
-                                            int object_size) {
+static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
   Object* result;
 
-  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
+  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
     MaybeObject* maybe_result =
-        heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
+        heap->lo_space()->AllocateRawFixedArray(object_size);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(target->address(),
-                    object->address(),
-                    object_size,
-                    LO_SPACE);
-      heap()->mark_compact_collector()->tracer()->
+      MigrateObject(heap, target->address(), object->address(), object_size,
+                    true);
+      heap->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
-    OldSpace* target_space = heap()->TargetSpace(object);
+    OldSpace* target_space = heap->TargetSpace(object);
 
-    ASSERT(target_space == heap()->old_pointer_space() ||
-           target_space == heap()->old_data_space());
+    ASSERT(target_space == heap->old_pointer_space() ||
+           target_space == heap->old_data_space());
     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(target->address(),
+      MigrateObject(heap,
+                    target->address(),
                     object->address(),
                     object_size,
-                    target_space->identity());
-      heap()->mark_compact_collector()->tracer()->
+                    target_space == heap->old_pointer_space());
+      heap->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
@@ -2827,562 +2243,84 @@
 }
 
 
-void MarkCompactCollector::EvacuateNewSpace() {
-  // There are soft limits in the allocation code, designed to trigger a mark
-  // sweep collection by failing allocations.  But since we are already in
-  // a mark-sweep allocation, there is no sense in trying to trigger one.
-  AlwaysAllocateScope scope;
-  heap()->CheckNewSpaceExpansionCriteria();
+static void SweepNewSpace(Heap* heap, NewSpace* space) {
+  heap->CheckNewSpaceExpansionCriteria();
 
-  NewSpace* new_space = heap()->new_space();
-
-  // Store allocation range before flipping semispaces.
-  Address from_bottom = new_space->bottom();
-  Address from_top = new_space->top();
+  Address from_bottom = space->bottom();
+  Address from_top = space->top();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  new_space->Flip();
-  new_space->ResetAllocationInfo();
+  space->Flip();
+  space->ResetAllocationInfo();
 
+  int size = 0;
   int survivors_size = 0;
 
   // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.  This stage puts
-  // new entries in the store buffer and may cause some pages to be marked
-  // scan-on-scavenge.
-  SemiSpaceIterator from_it(from_bottom, from_top);
-  for (HeapObject* object = from_it.Next();
-       object != NULL;
-       object = from_it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) {
-      mark_bit.Clear();
-      // Don't bother decrementing live bytes count. We'll discard the
-      // entire page at the end.
-      int size = object->Size();
+  // migrate live objects and write forwarding addresses.
+  for (Address current = from_bottom; current < from_top; current += size) {
+    HeapObject* object = HeapObject::FromAddress(current);
+
+    if (object->IsMarked()) {
+      object->ClearMark();
+      heap->mark_compact_collector()->tracer()->decrement_marked_count();
+
+      size = object->Size();
       survivors_size += size;
 
       // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(object, size)) {
+      if (TryPromoteObject(heap, object, size)) {
         continue;
       }
 
       // Promotion failed. Just migrate object to another semispace.
-      MaybeObject* allocation = new_space->AllocateRaw(size);
-      if (allocation->IsFailure()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room.
-          UNREACHABLE();
-        }
-        allocation = new_space->AllocateRaw(size);
-        ASSERT(!allocation->IsFailure());
-      }
-      Object* target = allocation->ToObjectUnchecked();
+      // Allocation cannot fail at this point: semispaces are of equal size.
+      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
 
-      MigrateObject(HeapObject::cast(target)->address(),
-                    object->address(),
+      MigrateObject(heap,
+                    HeapObject::cast(target)->address(),
+                    current,
                     size,
-                    NEW_SPACE);
+                    false);
     } else {
       // Process the dead object before we write a NULL into its header.
       LiveObjectList::ProcessNonLive(object);
 
-      // Mark dead objects in the new space with null in their map field.
-      Memory::Address_at(object->address()) = NULL;
+      size = object->Size();
+      Memory::Address_at(current) = NULL;
     }
   }
 
-  heap_->IncrementYoungSurvivorsCounter(survivors_size);
-  new_space->set_age_mark(new_space->top());
-}
-
-
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
-  AlwaysAllocateScope always_allocate;
-  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
-  MarkBit::CellType* cells = p->markbits()->cells();
-  p->MarkSweptPrecisely();
-
-  int last_cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->area_end())));
-
-  Address cell_base = p->area_start();
-  int cell_index = Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(cell_base)));
-
-  int offsets[16];
-
-  for (;
-       cell_index < last_cell_index;
-       cell_index++, cell_base += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
-        Bitmap::IndexToCell(
-            Bitmap::CellAlignIndex(
-                p->AddressToMarkbitIndex(cell_base))));
-    if (cells[cell_index] == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
-    for (int i = 0; i < live_objects; i++) {
-      Address object_addr = cell_base + offsets[i] * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(object_addr);
-      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-
-      MaybeObject* target = space->AllocateRaw(size);
-      if (target->IsFailure()) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
-      }
-
-      Object* target_object = target->ToObjectUnchecked();
-
-      MigrateObject(HeapObject::cast(target_object)->address(),
-                    object_addr,
-                    size,
-                    space->identity());
-      ASSERT(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    cells[cell_index] = 0;
-  }
-  p->ResetLiveBytes();
-}
-
-
-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    ASSERT(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page.
-      // Check that space still have room for that.
-      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
-        EvacuateLiveObjectsFromPage(p);
-      } else {
-        // Without room for expansion evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
-        }
-        return;
-      }
-    }
-  }
-}
-
-
-class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
- public:
-  virtual Object* RetainAs(Object* object) {
-    if (object->IsHeapObject()) {
-      HeapObject* heap_object = HeapObject::cast(object);
-      MapWord map_word = heap_object->map_word();
-      if (map_word.IsForwardingAddress()) {
-        return map_word.ToForwardingAddress();
-      }
-    }
-    return object;
-  }
-};
-
-
-static inline void UpdateSlot(ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type,
-                              Address addr) {
-  switch (slot_type) {
-    case SlotsBuffer::CODE_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(v);
-      break;
-    }
-    case SlotsBuffer::CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::cast(obj)->CodeIterateBody(v);
-      break;
-    }
-    case SlotsBuffer::DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
-      break;
-    }
-    case SlotsBuffer::JS_RETURN_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
-      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
-      break;
-    }
-    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(v);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-enum SweepingMode {
-  SWEEP_ONLY,
-  SWEEP_AND_VISIT_LIVE_OBJECTS
-};
-
-
-enum SkipListRebuildingMode {
-  REBUILD_SKIP_LIST,
-  IGNORE_SKIP_LIST
-};
-
-
-// Sweep a space precisely.  After this has been done the space can
-// be iterated precisely, hitting only the live objects.  Code space
-// is always swept precisely because we want to be able to iterate
-// over it.  Map space is swept precisely, because it is not compacted.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
-static void SweepPrecisely(PagedSpace* space,
-                           Page* p,
-                           ObjectVisitor* v) {
-  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
-            space->identity() == CODE_SPACE);
-  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  p->MarkSweptPrecisely();
-
-  int last_cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->area_end())));
-
-  Address free_start = p->area_start();
-  int cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(free_start)));
-
-  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
-  Address object_address = free_start;
-  int offsets[16];
-
-  SkipList* skip_list = p->skip_list();
-  int curr_region = -1;
-  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
-    skip_list->Clear();
-  }
-
-  for (;
-       cell_index < last_cell_index;
-       cell_index++, object_address += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
-        Bitmap::IndexToCell(
-            Bitmap::CellAlignIndex(
-                p->AddressToMarkbitIndex(object_address))));
-    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
-    int live_index = 0;
-    for ( ; live_objects != 0; live_objects--) {
-      Address free_end = object_address + offsets[live_index++] * kPointerSize;
-      if (free_end != free_start) {
-        space->Free(free_start, static_cast<int>(free_end - free_start));
-      }
-      HeapObject* live_object = HeapObject::FromAddress(free_end);
-      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
-      Map* map = live_object->map();
-      int size = live_object->SizeFromMap(map);
-      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
-        live_object->IterateBody(map->instance_type(), size, v);
-      }
-      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
-        int new_region_start =
-            SkipList::RegionNumber(free_end);
-        int new_region_end =
-            SkipList::RegionNumber(free_end + size - kPointerSize);
-        if (new_region_start != curr_region ||
-            new_region_end != curr_region) {
-          skip_list->AddObject(free_end, size);
-          curr_region = new_region_end;
-        }
-      }
-      free_start = free_end + size;
-    }
-    // Clear marking bits for current cell.
-    cells[cell_index] = 0;
-  }
-  if (free_start != p->area_end()) {
-    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
-  }
-  p->ResetLiveBytes();
-}
-
-
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
-  Page* p = Page::FromAddress(code->address());
-
-  if (p->IsEvacuationCandidate() ||
-      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-    return false;
-  }
-
-  Address code_start = code->address();
-  Address code_end = code_start + code->Size();
-
-  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
-  uint32_t end_index =
-      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
-  Bitmap* b = p->markbits();
-
-  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
-  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
-  MarkBit::CellType* start_cell = start_mark_bit.cell();
-  MarkBit::CellType* end_cell = end_mark_bit.cell();
-
-  if (value) {
-    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
-    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-
-    if (start_cell == end_cell) {
-      *start_cell |= start_mask & end_mask;
-    } else {
-      *start_cell |= start_mask;
-      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
-        *cell = ~0;
-      }
-      *end_cell |= end_mask;
-    }
-  } else {
-    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
-      *cell = 0;
-    }
-  }
-
-  return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
-  // We did not record any slots in large objects thus
-  // we can safely go to the page from the slot address.
-  Page* p = Page::FromAddress(addr);
-
-  // First check owner's identity because old pointer and old data spaces
-  // are swept lazily and might still have non-zero mark-bits on some
-  // pages.
-  if (p->owner()->identity() != CODE_SPACE) return false;
-
-  // In code space only bits on evacuation candidates (but we don't record
-  // any slots on them) and under invalidated code objects are non-zero.
-  MarkBit mark_bit =
-      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
-  return mark_bit.Get();
-}
-
-
-void MarkCompactCollector::InvalidateCode(Code* code) {
-  if (heap_->incremental_marking()->IsCompacting() &&
-      !ShouldSkipEvacuationSlotRecording(code)) {
-    ASSERT(compacting_);
-
-    // If the object is white then no slots were recorded on it yet.
-    MarkBit mark_bit = Marking::MarkBitFrom(code);
-    if (Marking::IsWhite(mark_bit)) return;
-
-    invalidated_code_.Add(code);
-  }
-}
-
-
-bool MarkCompactCollector::MarkInvalidatedCode() {
-  bool code_marked = false;
-
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-
-    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
-      code_marked = true;
-    }
-  }
-
-  return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
-  }
-}
-
-
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-    if (code != NULL) {
-      code->Iterate(visitor);
-      SetMarkBitsUnderInvalidatedCode(code, false);
-    }
-  }
-  invalidated_code_.Rewind(0);
-}
-
-
-void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
-  bool code_slots_filtering_required;
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    code_slots_filtering_required = MarkInvalidatedCode();
-
-    EvacuateNewSpace();
-  }
-
-
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
-    EvacuatePages();
-  }
-
   // Second pass: find pointers to new space and update them.
-  PointersUpdatingVisitor updating_visitor(heap());
+  PointersToNewGenUpdatingVisitor updating_visitor(heap);
 
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
-    // Update pointers in to space.
-    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
-                            heap()->new_space()->top());
-    for (HeapObject* object = to_it.Next();
-         object != NULL;
-         object = to_it.Next()) {
-      Map* map = object->map();
-      object->IterateBody(map->instance_type(),
-                          object->SizeFromMap(map),
-                          &updating_visitor);
-    }
+  // Update pointers in to space.
+  Address current = space->bottom();
+  while (current < space->top()) {
+    HeapObject* object = HeapObject::FromAddress(current);
+    current +=
+        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
+                                                           object);
   }
 
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
-    // Update roots.
-    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-    LiveObjectList::IterateElements(&updating_visitor);
-  }
+  // Update roots.
+  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+  LiveObjectList::IterateElements(&updating_visitor);
 
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(heap_,
-                                  heap_->store_buffer(),
-                                  &Heap::ScavengeStoreBufferCallback);
-    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
-  }
+  // Update pointers in old spaces.
+  heap->IterateDirtyRegions(heap->old_pointer_space(),
+                            &Heap::IteratePointersInDirtyRegion,
+                            &UpdatePointerToNewGen,
+                            heap->WATERMARK_SHOULD_BE_VALID);
 
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
-    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                       migration_slots_buffer_,
-                                       code_slots_filtering_required);
-    if (FLAG_trace_fragmentation) {
-      PrintF("  migration slots buffer: %d\n",
-             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-    }
-
-    if (compacting_ && was_marked_incrementally_) {
-      // It's difficult to filter out slots recorded for large objects.
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        // LargeObjectSpace is not swept yet thus we have to skip
-        // dead objects explicitly.
-        if (!IsMarked(obj)) continue;
-
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          obj->Iterate(&updating_visitor);
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
-  }
-
-  int npages = evacuation_candidates_.length();
-  { GCTracer::Scope gc_scope(
-      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      ASSERT(p->IsEvacuationCandidate() ||
-             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-
-      if (p->IsEvacuationCandidate()) {
-        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                           p->slots_buffer(),
-                                           code_slots_filtering_required);
-        if (FLAG_trace_fragmentation) {
-          PrintF("  page %p slots buffer: %d\n",
-                 reinterpret_cast<void*>(p),
-                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
-        }
-
-        // Important: skip list should be cleared only after roots were updated
-        // because root iteration traverses the stack and might have to find
-        // code objects from non-updated pc pointing into evacuation candidate.
-        SkipList* list = p->skip_list();
-        if (list != NULL) list->Clear();
-      } else {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-
-        switch (space->identity()) {
-          case OLD_DATA_SPACE:
-            SweepConservatively(space, p);
-            break;
-          case OLD_POINTER_SPACE:
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
-                space, p, &updating_visitor);
-            break;
-          case CODE_SPACE:
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
-                space, p, &updating_visitor);
-            break;
-          default:
-            UNREACHABLE();
-            break;
-        }
-      }
-    }
-  }
-
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
 
   // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap_->cell_space());
-  for (HeapObject* cell = cell_iterator.Next();
+  HeapObjectIterator cell_iterator(heap->cell_space());
+  for (HeapObject* cell = cell_iterator.next();
        cell != NULL;
-       cell = cell_iterator.Next()) {
+       cell = cell_iterator.next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -3392,575 +2330,1020 @@
   }
 
   // Update pointer from the global contexts list.
-  updating_visitor.VisitPointer(heap_->global_contexts_list_address());
-
-  heap_->symbol_table()->Iterate(&updating_visitor);
+  updating_visitor.VisitPointer(heap->global_contexts_list_address());
 
   // Update pointers from external string table.
-  heap_->UpdateReferencesInExternalStringTable(
-      &UpdateReferenceInExternalStringTableEntry);
+  heap->UpdateNewSpaceReferencesInExternalStringTable(
+      &UpdateNewSpaceReferenceInExternalStringTableEntry);
 
-  if (!FLAG_watch_ic_patching) {
-    // Update JSFunction pointers from the runtime profiler.
-    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
-        &updating_visitor);
-  }
+  // All pointers were updated. Update auxiliary allocation info.
+  heap->IncrementYoungSurvivorsCounter(survivors_size);
+  space->set_age_mark(space->top());
 
-  EvacuationWeakObjectRetainer evacuation_object_retainer;
-  heap()->ProcessWeakReferences(&evacuation_object_retainer);
-
-  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
-  // under it.
-  ProcessInvalidatedCode(&updating_visitor);
-
-#ifdef DEBUG
-  if (FLAG_verify_heap) {
-    VerifyEvacuation(heap_);
-  }
-#endif
-
-  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
-  ASSERT(migration_slots_buffer_ == NULL);
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    if (!p->IsEvacuationCandidate()) continue;
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->area_start(), p->area_size());
-    p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-    p->ResetLiveBytes();
-    space->ReleasePage(p);
-  }
-  evacuation_candidates_.Rewind(0);
-  compacting_ = false;
+  // Update JSFunction pointers from the runtime profiler.
+  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
 }
 
 
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
+static void SweepSpace(Heap* heap, PagedSpace* space) {
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
 
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte.  The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
-  0, _, _, _, _,  // 0
-  1, 0, _, _, _,  // 1
-  1, 1, _, _, _,  // 2
-  X, _, _, _, _,  // 3
-  1, 2, _, _, _,  // 4
-  2, 0, 2, _, _,  // 5
-  X, _, _, _, _,  // 6
-  X, _, _, _, _,  // 7
-  1, 3, _, _, _,  // 8
-  2, 0, 3, _, _,  // 9
-  2, 1, 3, _, _,  // 10
-  X, _, _, _, _,  // 11
-  X, _, _, _, _,  // 12
-  X, _, _, _, _,  // 13
-  X, _, _, _, _,  // 14
-  X, _, _, _, _,  // 15
-  1, 4, _, _, _,  // 16
-  2, 0, 4, _, _,  // 17
-  2, 1, 4, _, _,  // 18
-  X, _, _, _, _,  // 19
-  2, 2, 4, _, _,  // 20
-  3, 0, 2, 4, _,  // 21
-  X, _, _, _, _,  // 22
-  X, _, _, _, _,  // 23
-  X, _, _, _, _,  // 24
-  X, _, _, _, _,  // 25
-  X, _, _, _, _,  // 26
-  X, _, _, _, _,  // 27
-  X, _, _, _, _,  // 28
-  X, _, _, _, _,  // 29
-  X, _, _, _, _,  // 30
-  X, _, _, _, _,  // 31
-  1, 5, _, _, _,  // 32
-  2, 0, 5, _, _,  // 33
-  2, 1, 5, _, _,  // 34
-  X, _, _, _, _,  // 35
-  2, 2, 5, _, _,  // 36
-  3, 0, 2, 5, _,  // 37
-  X, _, _, _, _,  // 38
-  X, _, _, _, _,  // 39
-  2, 3, 5, _, _,  // 40
-  3, 0, 3, 5, _,  // 41
-  3, 1, 3, 5, _,  // 42
-  X, _, _, _, _,  // 43
-  X, _, _, _, _,  // 44
-  X, _, _, _, _,  // 45
-  X, _, _, _, _,  // 46
-  X, _, _, _, _,  // 47
-  X, _, _, _, _,  // 48
-  X, _, _, _, _,  // 49
-  X, _, _, _, _,  // 50
-  X, _, _, _, _,  // 51
-  X, _, _, _, _,  // 52
-  X, _, _, _, _,  // 53
-  X, _, _, _, _,  // 54
-  X, _, _, _, _,  // 55
-  X, _, _, _, _,  // 56
-  X, _, _, _, _,  // 57
-  X, _, _, _, _,  // 58
-  X, _, _, _, _,  // 59
-  X, _, _, _, _,  // 60
-  X, _, _, _, _,  // 61
-  X, _, _, _, _,  // 62
-  X, _, _, _, _,  // 63
-  1, 6, _, _, _,  // 64
-  2, 0, 6, _, _,  // 65
-  2, 1, 6, _, _,  // 66
-  X, _, _, _, _,  // 67
-  2, 2, 6, _, _,  // 68
-  3, 0, 2, 6, _,  // 69
-  X, _, _, _, _,  // 70
-  X, _, _, _, _,  // 71
-  2, 3, 6, _, _,  // 72
-  3, 0, 3, 6, _,  // 73
-  3, 1, 3, 6, _,  // 74
-  X, _, _, _, _,  // 75
-  X, _, _, _, _,  // 76
-  X, _, _, _, _,  // 77
-  X, _, _, _, _,  // 78
-  X, _, _, _, _,  // 79
-  2, 4, 6, _, _,  // 80
-  3, 0, 4, 6, _,  // 81
-  3, 1, 4, 6, _,  // 82
-  X, _, _, _, _,  // 83
-  3, 2, 4, 6, _,  // 84
-  4, 0, 2, 4, 6,  // 85
-  X, _, _, _, _,  // 86
-  X, _, _, _, _,  // 87
-  X, _, _, _, _,  // 88
-  X, _, _, _, _,  // 89
-  X, _, _, _, _,  // 90
-  X, _, _, _, _,  // 91
-  X, _, _, _, _,  // 92
-  X, _, _, _, _,  // 93
-  X, _, _, _, _,  // 94
-  X, _, _, _, _,  // 95
-  X, _, _, _, _,  // 96
-  X, _, _, _, _,  // 97
-  X, _, _, _, _,  // 98
-  X, _, _, _, _,  // 99
-  X, _, _, _, _,  // 100
-  X, _, _, _, _,  // 101
-  X, _, _, _, _,  // 102
-  X, _, _, _, _,  // 103
-  X, _, _, _, _,  // 104
-  X, _, _, _, _,  // 105
-  X, _, _, _, _,  // 106
-  X, _, _, _, _,  // 107
-  X, _, _, _, _,  // 108
-  X, _, _, _, _,  // 109
-  X, _, _, _, _,  // 110
-  X, _, _, _, _,  // 111
-  X, _, _, _, _,  // 112
-  X, _, _, _, _,  // 113
-  X, _, _, _, _,  // 114
-  X, _, _, _, _,  // 115
-  X, _, _, _, _,  // 116
-  X, _, _, _, _,  // 117
-  X, _, _, _, _,  // 118
-  X, _, _, _, _,  // 119
-  X, _, _, _, _,  // 120
-  X, _, _, _, _,  // 121
-  X, _, _, _, _,  // 122
-  X, _, _, _, _,  // 123
-  X, _, _, _, _,  // 124
-  X, _, _, _, _,  // 125
-  X, _, _, _, _,  // 126
-  X, _, _, _, _,  // 127
-  1, 7, _, _, _,  // 128
-  2, 0, 7, _, _,  // 129
-  2, 1, 7, _, _,  // 130
-  X, _, _, _, _,  // 131
-  2, 2, 7, _, _,  // 132
-  3, 0, 2, 7, _,  // 133
-  X, _, _, _, _,  // 134
-  X, _, _, _, _,  // 135
-  2, 3, 7, _, _,  // 136
-  3, 0, 3, 7, _,  // 137
-  3, 1, 3, 7, _,  // 138
-  X, _, _, _, _,  // 139
-  X, _, _, _, _,  // 140
-  X, _, _, _, _,  // 141
-  X, _, _, _, _,  // 142
-  X, _, _, _, _,  // 143
-  2, 4, 7, _, _,  // 144
-  3, 0, 4, 7, _,  // 145
-  3, 1, 4, 7, _,  // 146
-  X, _, _, _, _,  // 147
-  3, 2, 4, 7, _,  // 148
-  4, 0, 2, 4, 7,  // 149
-  X, _, _, _, _,  // 150
-  X, _, _, _, _,  // 151
-  X, _, _, _, _,  // 152
-  X, _, _, _, _,  // 153
-  X, _, _, _, _,  // 154
-  X, _, _, _, _,  // 155
-  X, _, _, _, _,  // 156
-  X, _, _, _, _,  // 157
-  X, _, _, _, _,  // 158
-  X, _, _, _, _,  // 159
-  2, 5, 7, _, _,  // 160
-  3, 0, 5, 7, _,  // 161
-  3, 1, 5, 7, _,  // 162
-  X, _, _, _, _,  // 163
-  3, 2, 5, 7, _,  // 164
-  4, 0, 2, 5, 7,  // 165
-  X, _, _, _, _,  // 166
-  X, _, _, _, _,  // 167
-  3, 3, 5, 7, _,  // 168
-  4, 0, 3, 5, 7,  // 169
-  4, 1, 3, 5, 7   // 170
-};
-#undef _
-#undef X
+  // During sweeping of paged space we try to find the longest sequences
+  // of pages without live objects and free them (instead of putting them
+  // on the free list).
 
+  // Page preceding current.
+  Page* prev = Page::FromAddress(NULL);
 
-// Takes a word of mark bits.  Returns the number of objects that start in the
-// range.  Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
-  int objects = 0;
-  int offset = 0;
+  // First empty page in a sequence.
+  Page* first_empty_page = Page::FromAddress(NULL);
 
-  // No consecutive 1 bits.
-  ASSERT((mark_bits & 0x180) != 0x180);
-  ASSERT((mark_bits & 0x18000) != 0x18000);
-  ASSERT((mark_bits & 0x1800000) != 0x1800000);
+  // Page preceding first empty page.
+  Page* prec_first_empty_page = Page::FromAddress(NULL);
 
-  while (mark_bits != 0) {
-    int byte = (mark_bits & 0xff);
-    mark_bits >>= 8;
-    if (byte != 0) {
-      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
-      char* table = kStartTable + byte * kStartTableEntriesPerLine;
-      int objects_in_these_8_words = table[0];
-      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
-      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
-      for (int i = 0; i < objects_in_these_8_words; i++) {
-        starts[objects++] = offset + table[1 + i];
-      }
-    }
-    offset += 8;
-  }
-  return objects;
-}
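
The table being removed here is only a speed trick: each set bit in a mark word is the word offset of an object start, and the table decodes eight bits at a time. A plain bit-scan computes the same answer (ignoring the table's no-consecutive-bits density constraints), one bit at a time:

    #include <cstdint>

    // Each set bit in the mark word is the word offset of an object start.
    static int MarkWordToStarts(uint32_t mark_bits, int* starts) {
      int objects = 0;
      for (int offset = 0; offset < 32; offset++) {
        if (mark_bits & (1u << offset)) starts[objects++] = offset;
      }
      return objects;
    }
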
-
-
-static inline Address DigestFreeStart(Address approximate_free_start,
-                                      uint32_t free_start_cell) {
-  ASSERT(free_start_cell != 0);
-
-  // No consecutive 1 bits.
-  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
-
-  int offsets[16];
-  uint32_t cell = free_start_cell;
-  int offset_of_last_live;
-  if ((cell & 0x80000000u) != 0) {
-    // This case would overflow below.
-    offset_of_last_live = 31;
-  } else {
-    // Remove all but one bit, the most significant.  This is an optimization
-    // that may or may not be worthwhile.
-    cell |= cell >> 16;
-    cell |= cell >> 8;
-    cell |= cell >> 4;
-    cell |= cell >> 2;
-    cell |= cell >> 1;
-    cell = (cell + 1) >> 1;
-    int live_objects = MarkWordToObjectStarts(cell, offsets);
-    ASSERT(live_objects == 1);
-    offset_of_last_live = offsets[live_objects - 1];
-  }
-  Address last_live_start =
-      approximate_free_start + offset_of_last_live * kPointerSize;
-  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
-  Address free_start = last_live_start + last_live->Size();
-  return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
-  ASSERT(cell != 0);
-
-  // No consecutive 1 bits.
-  ASSERT((cell & (cell << 1)) == 0);
-
-  int offsets[16];
-  if (cell == 0x80000000u) {  // Avoid overflow below.
-    return block_address + 31 * kPointerSize;
-  }
-  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
-  ASSERT((first_set_bit & cell) == first_set_bit);
-  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
-  ASSERT(live_objects == 1);
-  USE(live_objects);
-  return block_address + offsets[0] * kPointerSize;
-}
-
-
-// Sweeps a space conservatively.  After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched.  A free space is always either ignored or put
-// on the free list, never split up into two parts.  This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning.  Dead objects other than free
-// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
-  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  MarkBit::CellType* cells = p->markbits()->cells();
-  p->MarkSweptConservatively();
-
-  int last_cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->area_end())));
-
-  int cell_index =
-      Bitmap::IndexToCell(
-          Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->area_start())));
-
-  intptr_t freed_bytes = 0;
-
-  // This is the start of the 32 word block that we are currently looking at.
-  Address block_address = p->area_start();
-
-  // Skip over all the dead objects at the start of the page and mark them free.
-  for (;
-       cell_index < last_cell_index;
-       cell_index++, block_address += 32 * kPointerSize) {
-    if (cells[cell_index] != 0) break;
-  }
-  size_t size = block_address - p->area_start();
-  if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->area_start(),
-                                                static_cast<int>(size)));
-    ASSERT_EQ(0, p->LiveBytes());
-    return freed_bytes;
-  }
-  // Grow the size of the start-of-page free space a little to get up to the
-  // first live object.
-  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
-  // Free the first free space.
-  size = free_end - p->area_start();
-  freed_bytes += space->Free(p->area_start(),
-                             static_cast<int>(size));
-  // The start of the current free area is represented in undigested form by
-  // the address of the last 32-word section that contained a live object and
-  // the marking bitmap for that cell, which describes where the live object
-  // started.  Unless we find a large free space in the bitmap we will not
-  // digest this pair into a real address.  We start the iteration here at the
-  // first word in the marking bit map that indicates a live object.
-  Address free_start = block_address;
-  uint32_t free_start_cell = cells[cell_index];
-
-  for ( ;
-       cell_index < last_cell_index;
-       cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT((unsigned)cell_index ==
-        Bitmap::IndexToCell(
-            Bitmap::CellAlignIndex(
-                p->AddressToMarkbitIndex(block_address))));
-    uint32_t cell = cells[cell_index];
-    if (cell != 0) {
-      // We have a live object.  Check approximately whether it is more than 32
-      // words since the last live object.
-      if (block_address - free_start > 32 * kPointerSize) {
-        free_start = DigestFreeStart(free_start, free_start_cell);
-        if (block_address - free_start > 32 * kPointerSize) {
-          // Now that we know the exact start of the free space it still looks
-          // like we have a large enough free space to be worth bothering with.
-          // so now we need to find the start of the first live object at the
-          // end of the free space.
-          free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start,
-                                     static_cast<int>(free_end - free_start));
-        }
-      }
-      // Update our undigested record of where the current free area started.
-      free_start = block_address;
-      free_start_cell = cell;
-      // Clear marking bits for current cell.
-      cells[cell_index] = 0;
-    }
-  }
-
-  // Handle the free space at the end of the page.
-  if (block_address - free_start > 32 * kPointerSize) {
-    free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
-  }
-
-  p->ResetLiveBytes();
-  return freed_bytes;
-}
-
-
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
-                                      sweeper == LAZY_CONSERVATIVE);
-
-  space->ClearStats();
-
-  PageIterator it(space);
-
-  intptr_t freed_bytes = 0;
-  int pages_swept = 0;
-  intptr_t newspace_size = space->heap()->new_space()->Size();
-  bool lazy_sweeping_active = false;
-  bool unused_page_present = false;
-
-  intptr_t old_space_size = heap()->PromotedSpaceSize();
-  intptr_t space_left =
-      Min(heap()->OldGenPromotionLimit(old_space_size),
-          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
+  // If the last used page of the space ends with a sequence of dead objects
+  // we can adjust the allocation top instead of putting this free area into
+  // the free list. Thus during sweeping we keep track of such areas
+  // and defer their deallocation until the sweeping of the next page
+  // is done: if one of the next pages contains live objects we have
+  // to put such an area into the free list.
+  Address last_free_start = NULL;
+  int last_free_size = 0;
 
   while (it.has_next()) {
     Page* p = it.next();
 
-    // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearSweptPrecisely();
-    p->ClearSweptConservatively();
+    bool is_previous_alive = true;
+    Address free_start = NULL;
+    HeapObject* object;
 
-    if (p->IsEvacuationCandidate()) {
-      ASSERT(evacuation_candidates_.length() > 0);
-      continue;
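+    // Walk the page linearly, tracking live/dead transitions.  Each
+    // maximal run of dead objects is handed back to the space as a single
+    // free block.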
+    for (Address current = p->ObjectAreaStart();
+         current < p->AllocationTop();
+         current += object->Size()) {
+      object = HeapObject::FromAddress(current);
+      if (object->IsMarked()) {
+        object->ClearMark();
+        heap->mark_compact_collector()->tracer()->decrement_marked_count();
+
+        if (!is_previous_alive) {  // Transition from free to live.
+          space->DeallocateBlock(free_start,
+                                 static_cast<int>(current - free_start),
+                                 true);
+          is_previous_alive = true;
+        }
+      } else {
+        heap->mark_compact_collector()->ReportDeleteIfNeeded(
+            object, heap->isolate());
+        if (is_previous_alive) {  // Transition from live to free.
+          free_start = current;
+          is_previous_alive = false;
+        }
+        LiveObjectList::ProcessNonLive(object);
+      }
+      // The object is now unmarked for the call to Size() at the top of the
+      // loop.
     }
 
-    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-      // Will be processed in EvacuateNewSpaceAndCandidates.
-      continue;
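+    // A page is empty if it never contained objects or if everything on
+    // it is dead, i.e. a single free run covers the whole object area.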
+    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
+        || (!is_previous_alive && free_start == p->ObjectAreaStart());
+
+    if (page_is_empty) {
+      // This page is empty. Check whether we are in the middle of a
+      // sequence of empty pages and start one if not.
+      if (!first_empty_page->is_valid()) {
+        first_empty_page = p;
+        prec_first_empty_page = prev;
+      }
+
+      if (!is_previous_alive) {
+        // There are dead objects on this page. Update space accounting stats
+        // without putting anything into the free list.
+        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
+        if (size_in_bytes > 0) {
+          space->DeallocateBlock(free_start, size_in_bytes, false);
+        }
+      }
+    } else {
+      // This page is not empty. The sequence of empty pages ended on the
+      // previous one.
+      if (first_empty_page->is_valid()) {
+        space->FreePages(prec_first_empty_page, prev);
+        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
+      }
+
+      // If there is a free ending area on one of the previous pages we have
+      // to deallocate that area and put it on the free list.
+      if (last_free_size > 0) {
+        Page::FromAddress(last_free_start)->
+            SetAllocationWatermark(last_free_start);
+        space->DeallocateBlock(last_free_start, last_free_size, true);
+        last_free_start = NULL;
+        last_free_size  = 0;
+      }
+
+      // If the last region of this page was not live, we remember it.
+      if (!is_previous_alive) {
+        ASSERT(last_free_size == 0);
+        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
+        last_free_start = free_start;
+      }
     }
 
-    // One unused page is kept, all further are released before sweeping them.
-    if (p->LiveBytes() == 0) {
-      if (unused_page_present) {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        // Adjust unswept free bytes because releasing a page expects said
-        // counter to be accurate for unswept pages.
-        space->IncreaseUnsweptFreeBytes(p);
-        space->ReleasePage(p);
-        continue;
-      }
-      unused_page_present = true;
-    }
-
-    if (lazy_sweeping_active) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      space->IncreaseUnsweptFreeBytes(p);
-      continue;
-    }
-
-    switch (sweeper) {
-      case CONSERVATIVE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        SweepConservatively(space, p);
-        pages_swept++;
-        break;
-      }
-      case LAZY_CONSERVATIVE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        freed_bytes += SweepConservatively(space, p);
-        pages_swept++;
-        if (space_left + freed_bytes > newspace_size) {
-          space->SetPagesToSweep(p->next_page());
-          lazy_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
-                   freed_bytes);
-          }
-        }
-        break;
-      }
-      case PRECISE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        if (space->identity() == CODE_SPACE) {
-          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
-        } else {
-          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
-        }
-        pages_swept++;
-        break;
-      }
-      default: {
-        UNREACHABLE();
-      }
-    }
+    prev = p;
   }
 
-  if (FLAG_gc_verbose) {
-    PrintF("SweepSpace: %s (%d pages swept)\n",
-           AllocationSpaceName(space->identity()),
-           pages_swept);
+  // We reached the end of the space.  See if we need to adjust the
+  // allocation top.
+  Address new_allocation_top = NULL;
+
+  if (first_empty_page->is_valid()) {
+    // The last used pages in the space are empty. We can move the
+    // allocation top backwards to the beginning of the first empty page.
+    ASSERT(prev == space->AllocationTopPage());
+
+    new_allocation_top = first_empty_page->ObjectAreaStart();
   }
 
-  // Give pages that are queued to be freed back to the OS.
-  heap()->FreeQueuedChunks();
+  if (last_free_size > 0) {
+    // There was a free ending area on the previous page.
+    // Deallocate it without putting it into the free list and move the
+    // allocation top to the beginning of this free area.
+    space->DeallocateBlock(last_free_start, last_free_size, false);
+    new_allocation_top = last_free_start;
+  }
+
+  if (new_allocation_top != NULL) {
+#ifdef DEBUG
+    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
+    if (!first_empty_page->is_valid()) {
+      ASSERT(new_allocation_top_page == space->AllocationTopPage());
+    } else if (last_free_size > 0) {
+      ASSERT(new_allocation_top_page == prec_first_empty_page);
+    } else {
+      ASSERT(new_allocation_top_page == first_empty_page);
+    }
+#endif
+
+    space->SetTop(new_allocation_top);
+  }
 }
 
 
+void MarkCompactCollector::EncodeForwardingAddresses() {
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  // Objects in the active semispace of the young generation may be
+  // relocated to the inactive semispace (if not promoted).  Set the
+  // relocation info to the beginning of the inactive semispace.
+  heap()->new_space()->MCResetRelocationInfo();
+
+  // Compute the forwarding pointers in each space.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
+                                        ReportDeleteIfNeeded>(
+      heap()->old_pointer_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
+                                        IgnoreNonLiveObject>(
+      heap()->old_data_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
+                                        ReportDeleteIfNeeded>(
+      heap()->code_space());
+
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
+                                        IgnoreNonLiveObject>(
+      heap()->cell_space());
+
+  // Compute the new space next to last, after the old and code spaces have
+  // been compacted.  Objects in the new space can be promoted to the old or
+  // code space.
+  EncodeForwardingAddressesInNewSpace();
+
+  // Compute map space last because computing forwarding addresses
+  // overwrites non-live objects.  Objects in the other spaces rely on
+  // non-live map pointers to get the sizes of non-live objects.
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
+                                        IgnoreNonLiveObject>(
+      heap()->map_space());
+
+  // Write relocation info to the top page, so we can use it later.  This is
+  // done after promoting objects from the new space so we get the correct
+  // allocation top.
+  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
+  heap()->old_data_space()->MCWriteRelocationInfoToPage();
+  heap()->code_space()->MCWriteRelocationInfoToPage();
+  heap()->map_space()->MCWriteRelocationInfoToPage();
+  heap()->cell_space()->MCWriteRelocationInfoToPage();
+}
+
+
+class MapIterator : public HeapObjectIterator {
+ public:
+  explicit MapIterator(Heap* heap)
+    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
+
+  MapIterator(Heap* heap, Address start)
+      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
+
+ private:
+  static int SizeCallback(HeapObject* unused) {
+    USE(unused);
+    return Map::kSize;
+  }
+};
+
+
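+// Compacts the map space in place: live maps that lie above the compaction
+// top are evacuated into vacant (free-list) cells below it, leaving
+// forwarding addresses behind in the evacuated maps' map words.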
+class MapCompact {
+ public:
+  explicit MapCompact(Heap* heap, int live_maps)
+    : heap_(heap),
+      live_maps_(live_maps),
+      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
+      vacant_map_it_(heap),
+      map_to_evacuate_it_(heap, to_evacuate_start_),
+      first_map_to_evacuate_(
+          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
+  }
+
+  void CompactMaps() {
+    // As we know the number of maps to evacuate beforehand,
+    // we stop when there are no more vacant maps.
+    for (Map* next_vacant_map = NextVacantMap();
+         next_vacant_map;
+         next_vacant_map = NextVacantMap()) {
+      EvacuateMap(next_vacant_map, NextMapToEvacuate());
+    }
+
+#ifdef DEBUG
+    CheckNoMapsToEvacuate();
+#endif
+  }
+
+  void UpdateMapPointersInRoots() {
+    MapUpdatingVisitor map_updating_visitor;
+    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
+    heap()->isolate()->global_handles()->IterateWeakRoots(
+        &map_updating_visitor);
+    LiveObjectList::IterateElements(&map_updating_visitor);
+  }
+
+  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
+    ASSERT(space != heap()->map_space());
+
+    PageIterator it(space, PageIterator::PAGES_IN_USE);
+    while (it.has_next()) {
+      Page* p = it.next();
+      UpdateMapPointersInRange(heap(),
+                               p->ObjectAreaStart(),
+                               p->AllocationTop());
+    }
+  }
+
+  void UpdateMapPointersInNewSpace() {
+    NewSpace* space = heap()->new_space();
+    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
+  }
+
+  void UpdateMapPointersInLargeObjectSpace() {
+    LargeObjectIterator it(heap()->lo_space());
+    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+      UpdateMapPointersInObject(heap(), obj);
+  }
+
+  void Finish() {
+    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+  }
+
+  inline Heap* heap() const { return heap_; }
+
+ private:
+  Heap* heap_;
+  int live_maps_;
+  Address to_evacuate_start_;
+  MapIterator vacant_map_it_;
+  MapIterator map_to_evacuate_it_;
+  Map* first_map_to_evacuate_;
+
+  // Helper class for updating map pointers in HeapObjects.
+  class MapUpdatingVisitor: public ObjectVisitor {
+ public:
+    MapUpdatingVisitor() {}
+
+    void VisitPointer(Object** p) {
+      UpdateMapPointer(p);
+    }
+
+    void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
+    }
+
+ private:
+    void UpdateMapPointer(Object** p) {
+      if (!(*p)->IsHeapObject()) return;
+      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
+
+      // Moved maps are tagged with an overflowed map word.  They are the
+      // only objects whose map word is overflowed, as marking is already
+      // complete.
+      MapWord map_word = old_map->map_word();
+      if (!map_word.IsOverflowed()) return;
+
+      *p = GetForwardedMap(map_word);
+    }
+  };
+
+  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
+    while (true) {
+      HeapObject* next = it->next();
+      ASSERT(next != NULL);
+      if (next == last)
+        return NULL;
+      ASSERT(!next->IsOverflowed());
+      ASSERT(!next->IsMarked());
+      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
+      if (next->IsMap() == live)
+        return reinterpret_cast<Map*>(next);
+    }
+  }
+
+  Map* NextVacantMap() {
+    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
+    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
+    return map;
+  }
+
+  Map* NextMapToEvacuate() {
+    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
+    ASSERT(map != NULL);
+    ASSERT(map->IsMap());
+    return map;
+  }
+
+  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
+    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
+    ASSERT(map_to_evacuate->IsMap());
+
+    ASSERT(Map::kSize % 4 == 0);
+
+    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
+        vacant_map->address(), map_to_evacuate->address(), Map::kSize);
+
+    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
+
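+    // Leave a forwarding address behind: the evacuated map's map word now
+    // points at the new location, tagged with the overflow bit.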
+    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
+    forwarding_map_word.SetOverflow();
+    map_to_evacuate->set_map_word(forwarding_map_word);
+
+    ASSERT(map_to_evacuate->map_word().IsOverflowed());
+    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
+  }
+
+  static Map* GetForwardedMap(MapWord map_word) {
+    ASSERT(map_word.IsOverflowed());
+    map_word.ClearOverflow();
+    Map* new_map = map_word.ToMap();
+    ASSERT_MAP_ALIGNED(new_map->address());
+    return new_map;
+  }
+
+  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
+    ASSERT(!obj->IsMarked());
+    Map* map = obj->map();
+    ASSERT(heap->map_space()->Contains(map));
+    MapWord map_word = map->map_word();
+    ASSERT(!map_word.IsMarked());
+    if (map_word.IsOverflowed()) {
+      Map* new_map = GetForwardedMap(map_word);
+      ASSERT(heap->map_space()->Contains(new_map));
+      obj->set_map(new_map);
+
+#ifdef DEBUG
+      if (FLAG_gc_verbose) {
+        PrintF("update %p : %p -> %p\n",
+               obj->address(),
+               reinterpret_cast<void*>(map),
+               reinterpret_cast<void*>(new_map));
+      }
+#endif
+    }
+
+    int size = obj->SizeFromMap(map);
+    MapUpdatingVisitor map_updating_visitor;
+    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
+    return size;
+  }
+
+  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
+    HeapObject* object;
+    int size;
+    for (Address current = start; current < end; current += size) {
+      object = HeapObject::FromAddress(current);
+      size = UpdateMapPointersInObject(heap, object);
+      ASSERT(size > 0);
+    }
+  }
+
+#ifdef DEBUG
+  void CheckNoMapsToEvacuate() {
+    if (!FLAG_enable_slow_asserts)
+      return;
+
+    for (HeapObject* obj = map_to_evacuate_it_.next();
+         obj != NULL; obj = map_to_evacuate_it_.next())
+      ASSERT(FreeListNode::IsFreeListNode(obj));
+  }
+#endif
+};
+
+
 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-#ifdef DEBUG
-  state_ = SWEEP_SPACES;
-#endif
-  SweeperType how_to_sweep =
-      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
-  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
-  if (sweep_precisely_) how_to_sweep = PRECISE;
+
+  ASSERT(state_ == SWEEP_SPACES);
+  ASSERT(!IsCompacting());
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
-  SweepSpace(heap()->old_data_space(), how_to_sweep);
+  SweepSpace(heap(), heap()->old_pointer_space());
+  SweepSpace(heap(), heap()->old_data_space());
+  SweepSpace(heap(), heap()->code_space());
+  SweepSpace(heap(), heap()->cell_space());
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    SweepNewSpace(heap(), heap()->new_space());
+  }
+  SweepSpace(heap(), heap()->map_space());
 
-  RemoveDeadInvalidatedCode();
-  SweepSpace(heap()->code_space(), PRECISE);
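+  // SweepNewSpace relocated the surviving new-space objects, and the map
+  // space was swept after it, so pointers from dirty map-space regions
+  // into the new space are updated here.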
+  heap()->IterateDirtyRegions(heap()->map_space(),
+                             &heap()->IteratePointersInDirtyMapsRegion,
+                             &UpdatePointerToNewGen,
+                             heap()->WATERMARK_SHOULD_BE_VALID);
 
-  SweepSpace(heap()->cell_space(), PRECISE);
+  intptr_t live_maps_size = heap()->map_space()->Size();
+  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
+  ASSERT(live_map_objects_size_ == live_maps_size);
 
-  EvacuateNewSpaceAndCandidates();
+  if (heap()->map_space()->NeedsCompaction(live_maps)) {
+    MapCompact map_compact(heap(), live_maps);
 
-  // ClearNonLiveTransitions depends on precise sweeping of map space to
-  // detect whether unmarked map became dead in this collection or in one
-  // of the previous ones.
-  SweepSpace(heap()->map_space(), PRECISE);
+    map_compact.CompactMaps();
+    map_compact.UpdateMapPointersInRoots();
 
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap_->lo_space()->FreeUnmarkedObjects();
+    PagedSpaces spaces;
+    for (PagedSpace* space = spaces.next();
+         space != NULL; space = spaces.next()) {
+      if (space == heap()->map_space()) continue;
+      map_compact.UpdateMapPointersInPagedSpace(space);
+    }
+    map_compact.UpdateMapPointersInNewSpace();
+    map_compact.UpdateMapPointersInLargeObjectSpace();
+
+    map_compact.Finish();
+  }
+}
+
+
+// Iterate the live objects in a range of addresses (eg, a page or a
+// semispace).  Free regions in the range are encoded in place and are
+// skipped over; the callback function is used to get the size of each
+// live object.
+int MarkCompactCollector::IterateLiveObjectsInRange(
+    Address start,
+    Address end,
+    LiveObjectCallback size_func) {
+  int live_objects_size = 0;
+  Address current = start;
+  while (current < end) {
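+    // kSingleFreeEncoding marks a single free word; kMultiFreeEncoding is
+    // followed by the size of the free region in bytes.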
+    uint32_t encoded_map = Memory::uint32_at(current);
+    if (encoded_map == kSingleFreeEncoding) {
+      current += kPointerSize;
+    } else if (encoded_map == kMultiFreeEncoding) {
+      current += Memory::int_at(current + kIntSize);
+    } else {
+      int size = (this->*size_func)(HeapObject::FromAddress(current));
+      current += size;
+      live_objects_size += size;
+    }
+  }
+  return live_objects_size;
+}
+
+
+int MarkCompactCollector::IterateLiveObjects(
+    NewSpace* space, LiveObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
+}
+
+
+int MarkCompactCollector::IterateLiveObjects(
+    PagedSpace* space, LiveObjectCallback size_f) {
+  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
+  int total = 0;
+  PageIterator it(space, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    Page* p = it.next();
+    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
+                                       p->AllocationTop(),
+                                       size_f);
+  }
+  return total;
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 3: Update pointers
+
+// Helper class for updating pointers in HeapObjects.
+class UpdatingVisitor: public ObjectVisitor {
+ public:
+  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) {
+    UpdatePointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Update all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    VisitPointer(&target);
+    rinfo->set_target_address(
+        reinterpret_cast<Code*>(target)->instruction_start());
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(
+        reinterpret_cast<Code*>(target)->instruction_start());
+  }
+
+  inline Heap* heap() const { return heap_; }
+
+ private:
+  void UpdatePointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    HeapObject* obj = HeapObject::cast(*p);
+    Address old_addr = obj->address();
+    Address new_addr;
+    ASSERT(!heap()->InFromSpace(obj));
+
+    if (heap()->new_space()->Contains(obj)) {
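+      // The forwarding address of a relocated new-space object is stored
+      // in from space, at the offset corresponding to the object's
+      // to-space address.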
+      Address forwarding_pointer_addr =
+          heap()->new_space()->FromSpaceLow() +
+          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
+      new_addr = Memory::Address_at(forwarding_pointer_addr);
+
+#ifdef DEBUG
+      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
+             heap()->old_data_space()->Contains(new_addr) ||
+             heap()->new_space()->FromSpaceContains(new_addr) ||
+             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
+
+      if (heap()->new_space()->FromSpaceContains(new_addr)) {
+        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
+      }
+#endif
+
+    } else if (heap()->lo_space()->Contains(obj)) {
+      // Don't move objects in the large object space.
+      return;
+
+    } else {
+#ifdef DEBUG
+      PagedSpaces spaces;
+      PagedSpace* original_space = spaces.next();
+      while (original_space != NULL) {
+        if (original_space->Contains(obj)) break;
+        original_space = spaces.next();
+      }
+      ASSERT(original_space != NULL);
+#endif
+      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
+      ASSERT(original_space->Contains(new_addr));
+      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
+             original_space->MCSpaceOffsetForAddress(old_addr));
+    }
+
+    *p = HeapObject::FromAddress(new_addr);
+
+#ifdef DEBUG
+    if (FLAG_gc_verbose) {
+      PrintF("update %p : %p -> %p\n",
+             reinterpret_cast<Address>(p), old_addr, new_addr);
+    }
+#endif
+  }
+
+  Heap* heap_;
+};
+
+
+void MarkCompactCollector::UpdatePointers() {
+#ifdef DEBUG
+  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
+  state_ = UPDATE_POINTERS;
+#endif
+  UpdatingVisitor updating_visitor(heap());
+  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+      &updating_visitor);
+  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
+  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
+
+  // Update the pointer to the head of the weak list of global contexts.
+  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
+
+  LiveObjectList::IterateElements(&updating_visitor);
+
+  int live_maps_size = IterateLiveObjects(
+      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+  int live_pointer_olds_size = IterateLiveObjects(
+      heap()->old_pointer_space(),
+      &MarkCompactCollector::UpdatePointersInOldObject);
+  int live_data_olds_size = IterateLiveObjects(
+      heap()->old_data_space(),
+      &MarkCompactCollector::UpdatePointersInOldObject);
+  int live_codes_size = IterateLiveObjects(
+      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+  int live_cells_size = IterateLiveObjects(
+      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
+  int live_news_size = IterateLiveObjects(
+      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
+
+  // Large objects do not move, so the map word can be updated directly.
+  LargeObjectIterator it(heap()->lo_space());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+    UpdatePointersInNewObject(obj);
+  }
+
+  USE(live_maps_size);
+  USE(live_pointer_olds_size);
+  USE(live_data_olds_size);
+  USE(live_codes_size);
+  USE(live_cells_size);
+  USE(live_news_size);
+  ASSERT(live_maps_size == live_map_objects_size_);
+  ASSERT(live_data_olds_size == live_old_data_objects_size_);
+  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+  ASSERT(live_codes_size == live_code_objects_size_);
+  ASSERT(live_cells_size == live_cell_objects_size_);
+  ASSERT(live_news_size == live_young_objects_size_);
+}
+
+
+int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
+  // Keep the old map pointer.
+  Map* old_map = obj->map();
+  ASSERT(old_map->IsHeapObject());
+
+  Address forwarded = GetForwardingAddressInOldSpace(old_map);
+
+  ASSERT(heap()->map_space()->Contains(old_map));
+  ASSERT(heap()->map_space()->Contains(forwarded));
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
+           forwarded);
+  }
+#endif
+  // Update the map pointer.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
+
+  // We have to compute the object size relying on the old map because
+  // map objects are not relocated yet.
+  int obj_size = obj->SizeFromMap(old_map);
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor(heap());
+  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
+int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
+  // Decode the map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // At this point, the first word of map_addr is also encoded, so we cannot
+  // cast it to Map* using Map::cast.
+  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
+  int obj_size = obj->SizeFromMap(map);
+  InstanceType type = map->instance_type();
+
+  // Update map pointer.
+  Address new_map_addr = GetForwardingAddressInOldSpace(map);
+  int offset = encoding.DecodeOffset();
+  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("update %p : %p -> %p\n", obj->address(),
+           map_addr, new_map_addr);
+  }
+#endif
+
+  // Update pointers in the object body.
+  UpdatingVisitor updating_visitor(heap());
+  obj->IterateBody(type, obj_size, &updating_visitor);
+  return obj_size;
+}
+
+
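+// During compaction the map word of a live object encodes its map address
+// together with the offset of its forwarding address from that of the
+// first live object on its page; this pair is decoded below.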
+Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
+  // The object should be in either old space or map space.
+  MapWord encoding = obj->map_word();
+
+  // Offset from the first live object's forwarding address.
+  int offset = encoding.DecodeOffset();
+  Address obj_addr = obj->address();
+
+  // Find the first live object's forwarding address.
+  Page* p = Page::FromAddress(obj_addr);
+  Address first_forwarded = p->mc_first_forwarded;
+
+  // The page containing the first forwarded address, and the offset of
+  // that address within it.
+  Page* forwarded_page = Page::FromAddress(first_forwarded);
+  int forwarded_offset = forwarded_page->Offset(first_forwarded);
+
+  // Find end of allocation in the page of first_forwarded.
+  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
+
+  // Check if the current object's forwarding pointer is in the same page
+  // as the first live object's forwarding pointer.
+  if (forwarded_offset + offset < mc_top_offset) {
+    // In the same page.
+    return first_forwarded + offset;
+  }
+
+  // Must be in the next page.  NOTE: this may cross chunks.
+  Page* next_page = forwarded_page->next_page();
+  ASSERT(next_page->is_valid());
+
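+  // Consume the part of the offset that fits on the forwarded page and
+  // rebase the remainder at the start of the next page's object area.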
+  offset -= (mc_top_offset - forwarded_offset);
+  offset += Page::kObjectStartOffset;
+
+  ASSERT_PAGE_OFFSET(offset);
+  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
+
+  return next_page->OffsetToAddress(offset);
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 4: Relocate objects
+
+void MarkCompactCollector::RelocateObjects() {
+#ifdef DEBUG
+  ASSERT(state_ == UPDATE_POINTERS);
+  state_ = RELOCATE_OBJECTS;
+#endif
+  // Relocate objects, always relocating map objects first.  Relocating
+  // objects in the other spaces relies on map objects to get object sizes.
+  int live_maps_size = IterateLiveObjects(
+      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
+  int live_pointer_olds_size = IterateLiveObjects(
+      heap()->old_pointer_space(),
+      &MarkCompactCollector::RelocateOldPointerObject);
+  int live_data_olds_size = IterateLiveObjects(
+      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
+  int live_codes_size = IterateLiveObjects(
+      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
+  int live_cells_size = IterateLiveObjects(
+      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
+  int live_news_size = IterateLiveObjects(
+      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
+
+  USE(live_maps_size);
+  USE(live_pointer_olds_size);
+  USE(live_data_olds_size);
+  USE(live_codes_size);
+  USE(live_cells_size);
+  USE(live_news_size);
+  ASSERT(live_maps_size == live_map_objects_size_);
+  ASSERT(live_data_olds_size == live_old_data_objects_size_);
+  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
+  ASSERT(live_codes_size == live_code_objects_size_);
+  ASSERT(live_cells_size == live_cell_objects_size_);
+  ASSERT(live_news_size == live_young_objects_size_);
+
+  // Flip the from and to spaces.
+  heap()->new_space()->Flip();
+
+  heap()->new_space()->MCCommitRelocationInfo();
+
+  // Set the age mark to the bottom of the to space.
+  Address mark = heap()->new_space()->bottom();
+  heap()->new_space()->set_age_mark(mark);
+
+  PagedSpaces spaces;
+  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+    space->MCCommitRelocationInfo();
+
+  heap()->CheckNewSpaceExpansionCriteria();
+  heap()->IncrementYoungSurvivorsCounter(live_news_size);
+}
+
+
+int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get the forwarding address before resetting the map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset the map pointer.  The meta map object may not be copied yet, so
+  // Map::cast does not yet work.
+  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    // Move contents.
+    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+                                                   old_addr,
+                                                   Map::kSize);
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  return Map::kSize;
+}
+
+
+static inline int RestoreMap(HeapObject* obj,
+                             PagedSpace* space,
+                             Address new_addr,
+                             Address map_addr) {
+  // This must be a non-map object, and the function relies on the
+  // assumption that the Map space is compacted before the other paged
+  // spaces (see RelocateObjects).
+
+  // Reset map pointer.
+  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
+
+  int obj_size = obj->Size();
+  ASSERT_OBJECT_SIZE(obj_size);
+
+  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
+         space->MCSpaceOffsetForAddress(obj->address()));
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
+  }
+#endif
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
+                                                   PagedSpace* space) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(map_addr));
+
+  // Get forwarding address before resetting map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    // Move contents.
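+    // Objects in the old data space contain no pointers into the new
+    // space, so the region (dirty) marks need not be updated when they
+    // move.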
+    if (space == heap()->old_data_space()) {
+      heap()->MoveBlock(new_addr, old_addr, obj_size);
+    } else {
+      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+                                                     old_addr,
+                                                     obj_size);
+    }
+  }
+
+  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsSharedFunctionInfo()) {
+    PROFILE(heap()->isolate(),
+            SharedFunctionInfoMoveEvent(old_addr, new_addr));
+  }
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
+}
+
+
+int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
+}
+
+
+int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, heap()->cell_space());
+}
+
+
+int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
+  // Recover map pointer.
+  MapWord encoding = obj->map_word();
+  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
+  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
+
+  // Get the forwarding address before resetting the map pointer.
+  Address new_addr = GetForwardingAddressInOldSpace(obj);
+
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
+
+  Address old_addr = obj->address();
+
+  if (new_addr != old_addr) {
+    // Move contents.
+    heap()->MoveBlock(new_addr, old_addr, obj_size);
+  }
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsCode()) {
+    // May also update inline cache target.
+    Code::cast(copied_to)->Relocate(new_addr - old_addr);
+    // Notify the logger that compiled code has moved.
+    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
+  }
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
+
+  return obj_size;
+}
+
+
+int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
+  int obj_size = obj->Size();
+
+  // Get the forwarding address.
+  Address old_addr = obj->address();
+  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
+
+  Address new_addr =
+    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
+
+#ifdef DEBUG
+  if (heap()->new_space()->FromSpaceContains(new_addr)) {
+    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
+           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
+  } else {
+    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
+           heap()->TargetSpace(obj) == heap()->old_data_space());
+  }
+#endif
+
+  // New and old addresses cannot overlap.
+  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
+    heap()->CopyBlock(new_addr, old_addr, obj_size);
+  } else {
+    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+                                                   old_addr,
+                                                   obj_size);
+  }
+
+#ifdef DEBUG
+  if (FLAG_gc_verbose) {
+    PrintF("relocate %p -> %p\n", old_addr, new_addr);
+  }
+#endif
+
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsSharedFunctionInfo()) {
+    PROFILE(heap()->isolate(),
+            SharedFunctionInfoMoveEvent(old_addr, new_addr));
+  }
+  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
+
+  return obj_size;
 }
 
 
@@ -3976,9 +3359,6 @@
 }
 
 
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                 Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -3992,149 +3372,17 @@
 }
 
 
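+// While marking is in progress the mark bit is kept in the object's map
+// word, so the size of a marked object must be computed from a map word
+// with the mark bit cleared.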
+int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
+  MapWord map_word = obj->map_word();
+  map_word.ClearMark();
+  return obj->SizeFromMap(map_word.ToMap());
+}
+
+
 void MarkCompactCollector::Initialize() {
+  StaticPointersToNewGenUpdatingVisitor::Initialize();
   StaticMarkingVisitor::Initialize();
 }
 
 
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
-  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
-                        SlotsBuffer** buffer_address,
-                        SlotType type,
-                        Address addr,
-                        AdditionMode mode) {
-  SlotsBuffer* buffer = *buffer_address;
-  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
-    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-      allocator->DeallocateChain(buffer_address);
-      return false;
-    }
-    buffer = allocator->AllocateBuffer(buffer);
-    *buffer_address = buffer;
-  }
-  ASSERT(buffer->HasSpaceForTypedSlot());
-  buffer->Add(reinterpret_cast<ObjectSlot>(type));
-  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
-  return true;
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
-  if (RelocInfo::IsCodeTarget(rmode)) {
-    return SlotsBuffer::CODE_TARGET_SLOT;
-  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
-  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return SlotsBuffer::DEBUG_TARGET_SLOT;
-  } else if (RelocInfo::IsJSReturn(rmode)) {
-    return SlotsBuffer::JS_RETURN_SLOT;
-  }
-  UNREACHABLE();
-  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  if (target_page->IsEvacuationCandidate() &&
-      (rinfo->host() == NULL ||
-       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotTypeForRMode(rinfo->rmode()),
-                            rinfo->pc(),
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotsBuffer::CODE_ENTRY_SLOT,
-                            slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
-    SlotsBuffer::ObjectSlot slot) {
-  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      PointersUpdatingVisitor::UpdateSlot(heap, slot);
-    } else {
-      ++slot_idx;
-      ASSERT(slot_idx < idx_);
-      UpdateSlot(&v,
-                 DecodeSlotType(slot),
-                 reinterpret_cast<Address>(slots_[slot_idx]));
-    }
-  }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
-        PointersUpdatingVisitor::UpdateSlot(heap, slot);
-      }
-    } else {
-      ++slot_idx;
-      ASSERT(slot_idx < idx_);
-      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
-      if (!IsOnInvalidatedCodeObject(pc)) {
-        UpdateSlot(&v,
-                   DecodeSlotType(slot),
-                   reinterpret_cast<Address>(slots_[slot_idx]));
-      }
-    }
-  }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
-  return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
-  delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
-  SlotsBuffer* buffer = *buffer_address;
-  while (buffer != NULL) {
-    SlotsBuffer* next_buffer = buffer->next();
-    DeallocateBuffer(buffer);
-    buffer = next_buffer;
-  }
-  *buffer_address = NULL;
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 66ffd19..f72c813 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,6 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_
 
-#include "compiler-intrinsics.h"
 #include "spaces.h"
 
 namespace v8 {
@@ -46,340 +45,54 @@
 class RootMarkingVisitor;
 
 
-class Marking {
- public:
-  explicit Marking(Heap* heap)
-      : heap_(heap) {
-  }
-
-  static inline MarkBit MarkBitFrom(Address addr);
-
-  static inline MarkBit MarkBitFrom(HeapObject* obj) {
-    return MarkBitFrom(reinterpret_cast<Address>(obj));
-  }
-
-  // Impossible markbits: 01
-  static const char* kImpossibleBitPattern;
-  static inline bool IsImpossible(MarkBit mark_bit) {
-    return !mark_bit.Get() && mark_bit.Next().Get();
-  }
-
-  // Black markbits: 10 - this is required by the sweeper.
-  static const char* kBlackBitPattern;
-  static inline bool IsBlack(MarkBit mark_bit) {
-    return mark_bit.Get() && !mark_bit.Next().Get();
-  }
-
-  // White markbits: 00 - this is required by the mark bit clearer.
-  static const char* kWhiteBitPattern;
-  static inline bool IsWhite(MarkBit mark_bit) {
-    return !mark_bit.Get();
-  }
-
-  // Grey markbits: 11
-  static const char* kGreyBitPattern;
-  static inline bool IsGrey(MarkBit mark_bit) {
-    return mark_bit.Get() && mark_bit.Next().Get();
-  }
-
-  static inline void MarkBlack(MarkBit mark_bit) {
-    mark_bit.Set();
-    mark_bit.Next().Clear();
-  }
-
-  static inline void BlackToGrey(MarkBit markbit) {
-    markbit.Next().Set();
-  }
-
-  static inline void WhiteToGrey(MarkBit markbit) {
-    markbit.Set();
-    markbit.Next().Set();
-  }
-
-  static inline void GreyToBlack(MarkBit markbit) {
-    markbit.Next().Clear();
-  }
-
-  static inline void BlackToGrey(HeapObject* obj) {
-    BlackToGrey(MarkBitFrom(obj));
-  }
-
-  static inline void AnyToGrey(MarkBit markbit) {
-    markbit.Set();
-    markbit.Next().Set();
-  }
-
-  // Returns true if the the object whose mark is transferred is marked black.
-  bool TransferMark(Address old_start, Address new_start);
-
-#ifdef DEBUG
-  enum ObjectColor {
-    BLACK_OBJECT,
-    WHITE_OBJECT,
-    GREY_OBJECT,
-    IMPOSSIBLE_COLOR
-  };
-
-  static const char* ColorName(ObjectColor color) {
-    switch (color) {
-      case BLACK_OBJECT: return "black";
-      case WHITE_OBJECT: return "white";
-      case GREY_OBJECT: return "grey";
-      case IMPOSSIBLE_COLOR: return "impossible";
-    }
-    return "error";
-  }
-
-  static ObjectColor Color(HeapObject* obj) {
-    return Color(Marking::MarkBitFrom(obj));
-  }
-
-  static ObjectColor Color(MarkBit mark_bit) {
-    if (IsBlack(mark_bit)) return BLACK_OBJECT;
-    if (IsWhite(mark_bit)) return WHITE_OBJECT;
-    if (IsGrey(mark_bit)) return GREY_OBJECT;
-    UNREACHABLE();
-    return IMPOSSIBLE_COLOR;
-  }
-#endif
-
-  // Returns true if the transferred color is black.
-  INLINE(static bool TransferColor(HeapObject* from,
-                                   HeapObject* to)) {
-    MarkBit from_mark_bit = MarkBitFrom(from);
-    MarkBit to_mark_bit = MarkBitFrom(to);
-    bool is_black = false;
-    if (from_mark_bit.Get()) {
-      to_mark_bit.Set();
-      is_black = true;  // Looks black so far.
-    }
-    if (from_mark_bit.Next().Get()) {
-      to_mark_bit.Next().Set();
-      is_black = false;  // Was actually gray.
-    }
-    return is_black;
-  }
-
- private:
-  Heap* heap_;
-};
-
 // ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
+// Marking stack for tracing live objects.
 
-class MarkingDeque {
+class MarkingStack {
  public:
-  MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
+  MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
 
   void Initialize(Address low, Address high) {
-    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-    array_ = obj_low;
-    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
-    top_ = bottom_ = 0;
+    top_ = low_ = reinterpret_cast<HeapObject**>(low);
+    high_ = reinterpret_cast<HeapObject**>(high);
     overflowed_ = false;
   }
 
-  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+  bool is_full() const { return top_ >= high_; }
 
-  inline bool IsEmpty() { return top_ == bottom_; }
+  bool is_empty() const { return top_ <= low_; }
 
   bool overflowed() const { return overflowed_; }
 
-  void ClearOverflowed() { overflowed_ = false; }
-
-  void SetOverflowed() { overflowed_ = true; }
+  void clear_overflowed() { overflowed_ = false; }
 
   // Push the (marked) object on the marking stack if there is room,
   // otherwise mark the object as overflowed and wait for a rescan of the
   // heap.
-  inline void PushBlack(HeapObject* object) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      Marking::BlackToGrey(object);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
-      SetOverflowed();
+  void Push(HeapObject* object) {
+    CHECK(object->IsHeapObject());
+    if (is_full()) {
+      object->SetOverflow();
+      overflowed_ = true;
     } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
+      *(top_++) = object;
     }
   }
 
-  inline void PushGrey(HeapObject* object) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-    }
-  }
-
-  inline HeapObject* Pop() {
-    ASSERT(!IsEmpty());
-    top_ = ((top_ - 1) & mask_);
-    HeapObject* object = array_[top_];
-    ASSERT(object->IsHeapObject());
+  HeapObject* Pop() {
+    ASSERT(!is_empty());
+    HeapObject* object = *(--top_);
+    CHECK(object->IsHeapObject());
     return object;
   }
 
-  inline void UnshiftGrey(HeapObject* object) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-    } else {
-      bottom_ = ((bottom_ - 1) & mask_);
-      array_[bottom_] = object;
-    }
-  }
-
-  HeapObject** array() { return array_; }
-  int bottom() { return bottom_; }
-  int top() { return top_; }
-  int mask() { return mask_; }
-  void set_top(int top) { top_ = top; }
-
  private:
-  HeapObject** array_;
-  // array_[(top - 1) & mask_] is the top element in the deque.  The Deque is
-  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom
-  // (mod mask + 1).
-  int top_;
-  int bottom_;
-  int mask_;
+  HeapObject** low_;
+  HeapObject** top_;
+  HeapObject** high_;
   bool overflowed_;
 
-  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
-class SlotsBufferAllocator {
- public:
-  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
-  void DeallocateBuffer(SlotsBuffer* buffer);
-
-  void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-//    - Untyped slots are expected to contain a tagged object pointer.
-//      They are recorded by an address.
-//    - Typed slots are expected to contain an encoded pointer to a heap
-//      object where the way of encoding depends on the type of the slot.
-//      They are recorded as a pair (SlotType, slot address).
-// We assume that zero-page is never mapped this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if element of slots buffer is less than NUMBER_OF_SLOT_TYPES then it
-// is the first element of typed slot's pair.
-class SlotsBuffer {
- public:
-  typedef Object** ObjectSlot;
-
-  explicit SlotsBuffer(SlotsBuffer* next_buffer)
-      : idx_(0), chain_length_(1), next_(next_buffer) {
-    if (next_ != NULL) {
-      chain_length_ = next_->chain_length_ + 1;
-    }
-  }
-
-  ~SlotsBuffer() {
-  }
-
-  void Add(ObjectSlot slot) {
-    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
-    slots_[idx_++] = slot;
-  }
-
-  enum SlotType {
-    EMBEDDED_OBJECT_SLOT,
-    RELOCATED_CODE_OBJECT,
-    CODE_TARGET_SLOT,
-    CODE_ENTRY_SLOT,
-    DEBUG_TARGET_SLOT,
-    JS_RETURN_SLOT,
-    NUMBER_OF_SLOT_TYPES
-  };
-
-  void UpdateSlots(Heap* heap);
-
-  void UpdateSlotsWithFilter(Heap* heap);
-
-  SlotsBuffer* next() { return next_; }
-
-  static int SizeOfChain(SlotsBuffer* buffer) {
-    if (buffer == NULL) return 0;
-    return static_cast<int>(buffer->idx_ +
-                            (buffer->chain_length_ - 1) * kNumberOfElements);
-  }
-
-  inline bool IsFull() {
-    return idx_ == kNumberOfElements;
-  }
-
-  inline bool HasSpaceForTypedSlot() {
-    return idx_ < kNumberOfElements - 1;
-  }
-
-  static void UpdateSlotsRecordedIn(Heap* heap,
-                                    SlotsBuffer* buffer,
-                                    bool code_slots_filtering_required) {
-    while (buffer != NULL) {
-      if (code_slots_filtering_required) {
-        buffer->UpdateSlotsWithFilter(heap);
-      } else {
-        buffer->UpdateSlots(heap);
-      }
-      buffer = buffer->next();
-    }
-  }
-
-  enum AdditionMode {
-    FAIL_ON_OVERFLOW,
-    IGNORE_OVERFLOW
-  };
-
-  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
-    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
-  }
-
-  static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address,
-                    ObjectSlot slot,
-                    AdditionMode mode) {
-    SlotsBuffer* buffer = *buffer_address;
-    if (buffer == NULL || buffer->IsFull()) {
-      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-        allocator->DeallocateChain(buffer_address);
-        return false;
-      }
-      buffer = allocator->AllocateBuffer(buffer);
-      *buffer_address = buffer;
-    }
-    buffer->Add(slot);
-    return true;
-  }
-
-  static bool IsTypedSlot(ObjectSlot slot);
-
-  static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address,
-                    SlotType type,
-                    Address addr,
-                    AdditionMode mode);
-
-  static const int kNumberOfElements = 1021;
-
- private:
-  static const int kChainLengthThreshold = 15;
-
-  intptr_t idx_;
-  intptr_t chain_length_;
-  SlotsBuffer* next_;
-  ObjectSlot slots_[kNumberOfElements];
+  DISALLOW_COPY_AND_ASSIGN(MarkingStack);
 };
 
 
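For intuition, the MarkingStack restored above is a plain bounded stack: Push() drops the object and raises an overflow flag when full, and the collector later rescans the heap for the dropped work. A minimal self-contained sketch of that discipline (illustrative names, not V8's types):

#include <cassert>

// Bounded marking stack with an overflow flag. When Push() hits the
// high-water mark it records the overflow instead of growing; the
// caller is expected to rescan for the dropped entries later.
template <typename T>
class BoundedMarkingStack {
 public:
  void Initialize(T** low, T** high) {
    top_ = low_ = low;
    high_ = high;
    overflowed_ = false;
  }
  bool is_full() const { return top_ >= high_; }
  bool is_empty() const { return top_ <= low_; }
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }

  void Push(T* object) {
    if (is_full()) {
      overflowed_ = true;  // Dropped; a later rescan must pick it up.
    } else {
      *(top_++) = object;
    }
  }

  T* Pop() {
    assert(!is_empty());
    return *(--top_);
  }

 private:
  T** low_ = nullptr;
  T** top_ = nullptr;
  T** high_ = nullptr;
  bool overflowed_ = false;
};
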
@@ -389,6 +102,9 @@
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
+
+class OverflowedObjectsScanner;
+
 class MarkCompactCollector {
  public:
   // Type of functions to compute forwarding addresses of objects in
@@ -407,7 +123,7 @@
   // object from the forwarding address of the previous live object in the
   // page as input, and is updated to contain the offset to be used for the
   // next live object in the same page.  For spaces using a different
-  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
+  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
   typedef void (*EncodingFunction)(Heap* heap,
                                    HeapObject* old_object,
                                    int object_size,
@@ -420,15 +136,15 @@
   // Pointer to member function, used in IterateLiveObjects.
   typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
 
-  // Set the global flags, it must be called before Prepare to take effect.
-  inline void SetFlags(int flags);
+  // Set the global force_compaction flag; it must be called before Prepare
+  // to take effect.
+  void SetForceCompaction(bool value) {
+    force_compaction_ = value;
+  }
+
 
   static void Initialize();
 
-  void CollectEvacuationCandidates(PagedSpace* space);
-
-  void AddEvacuationCandidate(Page* p);
-
   // Prepares for GC by resetting relocation info in old and map spaces and
   // choosing spaces to compact.
   void Prepare(GCTracer* tracer);
@@ -436,14 +152,23 @@
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  enum CompactionMode {
-    INCREMENTAL_COMPACTION,
-    NON_INCREMENTAL_COMPACTION
-  };
+  // True if the last full GC performed heap compaction.
+  bool HasCompacted() { return compacting_collection_; }
 
-  bool StartCompaction(CompactionMode mode);
+  // True after the Prepare phase if the compaction is taking place.
+  bool IsCompacting() {
+#ifdef DEBUG
+    // For the purposes of asserts we don't want this to keep returning true
+    // after the collection is completed.
+    return state_ != IDLE && compacting_collection_;
+#else
+    return compacting_collection_;
+#endif
+  }
 
-  void AbortCompaction();
+  // The number of objects left marked at the end of the last completed
+  // full GC (expected to be zero).
+  int previous_marked_count() { return previous_marked_count_; }
 
   // During a full GC, there is a stack-allocated GCTracer that is used for
   // bookkeeping information.  Return a pointer to that tracer.
@@ -458,101 +183,29 @@
   // Determine type of object and emit deletion log event.
   static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
+  // Returns size of a possibly marked object.
+  static int SizeOfMarkedObject(HeapObject* obj);
+
   // Distinguishable invalid map encodings (for single word and multiple words)
   // that indicate free regions.
   static const uint32_t kSingleFreeEncoding = 0;
   static const uint32_t kMultiFreeEncoding = 1;
 
-  static inline bool IsMarked(Object* obj);
-
   inline Heap* heap() const { return heap_; }
 
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
   void EnableCodeFlushing(bool enable);
 
-  enum SweeperType {
-    CONSERVATIVE,
-    LAZY_CONSERVATIVE,
-    PRECISE
-  };
-
-#ifdef DEBUG
-  void VerifyMarkbitsAreClean();
-  static void VerifyMarkbitsAreClean(PagedSpace* space);
-  static void VerifyMarkbitsAreClean(NewSpace* space);
-#endif
-
-  // Sweep a single page from the given space conservatively.
-  // Return a number of reclaimed bytes.
-  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
-
-  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
-    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
-        ShouldSkipEvacuationSlotRecording();
-  }
-
-  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
-    return Page::FromAddress(reinterpret_cast<Address>(host))->
-        ShouldSkipEvacuationSlotRecording();
-  }
-
-  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
-    return Page::FromAddress(reinterpret_cast<Address>(obj))->
-        IsEvacuationCandidate();
-  }
-
-  void EvictEvacuationCandidate(Page* page) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("Page %p is too popular. Disabling evacuation.\n",
-             reinterpret_cast<void*>(page));
-    }
-
-    // TODO(gc) If all evacuation candidates are too popular we
-    // should stop slots recording entirely.
-    page->ClearEvacuationCandidate();
-
-    // We were not collecting slots on this page that point
-    // to other evacuation candidates thus we have to
-    // rescan the page after evacuation to discover and update all
-    // pointers to evacuated objects.
-    if (page->owner()->identity() == OLD_DATA_SPACE) {
-      evacuation_candidates_.RemoveElement(page);
-    } else {
-      page->SetFlag(Page::RESCAN_ON_EVACUATION);
-    }
-  }
-
-  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
-  void RecordCodeEntrySlot(Address slot, Code* target);
-
-  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
-
-  void MigrateObject(Address dst,
-                     Address src,
-                     int size,
-                     AllocationSpace to_old_space);
-
-  bool TryPromoteObject(HeapObject* object, int object_size);
-
   inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
   inline void set_encountered_weak_maps(Object* weak_map) {
     encountered_weak_maps_ = weak_map;
   }
 
-  void InvalidateCode(Code* code);
-
-  void ClearMarkbits();
-
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
 
-  bool MarkInvalidatedCode();
-  void RemoveDeadInvalidatedCode();
-  void ProcessInvalidatedCode(ObjectVisitor* visitor);
-
-
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -568,32 +221,23 @@
   CollectorState state_;
 #endif
 
-  // Global flag that forces sweeping to be precise, so we can traverse the
-  // heap.
-  bool sweep_precisely_;
+  // Global flag that forces a compaction.
+  bool force_compaction_;
 
-  bool reduce_memory_footprint_;
+  // Global flag indicating whether spaces were compacted on the last GC.
+  bool compacting_collection_;
 
-  bool abort_incremental_marking_;
+  // Global flag indicating whether spaces will be compacted on the next GC.
+  bool compact_on_next_gc_;
 
-  // True if we are collecting slots to perform evacuation from evacuation
-  // candidates.
-  bool compacting_;
-
-  bool was_marked_incrementally_;
-
-  bool collect_maps_;
-
-  bool flush_monomorphic_ics_;
+  // The number of objects left marked at the end of the last completed full
+  // GC (expected to be zero).
+  int previous_marked_count_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
 
-  SlotsBufferAllocator slots_buffer_allocator_;
-
-  SlotsBuffer* migration_slots_buffer_;
-
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -626,19 +270,13 @@
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
 
-  void AfterMarking();
+  void MarkUnmarkedObject(HeapObject* obj);
 
-  // Marks the object black and pushes it on the marking stack.
-  // This is for non-incremental marking.
-  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
+  inline void MarkObject(HeapObject* obj) {
+    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
+  }
 
-  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
-  INLINE(void MarkObjectAndPush(HeapObject* value));
-
-  // Marks the object black.  This is for non-incremental marking.
-  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
-
-  void ProcessNewlyMarkedObject(HeapObject* obj);
+  inline void SetMark(HeapObject* obj);
 
   // Creates back pointers for all map transitions, stores them in
   // the prototype field.  The original prototype pointers are restored
@@ -649,7 +287,6 @@
 
   // Mark a Map and its DescriptorArray together, skipping transitions.
   void MarkMapContents(Map* map);
-  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
   void MarkDescriptorArray(DescriptorArray* descriptors);
 
   // Mark the heap roots and all objects reachable from them.
@@ -673,18 +310,18 @@
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
-  void ProcessMarkingDeque();
+  void ProcessMarkingStack();
 
   // Mark objects reachable (transitively) from objects in the marking
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
   // overflow flag will be set.
-  void EmptyMarkingDeque();
+  void EmptyMarkingStack();
 
   // Refill the marking stack with overflowed objects from the heap.  This
   // function either leaves the marking stack full or clears the overflow
   // flag on the marking stack.
-  void RefillMarkingDeque();
+  void RefillMarkingStack();
 
   // After reachable maps have been marked process per context object
   // literal map caches removing unmarked entries.
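
The interplay of the three marking functions above is a drain-and-refill loop: drain the stack, and while any objects were dropped due to overflow, rescan the heap and drain again. A toy self-contained version of that control flow (sketch only, with a deliberately tiny stack bound to force overflow):

#include <vector>

struct Node {
  std::vector<Node*> children;
  bool marked = false;
  bool overflowed = false;  // Stand-in for the per-object overflow bit.
};

const size_t kStackLimit = 4;  // Deliberately tiny to force overflow.

// Drain: scan children of popped nodes, dropping pushes that overflow.
void Drain(std::vector<Node*>& stack) {
  while (!stack.empty()) {
    Node* n = stack.back();
    stack.pop_back();
    for (Node* c : n->children) {
      if (c->marked) continue;
      c->marked = true;
      if (stack.size() >= kStackLimit) c->overflowed = true;
      else stack.push_back(c);
    }
  }
}

// Refill: rescan the heap for dropped (overflowed) nodes.
bool Refill(const std::vector<Node*>& heap, std::vector<Node*>& stack) {
  bool found = false;
  for (Node* n : heap) {
    if (n->overflowed) {
      n->overflowed = false;
      stack.push_back(n);
      found = true;
    }
  }
  return found;
}

void ProcessMarking(const std::vector<Node*>& heap,
                    std::vector<Node*>& roots) {
  Drain(roots);
  while (Refill(heap, roots)) Drain(roots);  // Repeat until no overflow.
}
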
@@ -694,17 +331,20 @@
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
 
+#ifdef DEBUG
+  void UpdateLiveObjectCount(HeapObject* obj);
+#endif
+
+  // We sweep the large object space in the same way whether we are
+  // compacting or not, because the large object space is never compacted.
+  void SweepLargeObjectSpace();
+
+  // Test whether a (possibly marked) object is a Map.
+  static inline bool SafeIsMap(HeapObject* object);
+
   // Map transitions from a live map to a dead map must be killed.
   // We replace them with a null descriptor, with the same key.
   void ClearNonLiveTransitions();
-  void ClearNonLivePrototypeTransitions(Map* map);
-  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
-
-  // Marking detaches initial maps from SharedFunctionInfo objects
-  // to make this reference weak. We need to reattach initial maps
-  // back after collection. This is either done during
-  // ClearNonLiveTransitions pass or by calling this function.
-  void ReattachInitialMaps();
 
   // Mark all values associated with reachable keys in weak maps encountered
   // so far.  This might push new object or even new weak maps onto the
@@ -718,31 +358,164 @@
 
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection.
+  // a non-compacting collection, or else computing and encoding
+  // forwarding addresses for a compacting collection.
   //
   //  Before: Live objects are marked and non-live objects are unmarked.
   //
-  //   After: Live objects are unmarked, non-live regions have been added to
-  //          their space's free list. Active eden semispace is compacted by
-  //          evacuation.
+  //   After: (Non-compacting collection.)  Live objects are unmarked,
+  //          non-live regions have been added to their space's free
+  //          list.
   //
+  //   After: (Compacting collection.)  The forwarding address of live
+  //          objects in the paged spaces is encoded in their map word
+  //          along with their (non-forwarded) map pointer.
+  //
+  //          The forwarding address of live objects in the new space is
+  //          written to their map word's offset in the inactive
+  //          semispace.
+  //
+  //          Bookkeeping data is written to the page header of
+  //          each paged-space page that contains live objects after
+  //          compaction:
+  //
+  //          The allocation watermark field is used to track the
+  //          relocation top address, the address of the first word
+  //          after the end of the last live object in the page after
+  //          compaction.
+  //
+  //          The Page::mc_page_index field contains the zero-based index of the
+  //          page in its space.  This word is only used for map space pages, in
+  //          order to encode the map addresses in 21 bits to free 11
+  //          bits per map word for the forwarding address.
+  //
+  //          The Page::mc_first_forwarded field contains the (nonencoded)
+  //          forwarding address of the first live object in the page.
+  //
+  //          In both the new space and the paged spaces, a linked list
+  //          of live regions is constructructed (linked through
+  //          pointers in the non-live region immediately following each
+  //          live region) to speed further passes of the collector.
+
+  // Encodes forwarding addresses of objects in compactable parts of the
+  // heap.
+  void EncodeForwardingAddresses();
+
+  // Encodes the forwarding addresses of objects in new space.
+  void EncodeForwardingAddressesInNewSpace();
+
+  // Function template to encode the forwarding addresses of objects in
+  // paged spaces, parameterized by allocation and non-live processing
+  // functions.
+  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
+  void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
+
+  // Iterates live objects in a space, passes live objects
+  // to a callback function which returns the heap size of the object.
+  // Returns the number of live objects iterated.
+  int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
+  int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
+
+  // Iterates the live objects between a range of addresses, returning the
+  // number of live objects.
+  int IterateLiveObjectsInRange(Address start, Address end,
+                                LiveObjectCallback size_func);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
   void SweepSpaces();
 
-  void EvacuateNewSpace();
+  // -----------------------------------------------------------------------
+  // Phase 3: Updating pointers in live objects.
+  //
+  //  Before: Same as after phase 2 (compacting collection).
+  //
+  //   After: All pointers in live objects, including encoded map
+  //          pointers, are updated to point to their target's new
+  //          location.
 
-  void EvacuateLiveObjectsFromPage(Page* p);
+  friend class UpdatingVisitor;  // helper for updating visited objects
 
-  void EvacuatePages();
+  // Updates pointers in all spaces.
+  void UpdatePointers();
 
-  void EvacuateNewSpaceAndCandidates();
+  // Updates pointers in an object in new space.
+  // Returns the heap size of the object.
+  int UpdatePointersInNewObject(HeapObject* obj);
 
-  void SweepSpace(PagedSpace* space, SweeperType sweeper);
+  // Updates pointers in an object in old spaces.
+  // Returns the heap size of the object.
+  int UpdatePointersInOldObject(HeapObject* obj);
+
+  // Calculates the forwarding address of an object in an old space.
+  static Address GetForwardingAddressInOldSpace(HeapObject* obj);
+
+  // -----------------------------------------------------------------------
+  // Phase 4: Relocating objects.
+  //
+  //  Before: Pointers to live objects are updated to point to their
+  //          target's new location.
+  //
+  //   After: Objects have been moved to their new addresses.
+
+  // Relocates objects in all spaces.
+  void RelocateObjects();
+
+  // Converts a code object's inline target to addresses; the conversion
+  // from address to target happens in the marking phase.
+  int ConvertCodeICTargetToAddress(HeapObject* obj);
+
+  // Relocate a map object.
+  int RelocateMapObject(HeapObject* obj);
+
+  // Relocates an old object.
+  int RelocateOldPointerObject(HeapObject* obj);
+  int RelocateOldDataObject(HeapObject* obj);
+
+  // Relocate a property cell object.
+  int RelocateCellObject(HeapObject* obj);
+
+  // Helper function.
+  inline int RelocateOldNonCodeObject(HeapObject* obj,
+                                      PagedSpace* space);
+
+  // Relocates an object in the code space.
+  int RelocateCodeObject(HeapObject* obj);
+
+  // Copy a new object.
+  int RelocateNewObject(HeapObject* obj);
 
 #ifdef DEBUG
+  // -----------------------------------------------------------------------
+  // Debugging variables, functions and classes
+  // Counters used for debugging the marking phase of mark-compact or
+  // mark-sweep collection.
+
+  // Size of live objects in Heap::to_space_.
+  int live_young_objects_size_;
+
+  // Size of live objects in Heap::old_pointer_space_.
+  int live_old_pointer_objects_size_;
+
+  // Size of live objects in Heap::old_data_space_.
+  int live_old_data_objects_size_;
+
+  // Size of live objects in Heap::code_space_.
+  int live_code_objects_size_;
+
+  // Size of live objects in Heap::map_space_.
+  int live_map_objects_size_;
+
+  // Size of live objects in Heap::cell_space_.
+  int live_cell_objects_size_;
+
+  // Size of live objects in Heap::lo_space_.
+  int live_lo_objects_size_;
+
+  // Number of live bytes in this collection.
+  int live_bytes_;
+
   friend class MarkObjectVisitor;
   static void VisitObject(HeapObject* obj);
 
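The phase comments restored above describe a classic sliding (LISP-2-style) compactor: compute a forwarding address per live object, rewrite all pointers through those addresses, then move the objects. A toy version over an index-based "heap" (a sketch for intuition; V8 instead packs the forwarding data into map words and page headers as described above):

#include <utility>
#include <vector>

struct Obj {
  bool live = false;
  int forward = -1;        // New index, assigned in "phase 2".
  std::vector<int> refs;   // Indices of objects this object points to.
};

void Compact(std::vector<Obj>& heap) {
  // Phase 2: encode forwarding addresses of live objects.
  int next = 0;
  for (Obj& o : heap) {
    if (o.live) o.forward = next++;
  }

  // Phase 3: update pointers in live objects to their targets' new homes.
  for (Obj& o : heap) {
    if (!o.live) continue;
    for (int& r : o.refs) r = heap[r].forward;
  }

  // Phase 4: relocate the live objects to their new positions.
  std::vector<Obj> compacted(next);
  for (Obj& o : heap) {
    if (o.live) compacted[o.forward] = std::move(o);
  }
  heap = std::move(compacted);
}
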
@@ -751,19 +524,15 @@
 #endif
 
   Heap* heap_;
-  MarkingDeque marking_deque_;
+  MarkingStack marking_stack_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
 
-  List<Page*> evacuation_candidates_;
-  List<Code*> invalidated_code_;
-
   friend class Heap;
+  friend class OverflowedObjectsScanner;
 };
 
 
-const char* AllocationSpaceName(AllocationSpace space);
-
 } }  // namespace v8::internal
 
 #endif  // V8_MARK_COMPACT_H_
diff --git a/src/math.js b/src/math.js
index 8e735c4..b5a6d18 100644
--- a/src/math.js
+++ b/src/math.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,15 +29,15 @@
 // Keep reference to original values of some global properties.  This
 // has the added benefit that the code in this file is isolated from
 // changes to these properties.
-var $floor = MathFloor;
-var $random = MathRandom;
-var $abs = MathAbs;
+const $floor = MathFloor;
+const $random = MathRandom;
+const $abs = MathAbs;
 
 // Instance class name can only be set on functions. That is the only
 // purpose for MathConstructor.
 function MathConstructor() {}
 %FunctionSetInstanceClassName(MathConstructor, 'Math');
-var $Math = new MathConstructor();
+const $Math = new MathConstructor();
 $Math.__proto__ = $Object.prototype;
 %SetProperty(global, "Math", $Math, DONT_ENUM);
 
@@ -119,19 +119,6 @@
 // ECMA 262 - 15.8.2.11
 function MathMax(arg1, arg2) {  // length == 2
   var length = %_ArgumentsLength();
-  if (length == 2) {
-    if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
-    if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
-    if (arg2 > arg1) return arg2;
-    if (arg1 > arg2) return arg1;
-    if (arg1 == arg2) {
-      // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be
-      // a Smi or a heap number.
-      return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1;
-    }
-    // All comparisons failed, one of the arguments must be NaN.
-    return 0/0;  // Compiler constant-folds this to NaN.
-  }
   if (length == 0) {
     return -1/0;  // Compiler constant-folds this to -Infinity.
   }
@@ -144,7 +131,7 @@
     if (NUMBER_IS_NAN(n)) return n;
     // Make sure +0 is considered greater than -0.  -0 is never a Smi, +0 can be
     // a Smi or heap number.
-    if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+    if (n > r || (r === 0 && n === 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
   }
   return r;
 }
@@ -152,19 +139,6 @@
 // ECMA 262 - 15.8.2.12
 function MathMin(arg1, arg2) {  // length == 2
   var length = %_ArgumentsLength();
-  if (length == 2) {
-    if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1);
-    if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2);
-    if (arg2 > arg1) return arg1;
-    if (arg1 > arg2) return arg2;
-    if (arg1 == arg2) {
-      // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be
-      // a Smi or a heap number.
-      return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2;
-    }
-    // All comparisons failed, one of the arguments must be NaN.
-    return 0/0;  // Compiler constant-folds this to NaN.
-  }
   if (length == 0) {
     return 1/0;  // Compiler constant-folds this to Infinity.
   }
@@ -175,9 +149,9 @@
     var n = %_Arguments(i);
     if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
     if (NUMBER_IS_NAN(n)) return n;
-    // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be a
+    // Make sure -0 is considered less than +0.  -0 is never a Smi, +0 can be a
     // Smi or a heap number.
-    if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+    if (n < r || (r === 0 && n === 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
   }
   return r;
 }
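
The "1 / r < 0" test in MathMin/MathMax works because IEEE-754 comparison cannot tell -0 from +0, while division by a signed zero produces a correspondingly signed infinity. The same checks in C++ (a small runnable sketch; std::signbit is the direct library spelling):

#include <cmath>
#include <cstdio>

int main() {
  double neg_zero = -0.0;
  double pos_zero = 0.0;
  std::printf("-0 == +0    : %d\n", neg_zero == pos_zero);       // 1
  std::printf("1 / -0 < 0  : %d\n", 1.0 / neg_zero < 0.0);       // 1
  std::printf("1 / +0 < 0  : %d\n", 1.0 / pos_zero < 0.0);       // 0
  std::printf("signbit(-0) : %d\n", std::signbit(neg_zero) ? 1 : 0);  // 1
  return 0;
}
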
@@ -215,7 +189,7 @@
 // ECMA 262 - 15.8.2.18
 function MathTan(x) {
   if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  return %_MathTan(x);
+  return %Math_tan(x);
 }
 
 
@@ -265,7 +239,7 @@
 
   // Set up non-enumerable functions of the Math object and
   // set their names.
-  InstallFunctions($Math, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
     "random", MathRandom,
     "abs", MathAbs,
     "acos", MathAcos,
diff --git a/src/messages.cc b/src/messages.cc
index a0793c2..b6ad5ac 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -1,4 +1,5 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -80,11 +81,11 @@
   }
 
   Handle<Object> stack_trace_handle = stack_trace.is_null()
-      ? Handle<Object>::cast(FACTORY->undefined_value())
+      ? FACTORY->undefined_value()
       : Handle<Object>::cast(stack_trace);
 
   Handle<Object> stack_frames_handle = stack_frames.is_null()
-      ? Handle<Object>::cast(FACTORY->undefined_value())
+      ? FACTORY->undefined_value()
       : Handle<Object>::cast(stack_frames);
 
   Handle<JSMessageObject> message =
@@ -126,7 +127,7 @@
       v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
       Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
       v8::MessageCallback callback =
-          FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
+          FUNCTION_CAST<v8::MessageCallback>(callback_obj->address());
       Handle<Object> callback_data(listener.get(1));
       {
         // Do not allow exceptions to propagate.
@@ -148,15 +149,12 @@
           JSFunction::cast(
               Isolate::Current()->js_builtins_object()->
               GetPropertyNoExceptionThrown(*fmt_str)));
-  Handle<Object> argv[] = { data };
+  Object** argv[1] = { data.location() };
 
   bool caught_exception;
   Handle<Object> result =
       Execution::TryCall(fun,
-                         Isolate::Current()->js_builtins_object(),
-                         ARRAY_SIZE(argv),
-                         argv,
-                         &caught_exception);
+          Isolate::Current()->js_builtins_object(), 1, argv, &caught_exception);
 
   if (caught_exception || !result->IsString()) {
     return FACTORY->LookupAsciiSymbol("<error>");
diff --git a/src/messages.js b/src/messages.js
index a3adcf8..a9993af 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,16 +25,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
 // -------------------------------------------------------------------
 //
 // If this object gets passed to an error constructor the error will
 // get an accessor for .message that constructs a descriptive error
 // message on access.
-var kAddMessageAccessorsMarker = { };
+const kAddMessageAccessorsMarker = { };
 
 // This will be lazily initialized when first needed (and forcibly
 // overwritten even though it's const).
-var kMessages = 0;
+const kMessages = 0;
 
 function FormatString(format, message) {
   var args = %MessageGetArguments(message);
@@ -82,7 +83,7 @@
 // objects between script tags in a browser setting.
 function ToStringCheckErrorObject(obj) {
   if (IsNativeErrorObject(obj)) {
-    return %_CallFunction(obj, ErrorToString);
+    return %_CallFunction(obj, errorToString);
   } else {
     return ToString(obj);
   }
@@ -184,20 +185,18 @@
       "define_disallowed",            ["Cannot define property:", "%0", ", object is not extensible."],
       "non_extensible_proto",         ["%0", " is not extensible"],
       "handler_non_object",           ["Proxy.", "%0", " called with non-object as handler"],
-      "proto_non_object",             ["Proxy.", "%0", " called with non-object as prototype"],
-      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
+      "trap_function_expected",       ["Proxy.", "%0", " called with non-function for ", "%1", " trap"],
       "handler_trap_missing",         ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
       "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
-      "handler_returned_false",       ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
-      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
-      "proxy_prop_not_configurable",  ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
-      "proxy_non_object_prop_names",  ["Trap '", "%1", "' returned non-object ", "%0"],
-      "proxy_repeated_prop_name",     ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
+      "handler_returned_false",       ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"],
+      "handler_returned_undefined",   ["Proxy handler ", "%0", " returned undefined for '", "%1", "' trap"],
+      "proxy_prop_not_configurable",  ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"],
+      "proxy_non_object_prop_names",  ["Trap ", "%1", " returned non-object ", "%0"],
+      "proxy_repeated_prop_name",     ["Trap ", "%1", " returned repeated property name ", "%2"],
       "invalid_weakmap_key",          ["Invalid value used as weak map key"],
       // RangeError
       "invalid_array_length",         ["Invalid array length"],
       "stack_overflow",               ["Maximum call stack size exceeded"],
-      "invalid_time_value",           ["Invalid time value"],
       // SyntaxError
       "unable_to_parse",              ["Parse error"],
       "invalid_regexp_flags",         ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
@@ -205,12 +204,11 @@
       "illegal_break",                ["Illegal break statement"],
       "illegal_continue",             ["Illegal continue statement"],
       "illegal_return",               ["Illegal return statement"],
-      "illegal_let",                  ["Illegal let declaration outside extended mode"],
       "error_loading_debugger",       ["Error loading debugger"],
       "no_input_to_regexp",           ["No input to ", "%0"],
       "invalid_json",                 ["String '", "%0", "' is not valid JSON"],
       "circular_structure",           ["Converting circular structure to JSON"],
-      "called_on_non_object",         ["%0", " called on non-object"],
+      "obj_ctor_property_non_object", ["Object.", "%0", " called on non-object"],
       "called_on_null_or_undefined",  ["%0", " called on null or undefined"],
       "array_indexof_not_defined",    ["Array.getIndexOf: Argument undefined"],
       "object_not_extensible",        ["Can't add property ", "%0", ", object is not extensible"],
@@ -242,29 +240,20 @@
       "strict_poison_pill",           ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
       "strict_caller",                ["Illegal access to a strict mode caller function."],
       "unprotected_let",              ["Illegal let declaration in unprotected statement context."],
-      "unprotected_const",            ["Illegal const declaration in unprotected statement context."],
       "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
       "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
-      "harmony_const_assign",         ["Assignment to constant variable."],
-      "invalid_module_path",          ["Module does not export '", "%0", "', or export is not itself a module"],
-      "module_type_error",            ["Module '", "%0", "' used improperly"],
     ];
     var messages = { __proto__ : null };
+    var desc = new PropertyDescriptor();
+    desc.setConfigurable(false);
+    desc.setEnumerable(false);
+    desc.setWritable(false);
     for (var i = 0; i < messagesDictionary.length; i += 2) {
       var key = messagesDictionary[i];
       var format = messagesDictionary[i + 1];
-
-      for (var j = 0; j < format.length; j++) {
-        %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
-                                        DONT_DELETE | READ_ONLY | DONT_ENUM);
-      }
-      %IgnoreAttributesAndSetProperty(format, 'length', format.length,
-                                      DONT_DELETE | READ_ONLY | DONT_ENUM);
-      %PreventExtensions(format);
-      %IgnoreAttributesAndSetProperty(messages,
-                                      key,
-                                      format,
-                                      DONT_DELETE | DONT_ENUM | READ_ONLY);
+      ObjectFreeze(format);
+      desc.setValue(format);
+      DefineOwnProperty(messages, key, desc);
     }
     %PreventExtensions(messages);
     %IgnoreAttributesAndSetProperty(builtins, "kMessages",
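
The messagesDictionary entries above are template part-lists in which parts of the exact form "%0", "%1", ... are argument placeholders. A hypothetical helper showing the substitution scheme (an illustration, not V8's FormatMessage):

#include <string>
#include <vector>

std::string FormatMessageSketch(const std::vector<std::string>& format,
                                const std::vector<std::string>& args) {
  std::string result;
  for (const std::string& part : format) {
    // A two-character "%N" part selects argument N; anything else is literal.
    if (part.size() == 2 && part[0] == '%' &&
        part[1] >= '0' && part[1] <= '9') {
      size_t index = static_cast<size_t>(part[1] - '0');
      if (index < args.size()) result += args[index];
    } else {
      result += part;
    }
  }
  return result;
}

// FormatMessageSketch({"Proxy handler ", "%0", " has no '", "%1", "' trap"},
//                     {"h", "get"})
//   == "Proxy handler h has no 'get' trap"
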
@@ -397,7 +386,7 @@
   }
 
   return new SourceLocation(this, position, line, column, start, end);
-}
+};
 
 
 /**
@@ -427,7 +416,7 @@
   // resource.
   var column = opt_column || 0;
   if (line == 0) {
-    column -= this.column_offset;
+    column -= this.column_offset
   }
 
   var offset_position = opt_offset_position || 0;
@@ -442,8 +431,7 @@
       return null;
     }
 
-    return this.locationFromPosition(
-        this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
+    return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
   }
 }
 
@@ -459,10 +447,8 @@
  *     invalid
  */
 function ScriptSourceSlice(opt_from_line, opt_to_line) {
-  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset
-                                              : opt_from_line;
-  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount()
-                                          : opt_to_line;
+  var from_line = IS_UNDEFINED(opt_from_line) ? this.line_offset : opt_from_line;
+  var to_line = IS_UNDEFINED(opt_to_line) ? this.line_offset + this.lineCount() : opt_to_line
 
   // Adjust according to the offset within the resource.
   from_line -= this.line_offset;
@@ -482,10 +468,8 @@
   var to_position = to_line == 0 ? 0 : line_ends[to_line - 1] + 1;
 
   // Return a source slice with line numbers re-adjusted to the resource.
-  return new SourceSlice(this,
-                         from_line + this.line_offset,
-                         to_line + this.line_offset,
-                          from_position, to_position);
+  return new SourceSlice(this, from_line + this.line_offset, to_line + this.line_offset,
+                         from_position, to_position);
 }
 
 
@@ -518,7 +502,7 @@
 function ScriptLineCount() {
   // Return number of source lines.
   return this.line_ends.length;
-}
+};
 
 
 /**
@@ -535,13 +519,6 @@
   if (this.name) {
     return this.name;
   }
-
-  // The result is cached as on long scripts it takes noticable time to search
-  // for the sourceURL.
-  if (this.hasCachedNameOrSourceURL)
-      return this.cachedNameOrSourceURL;
-  this.hasCachedNameOrSourceURL = true;
-
   // TODO(608): the spaces in a regexp below had to be escaped as \040
   // because this file is being processed by js2c whose handling of spaces
   // in regexps is broken. Also, ['"] are excluded from allowed URLs to
@@ -550,7 +527,6 @@
   // the scanner/parser.
   var source = ToString(this.source);
   var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
-  this.cachedNameOrSourceURL = this.name;
   if (sourceUrlPos > 4) {
     var sourceUrlPattern =
         /\/\/@[\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
@@ -561,17 +537,15 @@
     var match =
         %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
     if (match) {
-      this.cachedNameOrSourceURL =
-          SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+      return SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
     }
   }
-  return this.cachedNameOrSourceURL;
+  return this.name;
 }
 
 
 SetUpLockedPrototype(Script,
-  $Array("source", "name", "line_ends", "line_offset", "column_offset",
-         "cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ),
+  $Array("source", "name", "line_ends", "line_offset", "column_offset"),
   $Array(
     "lineFromPosition", ScriptLineFromPosition,
     "locationFromPosition", ScriptLocationFromPosition,
@@ -593,10 +567,10 @@
  *   position : position within the source
  *   start    : position of start of source context (inclusive)
  *   end      : position of end of source context (not inclusive)
- * Source text for the source context is the character interval
- * [start, end[. In most cases end will point to a newline character.
- * It might point just past the final position of the source if the last
- * source line does not end with a newline character.
+ * Source text for the source context is the character interval [start, end[. In
+ * most cases end will point to a newline character. It might point just past
+ * the final position of the source if the last source line does not end with a
+ * newline character.
  * @param {Script} script The Script object for which this is a location
  * @param {number} position Source position for the location
  * @param {number} line The line number for the location
@@ -614,7 +588,7 @@
   this.end = end;
 }
 
-var kLineLengthLimit = 78;
+const kLineLengthLimit = 78;
 
 /**
  * Restrict source location start and end positions to make the source slice
@@ -663,7 +637,7 @@
       this.end = this.start + limit;
     }
   }
-}
+};
 
 
 /**
@@ -672,11 +646,8 @@
  *     Source text for this location.
  */
 function SourceLocationSourceText() {
-  return %_CallFunction(this.script.source,
-                        this.start,
-                        this.end,
-                        StringSubstring);
-}
+  return %_CallFunction(this.script.source, this.start, this.end, StringSubstring);
+};
 
 
 SetUpLockedPrototype(SourceLocation,
@@ -684,7 +655,7 @@
   $Array(
     "restrict", SourceLocationRestrict,
     "sourceText", SourceLocationSourceText
- )
+  )
 );
 
 
@@ -724,7 +695,7 @@
                         this.from_position,
                         this.to_position,
                         StringSubstring);
-}
+};
 
 SetUpLockedPrototype(SourceSlice,
   $Array("script", "from_line", "to_line", "from_position", "to_position"),
@@ -759,19 +730,24 @@
   // can't rely on 'this' being the same as 'obj'.
   var hasBeenSet = false;
   var value;
-  var getter = function() {
+  function getter() {
     if (hasBeenSet) {
       return value;
     }
     hasBeenSet = true;
     value = fun(obj);
     return value;
-  };
-  var setter = function(v) {
+  }
+  function setter(v) {
     hasBeenSet = true;
     value = v;
-  };
-  %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
+  }
+  var desc = { get: getter,
+               set: setter,
+               enumerable: false,
+               configurable: true };
+  desc = ToPropertyDescriptor(desc);
+  DefineOwnProperty(obj, name, desc, true);
 }
 
 function CallSite(receiver, fun, pos) {
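
DefineOneShotAccessor above installs a getter that computes its value once and caches it, plus a setter that pre-empts the computation. A C++ analogue of the pattern (a sketch of the idea only, assuming nothing about V8's property machinery):

#include <functional>
#include <optional>
#include <utility>

template <typename T>
class OneShot {
 public:
  explicit OneShot(std::function<T()> compute)
      : compute_(std::move(compute)) {}

  const T& get() {
    if (!value_) value_ = compute_();  // Runs at most once.
    return *value_;
  }

  void set(T v) { value_ = std::move(v); }  // Pre-empts the computation.

 private:
  std::function<T()> compute_;
  std::optional<T> value_;
};

// OneShot<int> answer([] { return 6 * 7; });
// answer.get();  // computes 42 and caches it
// answer.set(0); // later reads return 0 without recomputing
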
@@ -782,7 +758,7 @@
 
 function CallSiteGetThis() {
   return this.receiver;
-}
+};
 
 function CallSiteGetTypeName() {
   var constructor = this.receiver.constructor;
@@ -794,33 +770,33 @@
     return %_CallFunction(this.receiver, ObjectToString);
   }
   return constructorName;
-}
+};
 
 function CallSiteIsToplevel() {
   if (this.receiver == null) {
     return true;
   }
   return IS_GLOBAL(this.receiver);
-}
+};
 
 function CallSiteIsEval() {
   var script = %FunctionGetScript(this.fun);
   return script && script.compilation_type == COMPILATION_TYPE_EVAL;
-}
+};
 
 function CallSiteGetEvalOrigin() {
   var script = %FunctionGetScript(this.fun);
   return FormatEvalOrigin(script);
-}
+};
 
 function CallSiteGetScriptNameOrSourceURL() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.nameOrSourceURL() : null;
-}
+};
 
 function CallSiteGetFunction() {
   return this.fun;
-}
+};
 
 function CallSiteGetFunctionName() {
   // See if the function knows its own name
@@ -836,19 +812,15 @@
     return "eval";
   }
   return null;
-}
+};
 
 function CallSiteGetMethodName() {
   // See if we can find a unique property on the receiver that holds
   // this function.
   var ownName = this.fun.name;
   if (ownName && this.receiver &&
-      (%_CallFunction(this.receiver,
-                      ownName,
-                      ObjectLookupGetter) === this.fun ||
-       %_CallFunction(this.receiver,
-                      ownName,
-                      ObjectLookupSetter) === this.fun ||
+      (%_CallFunction(this.receiver, ownName, ObjectLookupGetter) === this.fun ||
+       %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun ||
        this.receiver[ownName] === this.fun)) {
     // To handle DontEnum properties we guess that the method has
     // the same name as the function.
@@ -858,8 +830,7 @@
   for (var prop in this.receiver) {
     if (this.receiver.__lookupGetter__(prop) === this.fun ||
         this.receiver.__lookupSetter__(prop) === this.fun ||
-        (!this.receiver.__lookupGetter__(prop) &&
-         this.receiver[prop] === this.fun)) {
+        (!this.receiver.__lookupGetter__(prop) && this.receiver[prop] === this.fun)) {
       // If we find more than one match bail out to avoid confusion.
       if (name) {
         return null;
@@ -871,12 +842,12 @@
     return name;
   }
   return null;
-}
+};
 
 function CallSiteGetFileName() {
   var script = %FunctionGetScript(this.fun);
   return script ? script.name : null;
-}
+};
 
 function CallSiteGetLineNumber() {
   if (this.pos == -1) {
@@ -888,7 +859,7 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.line + 1 : null;
-}
+};
 
 function CallSiteGetColumnNumber() {
   if (this.pos == -1) {
@@ -900,16 +871,16 @@
     location = script.locationFromPosition(this.pos, true);
   }
   return location ? location.column + 1: null;
-}
+};
 
 function CallSiteIsNative() {
   var script = %FunctionGetScript(this.fun);
   return script ? (script.type == TYPE_NATIVE) : false;
-}
+};
 
 function CallSiteGetPosition() {
   return this.pos;
-}
+};
 
 function CallSiteIsConstructor() {
   var constructor = this.receiver ? this.receiver.constructor : null;
@@ -917,7 +888,7 @@
     return false;
   }
   return this.fun === constructor;
-}
+};
 
 SetUpLockedPrototype(CallSite, $Array("receiver", "fun", "pos"), $Array(
   "getThis", CallSiteGetThis,
@@ -960,13 +931,12 @@
       // eval script originated from "real" source.
       if (eval_from_script.name) {
         eval_origin += " (" + eval_from_script.name;
-        var location = eval_from_script.locationFromPosition(
-            script.eval_from_script_position, true);
+        var location = eval_from_script.locationFromPosition(script.eval_from_script_position, true);
         if (location) {
           eval_origin += ":" + (location.line + 1);
           eval_origin += ":" + (location.column + 1);
         }
-        eval_origin += ")";
+        eval_origin += ")"
       } else {
         eval_origin += " (unknown source)";
       }
@@ -974,7 +944,7 @@
   }
 
   return eval_origin;
-}
+};
 
 function FormatSourcePosition(frame) {
   var fileName;
@@ -983,9 +953,8 @@
     fileLocation = "native";
   } else if (frame.isEval()) {
     fileName = frame.getScriptNameOrSourceURL();
-    if (!fileName) {
+    if (!fileName)
       fileLocation = frame.getEvalOrigin();
-    }
   } else {
     fileName = frame.getFileName();
   }
@@ -1088,19 +1057,19 @@
   if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
     stackTraceLimit = 10000;
   }
-  var raw_stack = %CollectStackTrace(obj,
-                                     cons_opt ? cons_opt : captureStackTrace,
-                                     stackTraceLimit);
+  var raw_stack = %CollectStackTrace(cons_opt
+                                     ? cons_opt
+                                     : captureStackTrace, stackTraceLimit);
   DefineOneShotAccessor(obj, 'stack', function (obj) {
     return FormatRawStackTrace(obj, raw_stack);
   });
-}
+};
 
 
 function SetUpError() {
   // Define special error type constructors.
 
-  var DefineError = function(f) {
+  function DefineError(f) {
     // Store the error function in both the global object
     // and the runtime object. The function is fetched
     // from the runtime object when throwing errors from
@@ -1116,7 +1085,7 @@
       // However, it can't be an instance of the Error object because
       // it hasn't been properly configured yet.  Instead we create a
       // special not-a-true-error-but-close-enough object.
-      var ErrorPrototype = function() {};
+      function ErrorPrototype() {}
       %FunctionSetPrototype(ErrorPrototype, $Object.prototype);
       %FunctionSetInstanceClassName(ErrorPrototype, 'Error');
       %FunctionSetPrototype(f, new ErrorPrototype());
@@ -1157,8 +1126,7 @@
         return new f(m);
       }
     });
-    %SetNativeFlag(f);
-  };
+  }
 
   DefineError(function Error() { });
   DefineError(function TypeError() { });
@@ -1175,42 +1143,42 @@
 
 %SetProperty($Error.prototype, 'message', '', DONT_ENUM);
 
-// Global list of error objects visited during ErrorToString. This is
+// Global list of error objects visited during errorToString. This is
 // used to detect cycles in error toString formatting.
-var visited_errors = new InternalArray();
-var cyclic_error_marker = new $Object();
+const visited_errors = new InternalArray();
+const cyclic_error_marker = new $Object();
 
-function ErrorToStringDetectCycle(error) {
+function errorToStringDetectCycle(error) {
   if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
   try {
     var type = error.type;
-    var name = error.name;
-    name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
-    var message = error.message;
     var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
     if (type && !hasMessage) {
-      message = FormatMessage(%NewMessageObject(type, error.arguments));
+      var formatted = FormatMessage(%NewMessageObject(type, error.arguments));
+      return error.name + ": " + formatted;
     }
-    message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
-    if (name === "") return message;
-    if (message === "") return name;
-    return name + ": " + message;
+    var message = hasMessage ? (": " + error.message) : "";
+    return error.name + message;
   } finally {
     visited_errors.length = visited_errors.length - 1;
   }
 }
 
-function ErrorToString() {
-  if (!IS_SPEC_OBJECT(this)) {
-    throw MakeTypeError("called_on_non_object", ["Error.prototype.toString"]);
+function errorToString() {
+  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+    throw MakeTypeError("called_on_null_or_undefined",
+                        ["Error.prototype.toString"]);
   }
+  // This helper function is needed because access to properties on
+  // the builtins object does not work inside a catch clause.
+  function isCyclicErrorMarker(o) { return o === cyclic_error_marker; }
 
   try {
-    return ErrorToStringDetectCycle(this);
+    return errorToStringDetectCycle(this);
   } catch(e) {
     // If this error message was encountered already return the empty
     // string for it instead of recursively formatting it.
-    if (e === cyclic_error_marker) {
+    if (isCyclicErrorMarker(e)) {
       return '';
     }
     throw e;
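
The visited_errors list and cyclic_error_marker above implement cycle detection along the current formatting path, popping on the way out. The same idea in self-contained form (Err is a hypothetical stand-in for an error object with a "cause" chain):

#include <string>
#include <unordered_set>

struct Err {
  std::string name;
  const Err* cause = nullptr;  // May point back into the chain.
};

std::string FormatChain(const Err* e, std::unordered_set<const Err*>& path) {
  if (e == nullptr) return "";
  if (!path.insert(e).second) return "<cycle>";  // Already on the path.
  std::string out = e->name;
  if (e->cause != nullptr) out += ": " + FormatChain(e->cause, path);
  path.erase(e);  // Pop, mirroring visited_errors.length -= 1 above.
  return out;
}
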
@@ -1218,8 +1186,8 @@
 }
 
 
-InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
+InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);
 
 // Boilerplate for exceptions for stack overflows. Used from
 // Isolate::StackOverflow().
-var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
+const kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 2ff4710..c4c4fd2 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -30,14 +30,13 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
 #define V8_MIPS_ASSEMBLER_MIPS_INL_H_
 
 #include "mips/assembler-mips.h"
-
 #include "cpu.h"
 #include "debug.h"
 
@@ -79,15 +78,6 @@
 }
 
 
-int FPURegister::ToAllocationIndex(FPURegister reg) {
-  ASSERT(reg.code() % 2 == 0);
-  ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
-  ASSERT(reg.is_valid());
-  ASSERT(!reg.is(kDoubleRegZero));
-  ASSERT(!reg.is(kLithiumScratchDouble));
-  return (reg.code() / 2);
-}
-
 
 // -----------------------------------------------------------------------------
 // RelocInfo.
@@ -117,42 +107,19 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) ||
-         rmode_ == RUNTIME_ENTRY ||
-         rmode_ == EMBEDDED_OBJECT ||
-         rmode_ == EXTERNAL_REFERENCE);
-  // Read the address of the word containing the target_address in an
-  // instruction stream.
-  // The only architecture-independent user of this function is the serializer.
-  // The serializer uses it to find out how many raw bytes of instruction to
-  // output before the next target.
-  // For an instruction like LUI/ORI where the target bits are mixed into the
-  // instruction bits, the size of the target will be zero, indicating that the
-  // serializer should not step forward in memory after a target is resolved
-  // and written. In this case the target_address_address function should
-  // return the end of the instructions to be patched, allowing the
-  // deserializer to deserialize the instructions as raw bytes and put them in
-  // place, ready to be patched with the target. After jump optimization,
-  // that is the address of the instruction that follows J/JAL/JR/JALR
-  // instruction.
-  return reinterpret_cast<Address>(
-    pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
 }
 
 
 int RelocInfo::target_address_size() {
-  return Assembler::kSpecialTargetSize;
+  return Assembler::kExternalTargetSize;
 }
 
 
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
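Background for the reverted comment: on MIPS a 32-bit target is materialized by a LUI (load upper immediate) / ORI pair, so patching a target address means rewriting two 16-bit immediate fields. The split and reassembly, sketched with invented helper names:

#include <cstdint>
#include <cstdio>

uint16_t UpperHalf(uint32_t target) { return target >> 16; }
uint16_t LowerHalf(uint32_t target) { return target & 0xFFFF; }

uint32_t Reassemble(uint16_t hi, uint16_t lo) {
  return (static_cast<uint32_t>(hi) << 16) | lo;
}

int main() {
  uint32_t target = 0x12345678;
  // lui rt, UpperHalf(target);  ori rt, rt, LowerHalf(target)
  uint32_t patched = Reassemble(UpperHalf(target), LowerHalf(target));
  std::printf("%08x\n", static_cast<unsigned>(patched));  // 12345678
  return 0;
}
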
@@ -162,7 +129,7 @@
 }
 
 
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_)));
@@ -179,15 +146,9 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-  if (mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-  }
 }
 
 
@@ -215,17 +176,10 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
-                                WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
-  if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
-  }
 }
 
 
@@ -246,11 +200,6 @@
   // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
   // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
   Assembler::set_target_address_at(pc_, target);
-  if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -293,19 +242,24 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(this);
+    Object** p = target_object_address();
+    Object* orig = *p;
+    visitor->VisitPointer(p);
+    if (*p != orig) {
+      set_target_object(*p);
+    }
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(this);
+    visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
   } else if (((RelocInfo::IsJSReturn(mode) &&
-              IsPatchedReturnSequence()) ||
-             (RelocInfo::IsDebugBreakSlot(mode) &&
-             IsPatchedDebugBreakSlotSequence())) &&
+               IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence())) &&
              Isolate::Current()->debug()->has_break_points()) {
     visitor->VisitDebugTarget(this);
 #endif
@@ -319,13 +273,13 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, this);
+    StaticVisitor::VisitPointer(heap, target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(this);
+    StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 0d7f921..e01a0ca 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #include "v8.h"
@@ -74,9 +74,7 @@
 
 
 void CpuFeatures::Probe() {
-  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
-                                CpuFeaturesImpliedByCompiler());
-  ASSERT(supported_ == 0 || supported_ == standard_features);
+  ASSERT(!initialized_);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -84,7 +82,8 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also allowed for generated code in the
   // snapshot.
-  supported_ |= standard_features;
+  supported_ |= OS::CpuFeaturesImpliedByPlatform();
+  supported_ |= CpuFeaturesImpliedByCompiler();
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -301,7 +300,7 @@
     own_buffer_ = false;
   }
 
-  // Set up buffer pointers.
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
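
Both versions of Probe() start from the same baseline: the union of the features the host platform guarantees and the features the compiler flags already assume. Only that baseline may be used by snapshot code, since a snapshot must run on any target. A small sketch of the union (feature bits invented for illustration):

    #include <cstdint>

    // Invented feature bits, for illustration only.
    static const uint64_t kFPU      = 1u << 0;
    static const uint64_t kMips32r2 = 1u << 1;

    static uint64_t CpuFeaturesImpliedByPlatform() {
      return kFPU;  // e.g. the OS ABI mandates a hard-float unit.
    }

    static uint64_t CpuFeaturesImpliedByCompiler() {
    #if defined(__mips_hard_float)
      return kFPU;  // Compiled code already assumes FPU instructions.
    #else
      return 0;
    #endif
    }

    // The baseline both Probe() variants fold into supported_.
    uint64_t BaselineFeatures() {
      return CpuFeaturesImpliedByPlatform() | CpuFeaturesImpliedByCompiler();
    }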
@@ -337,7 +336,7 @@
 
 void Assembler::GetCode(CodeDesc* desc) {
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Set up code descriptor.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -850,6 +849,7 @@
   return rmode != RelocInfo::NONE;
 }
 
+
 void Assembler::GenInstrRegister(Opcode opcode,
                                  Register rs,
                                  Register rt,
@@ -1244,7 +1244,6 @@
 
 
 void Assembler::andi(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
   GenInstrImmediate(ANDI, rs, rt, j);
 }
 
@@ -1255,7 +1254,6 @@
 
 
 void Assembler::ori(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
   GenInstrImmediate(ORI, rs, rt, j);
 }
 
@@ -1266,7 +1264,6 @@
 
 
 void Assembler::xori(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
   GenInstrImmediate(XORI, rs, rt, j);
 }
 
@@ -1318,7 +1315,7 @@
 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   emit(instr);
@@ -1328,7 +1325,7 @@
 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   // Should be called via MacroAssembler::Ror.
   ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
   emit(instr);
@@ -1447,7 +1444,6 @@
 
 
 void Assembler::lui(Register rd, int32_t j) {
-  ASSERT(is_uint16(j));
   GenInstrImmediate(LUI, zero_reg, rd, j);
 }
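
The ASSERT(is_uint16(j)) lines dropped here guard the 16-bit immediate field: ANDI/ORI/XORI/LUI zero-extend a 16-bit operand, so anything outside [0, 0xFFFF] would be silently truncated at encode time. A self-contained sketch of the check and the field it protects:

    #include <cassert>
    #include <cstdint>

    // True when x fits the 16-bit immediate field of a MIPS I-type
    // instruction (andi/ori/xori/lui zero-extend this field).
    inline bool is_uint16(int32_t x) {
      return static_cast<uint32_t>(x) <= 0xFFFFu;
    }

    // Encode "ori rt, rs, imm" (opcode 0b001101).
    inline uint32_t EncodeOri(uint32_t rs, uint32_t rt, int32_t imm) {
      assert(is_uint16(imm));  // The guard this revert removes.
      return (0x0Du << 26) | (rs << 21) | (rt << 16) |
             (static_cast<uint32_t>(imm) & 0xFFFFu);
    }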
 
@@ -1603,7 +1599,7 @@
 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ins.
   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
 }
 
@@ -1611,7 +1607,7 @@
 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ext.
   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
 }
 
@@ -1771,25 +1767,25 @@
 
 
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }
 
 
 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }
 
 
 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
 }
 
 
 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
 }
 
@@ -1830,7 +1826,7 @@
 
 
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
 
@@ -1846,7 +1842,7 @@
 
 
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  ASSERT(mips32r2);
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
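
Every emitter in this run (rotr, rotrv, ins_, ext_, and the 64-bit cvt/trunc forms) is fenced by the same predicate: these encodings exist only on MIPS32 Release 2. The two spellings are equivalent; a sketch of the relationship (constants invented):

    #include <cassert>

    enum ArchVariant { kMips32r1, kMips32r2 };
    static const ArchVariant kArchVariant = kMips32r2;          // later spelling
    static const bool mips32r2 = (kArchVariant == kMips32r2);   // 3.6 spelling

    void GuardR2OnlyInstruction() {
      // One predicate, two spellings: a plain bool in 3.6, an enum
      // comparison after the kArchVariant refactor.
      assert(mips32r2);
      assert(kArchVariant == kMips32r2);
    }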
 
@@ -1973,7 +1969,7 @@
   }
   CHECK_GT(desc.buffer_size, 0);  // No overflow.
 
-  // Set up new buffer.
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
 
   desc.instr_size = pc_offset();
@@ -2022,8 +2018,7 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  // We do not try to reuse pool constants.
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2046,7 +2041,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 8b877f6..38e9537 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 
 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_
@@ -182,7 +182,12 @@
       kNumReservedRegisters;
 
 
-  inline static int ToAllocationIndex(FPURegister reg);
+  static int ToAllocationIndex(FPURegister reg) {
+    ASSERT(reg.code() % 2 == 0);
+    ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+    ASSERT(reg.is_valid());
+    return (reg.code() / 2);
+  }
 
   static FPURegister FromAllocationIndex(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
@@ -297,15 +302,7 @@
 const FPURegister f30 = { 30 };
 const FPURegister f31 = { 31 };
 
-// Register aliases.
-// cp is assumed to be a callee saved register.
-static const Register& kLithiumScratchReg = s3;  // Scratch register.
-static const Register& kLithiumScratchReg2 = s4;  // Scratch register.
-static const Register& kRootRegister = s6;  // Roots array pointer.
-static const Register& cp = s7;     // JavaScript context pointer.
-static const Register& fp = s8_fp;  // Alias for fp.
-static const DoubleRegister& kLithiumScratchDouble = f30;
-static const FPURegister& kDoubleRegZero = f28;
+const FPURegister kDoubleRegZero = f28;
 
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
@@ -553,13 +550,10 @@
   static void JumpLabelToJumpRegister(Address pc);
 
   // This sets the branch destination (which gets loaded at the call address).
-  // This is for calls and branches within generated code.  The serializer
-  // has already deserialized the lui/ori instructions etc.
-  inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Address target) {
-    set_target_address_at(
-        instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
-        target);
+  // This is for calls and branches within generated code.
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
   }
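
set_target_at patches in place, while the newer deserialization helper first steps back kInstructionsFor32BitConstant instructions: on MIPS a 32-bit address never fits one instruction, so it is split across a lui/ori pair and the patch must start at the lui. A sketch of that split, using raw encodings rather than V8 types:

    #include <cstdint>

    // A 32-bit constant is materialized as:
    //   lui rt, hi16        ; rt = hi16 << 16
    //   ori rt, rt, lo16    ; rt |= lo16
    struct LuiOriPair { uint32_t lui, ori; };

    LuiOriPair EncodeLoad32(uint32_t rt, uint32_t addr) {
      LuiOriPair p;
      p.lui = (0x0Fu << 26) | (rt << 16) | (addr >> 16);                // lui
      p.ori = (0x0Du << 26) | (rt << 21) | (rt << 16) | (addr & 0xFFFFu);
      return p;
    }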
 
   // This sets the branch destination.
@@ -581,7 +575,8 @@
   // are split across two consecutive instructions and don't exist separately
   // in the code, so the serializer should not step forwards in memory after
   // a target is resolved and written.
-  static const int kSpecialTargetSize = 0;
+  static const int kCallTargetSize = 0 * kInstrSize;
+  static const int kExternalTargetSize = 0 * kInstrSize;
 
   // Number of consecutive instructions used to store 32bit constant.
   // Before jump-optimizations, this constant was used in
@@ -672,7 +667,7 @@
   // Never use the int16_t b(l)cond version with a branch offset
   // instead of using the Label* version.
 
-  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.

   void j(int32_t target);
   void jal(int32_t target);
   void jalr(Register rs, Register rd = ra);
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index eeb84c3..d772304 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,45 +67,33 @@
     ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToExternalReference expects s0 to contain the number of arguments
+  // JumpToExternalReference expects a0 to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ Addu(s0, a0, num_extra_args + 1);
-  __ sll(s1, s0, kPointerSizeLog2);
-  __ Subu(s1, s1, kPointerSize);
+  __ Addu(a0, a0, Operand(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
 
-// Load the built-in InternalArray function from the current context.
-static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
-                                              Register result) {
-  // Load the global context.
-
-  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  __ lw(result,
-        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
-  // Load the InternalArray function from the global context.
-  __ lw(result,
-         MemOperand(result,
-                    Context::SlotOffset(
-                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
-}
-
-
 // Load the built-in Array function from the current context.
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
   // Load the global context.
 
   __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(result,
-        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
+         FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
   // Load the Array function from the global context.
   __ lw(result,
-        MemOperand(result,
-                   Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+         MemOperand(result,
+                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
 }
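
The three lw instructions walk a fixed chain: current context -> global object (slot GLOBAL_INDEX) -> global context -> Array function slot. In simplified C terms (field names are stand-ins for the real slot offsets):

    // Simplified shape of the lookup; real V8 uses tagged slot offsets.
    struct Context;

    struct GlobalObject {
      Context* global_context;   // GlobalObject::kGlobalContextOffset
    };

    struct Context {
      GlobalObject* global;      // slot Context::GLOBAL_INDEX
      void* slots[64];           // incl. ARRAY_FUNCTION_INDEX
    };

    void* LoadArrayFunction(Context* cp, int kArrayFunctionIndex) {
      GlobalObject* global = cp->global;                    // lw #1
      Context* global_context = global->global_context;    // lw #2
      return global_context->slots[kArrayFunctionIndex];   // lw #3
    }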
 
 
+// This constant has the same value as JSArray::kPreallocatedArrayElements and
+// if JSArray::kPreallocatedArrayElements is changed handling of loop unfolding
+// below should be reconsidered.
+static const int kLoopUnfoldLimit = 4;
+
+
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. An elements backing store is allocated with size initial_capacity
 // and filled with the hole values.
@@ -115,17 +103,16 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
+                                 int initial_capacity,
                                  Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  ASSERT(initial_capacity > 0);
+  // Load the initial map from the array function.
+  __ lw(scratch1, FieldMemOperand(array_function,
+                                  JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
-  int size = JSArray::kSize;
-  if (initial_capacity > 0) {
-    size += FixedArray::SizeFor(initial_capacity);
-  }
+  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
   __ AllocateInNewSpace(size,
                         result,
                         scratch2,
@@ -144,11 +131,6 @@
   __ mov(scratch3,  zero_reg);
   __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
 
-  if (initial_capacity == 0) {
-    __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
-    return;
-  }
-
   // Calculate the location of the elements array and set elements array member
   // of the JSArray.
   // result: JSObject
@@ -165,31 +147,21 @@
   // scratch1: elements array (untagged)
   // scratch2: start of next object
   __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
-  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
+  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
   __ li(scratch3,  Operand(Smi::FromInt(initial_capacity)));
-  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
+  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ sw(scratch3, MemOperand(scratch1));
   __ Addu(scratch1, scratch1, kPointerSize);
 
-  // Fill the FixedArray with the hole value. Inline the code if short.
-  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
+  // Fill the FixedArray with the hole value.
+  ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+  ASSERT(initial_capacity <= kLoopUnfoldLimit);
   __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  static const int kLoopUnfoldLimit = 4;
-  if (initial_capacity <= kLoopUnfoldLimit) {
-    for (int i = 0; i < initial_capacity; i++) {
-      __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
-    }
-  } else {
-    Label loop, entry;
-    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
-    __ Branch(&entry);
-    __ bind(&loop);
+  for (int i = 0; i < initial_capacity; i++) {
     __ sw(scratch3, MemOperand(scratch1));
     __ Addu(scratch1, scratch1, kPointerSize);
-    __ bind(&entry);
-    __ Branch(&loop, lt, scratch1, Operand(scratch2));
   }
 }
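
kLoopUnfoldLimit bounds the inline fill: up to four hole stores are emitted straight-line, and the code removed above falls back to an emitted branch loop for larger capacities, which 3.6 never needs because the capacity is a compile-time constant no greater than 4. A host-side sketch of the same unroll-or-loop decision (the printed mnemonics are illustrative):

    #include <cstdio>

    static const int kLoopUnfoldLimit = 4;

    // Decide how the builtin fills 'capacity' slots with the hole value.
    void EmitFillWithHole(int capacity) {
      if (capacity <= kLoopUnfoldLimit) {
        // Straight-line: one store per slot, no branches.
        for (int i = 0; i < capacity; i++)
          std::printf("sw hole, %d(elements)\n", i * 4);
      } else {
        // Emitted loop: constant code size regardless of capacity.
        std::printf("loop: sw hole, 0(ptr); addiu ptr, ptr, 4; "
                    "blt ptr, end, loop\n");
      }
    }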
 
@@ -205,7 +177,7 @@
 // register elements_array_storage is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
+                            Register array_size,  // As a smi.
                             Register result,
                             Register elements_array_storage,
                             Register elements_array_end,
@@ -213,16 +185,31 @@
                             Register scratch2,
                             bool fill_with_hole,
                             Label* gc_required) {
-  // Load the initial map from the array function.
-  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
+  Label not_empty, allocated;
 
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ Assert(
-        ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
-  }
+  // Load the initial map from the array function.
+  __ lw(elements_array_storage,
+         FieldMemOperand(array_function,
+                         JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ Branch(&not_empty, ne, array_size, Operand(zero_reg));
+
+  // If an empty array is requested, allocate a small elements array anyway. This
+  // keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize +
+             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch1,
+                        gc_required,
+                        TAG_OBJECT);
+  __ Branch(&allocated);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested number of elements.
+  __ bind(&not_empty);
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ li(elements_array_end,
         (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
@@ -241,6 +228,7 @@
   // result: JSObject
   // elements_array_storage: initial map
   // array_size: size of array (smi)
+  __ bind(&allocated);
   __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
   __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
   __ sw(elements_array_storage,
@@ -274,6 +262,8 @@
   // the actual JSArray has length 0 and the size of the JSArray for non-empty
   // JSArrays. The length of a FixedArray is stored as a smi.
   STATIC_ASSERT(kSmiTag == 0);
+  __ li(at, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+  __ movz(array_size, at, array_size);
 
   ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
   __ sw(array_size, MemOperand(elements_array_storage));
@@ -322,22 +312,21 @@
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
   Counters* counters = masm->isolate()->counters();
-  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
+  Label argc_one_or_more, argc_two_or_more;
 
   // Check for array construction with zero arguments or one.
   __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
   // Handle construction of an empty array.
-  __ bind(&empty_array);
   AllocateEmptyJSArray(masm,
                        a1,
                        a2,
                        a3,
                        t0,
                        t1,
+                       JSArray::kPreallocatedArrayElements,
                        call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
-  // Set up return value, remove receiver from stack and return.
+  // Setup return value, remove receiver from stack and return.
   __ mov(v0, a2);
   __ Addu(sp, sp, Operand(kPointerSize));
   __ Ret();
@@ -349,12 +338,6 @@
 
   STATIC_ASSERT(kSmiTag == 0);
   __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
-  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
-  __ Drop(1);  // Adjust stack.
-  __ mov(a0, zero_reg);  // Treat this as a call with argc of zero.
-  __ Branch(&empty_array);
-
-  __ bind(&not_empty_array);
   __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
   __ Branch(call_generic_code, eq, a3, Operand(zero_reg));
 
@@ -380,7 +363,7 @@
                   call_generic_code);
   __ IncrementCounter(counters->array_function_native(), 1, a2, t0);
 
-  // Set up return value, remove receiver and argument from stack and return.
+  // Setup return value, remove receiver and argument from stack and return.
   __ mov(v0, a3);
   __ Addu(sp, sp, Operand(2 * kPointerSize));
   __ Ret();
@@ -415,22 +398,14 @@
   // sp[0]: last argument
 
   Label loop, entry;
-  __ Branch(USE_DELAY_SLOT, &entry);
-  __ mov(t3, sp);
+  __ Branch(&entry);
   __ bind(&loop);
-  __ lw(a2, MemOperand(t3));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(a2, &has_non_smi_element);
-  }
-  __ Addu(t3, t3, kPointerSize);
+  __ pop(a2);
   __ Addu(t1, t1, -kPointerSize);
   __ sw(a2, MemOperand(t1));
   __ bind(&entry);
   __ Branch(&loop, lt, t0, Operand(t1));
 
-  __ bind(&finish);
-  __ mov(sp, t3);
-
   // Remove caller arguments and receiver from the stack, setup return value and
   // return.
   // a0: argc
@@ -439,77 +414,6 @@
   __ Addu(sp, sp, Operand(kPointerSize));
   __ mov(v0, a3);
   __ Ret();
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(
-      a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(a3, t0);
-  __ Branch(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
-  // a3: JSArray
-  __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         a2,
-                                         t5,
-                                         &cant_transition_map);
-  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ RecordWriteField(a3,
-                      HeapObject::kMapOffset,
-                      a2,
-                      t5,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  Label loop2;
-  __ bind(&loop2);
-  __ lw(a2, MemOperand(t3));
-  __ Addu(t3, t3, kPointerSize);
-  __ Subu(t1, t1, kPointerSize);
-  __ sw(a2, MemOperand(t1));
-  __ Branch(&loop2, lt, t0, Operand(t1));
-  __ Branch(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0     : number of arguments
-  //  -- ra     : return address
-  //  -- sp[...]: constructor arguments
-  // -----------------------------------
-  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
-
-  // Get the InternalArray function.
-  GenerateLoadInternalArrayFunction(masm, a1);
-
-  if (FLAG_debug_code) {
-    // Initial map for the builtin InternalArray functions should be maps.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ And(t0, a2, Operand(kSmiTagMask));
-    __ Assert(ne, "Unexpected initial map for InternalArray function",
-              t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t0);
-    __ Assert(eq, "Unexpected initial map for InternalArray function",
-              t0, Operand(MAP_TYPE));
-  }
-
-  // Run the native code for the InternalArray function called as a normal
-  // function.
-  ArrayNativeCode(masm, &generic_array_code);
-
-  // Jump to the generic array code if the specialized code cannot handle the
-  // construction.
-  __ bind(&generic_array_code);
-
-  Handle<Code> array_code =
-      masm->isolate()->builtins()->InternalArrayCodeGeneric();
-  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
 
 
@@ -683,11 +587,10 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(v0);
-    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  }
+  __ EnterInternalFrame();
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ LeaveInternalFrame();
   __ pop(function);
   __ mov(argument, v0);
   __ Branch(&argument_is_string);
@@ -703,18 +606,15 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(argument);
-    __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(argument);
+  __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  __ LeaveInternalFrame();
   __ Ret();
 }
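
This is the same transformation applied throughout the file: the newer code opens a FrameScope whose destructor emits the frame teardown, while 3.6 pairs EnterInternalFrame/LeaveInternalFrame by hand. A generic RAII sketch of the contract (names invented, not V8's real classes):

    #include <cstdio>

    struct MacroAssembler {
      void EnterInternalFrame() { std::puts("enter internal frame"); }
      void LeaveInternalFrame() { std::puts("leave internal frame"); }
    };

    // RAII pairing: teardown is emitted when the C++ block ends, on
    // every path, so Enter/Leave can never get out of sync.
    class FrameScope {
     public:
      explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~FrameScope() { masm_->LeaveInternalFrame(); }
     private:
      MacroAssembler* masm_;
    };

    void Example(MacroAssembler* masm) {
      FrameScope scope(masm);   // enter
      // ... emit calls that need a frame ...
    }                           // leave, emitted automatically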
 
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -722,6 +622,38 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ And(t0, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // Jump to the function-specific construct stub.
+  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+  __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(t9);
+
+  // a0: number of arguments
+  // a1: called object
+  __ bind(&non_function_call);
+  // CALL_NON_FUNCTION expects the non-function constructor as receiver
+  // (instead of the original receiver from the call site). The receiver is
+  // stack element argc.
+  // Set expected number of arguments to zero (not changing a0).
+  __ mov(a2, zero_reg);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(t1, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
@@ -735,323 +667,331 @@
   // -----------------------------------
 
   // Enter a construct frame.
-  {
-    FrameScope scope(masm, StackFrame::CONSTRUCT);
+  __ EnterConstructFrame();
 
-    // Preserve the two incoming parameters on the stack.
-    __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
-    __ MultiPushReversed(a0.bit() | a1.bit());
+  // Preserve the two incoming parameters on the stack.
+  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+  __ MultiPushReversed(a0.bit() | a1.bit());
 
-    // Use t7 to hold undefined, which is used in several places below.
-    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+  // Use t7 to hold undefined, which is used in several places below.
+  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
 
-    Label rt_call, allocated;
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    if (FLAG_inline_new) {
-      Label undo_allocation;
+  Label rt_call, allocated;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  if (FLAG_inline_new) {
+    Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(isolate);
-      __ li(a2, Operand(debug_step_in_fp));
-      __ lw(a2, MemOperand(a2));
-      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(isolate);
+    __ li(a2, Operand(debug_step_in_fp));
+    __ lw(a2, MemOperand(a2));
+    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
 #endif
 
-      // Load the initial map and verify that it is in fact a map.
-      // a1: constructor function
-      __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-      __ JumpIfSmi(a2, &rt_call);
-      __ GetObjectType(a2, a3, t4);
-      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+    // Load the initial map and verify that it is in fact a map.
+    // a1: constructor function
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ And(t0, a2, Operand(kSmiTagMask));
+    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+    __ GetObjectType(a2, a3, t4);
+    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
 
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // a1: constructor function
-      // a2: initial map
-      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+    // Check that the constructor is not constructing a JSFunction (see comments
+    // in Runtime_NewObject in runtime.cc). In which case the initial map's
+    // instance type would be JS_FUNCTION_TYPE.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+      MemOperand constructor_count =
+         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+      __ lbu(t0, constructor_count);
+      __ Subu(t0, t0, Operand(1));
+      __ sb(t0, constructor_count);
+      __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+      __ Push(a1, a2);
+
+      __ push(a1);  // Constructor.
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(a2);
+      __ pop(a1);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    // a1: constructor function
+    // a2: initial map
+    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+    // Allocated the JSObject, now initialize the fields. Map is set to initial
+    // map and properties and elements are set to empty fixed array.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size
+    // t4: JSObject (not tagged)
+    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(t5, t4);
+    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+    __ Addu(t5, t5, Operand(3*kPointerSize));
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+    // Fill all the in-object properties with appropriate filler.
+    // a1: constructor function
+    // a2: initial map
+    // a3: object size (in words)
+    // t4: JSObject (not tagged)
+    // t5: First in-object property of JSObject (not tagged)
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t6, t4, t0);   // End of object.
+    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+    { Label loop, entry;
       if (count_constructions) {
-        Label allocate;
-        // Decrease generous allocation count.
-        __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-        MemOperand constructor_count =
-           FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
-        __ lbu(t0, constructor_count);
-        __ Subu(t0, t0, Operand(1));
-        __ sb(t0, constructor_count);
-        __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
-        __ Push(a1, a2);
-
-        __ push(a1);  // Constructor.
-        // The call will replace the stub, so the countdown is only done once.
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ pop(a2);
-        __ pop(a1);
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      // a1: constructor function
-      // a2: initial map
-      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-      __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
-      // Allocated the JSObject, now initialize the fields. Map is set to
-      // initial map and properties and elements are set to empty fixed array.
-      // a1: constructor function
-      // a2: initial map
-      // a3: object size
-      // t4: JSObject (not tagged)
-      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-      __ mov(t5, t4);
-      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-      __ Addu(t5, t5, Operand(3*kPointerSize));
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-      // Fill all the in-object properties with appropriate filler.
-      // a1: constructor function
-      // a2: initial map
-      // a3: object size (in words)
-      // t4: JSObject (not tagged)
-      // t5: First in-object property of JSObject (not tagged)
-      __ sll(t0, a3, kPointerSizeLog2);
-      __ addu(t6, t4, t0);   // End of object.
-      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-      if (count_constructions) {
-        __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-                kBitsPerByte);
-        __ sll(t0, a0, kPointerSizeLog2);
-        __ addu(a0, t5, t0);
-        // a0: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ Assert(le, "Unexpected number of pre-allocated property fields.",
-              a0, Operand(t6));
-        }
-        __ InitializeFieldsWithFiller(t5, a0, t7);
         // To allow for truncation.
         __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+      } else {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
       }
-      __ InitializeFieldsWithFiller(t5, t6, t7);
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on. Any
-      // failures need to undo the allocation, so that the heap is in a
-      // consistent state and verifiable.
-      __ Addu(t4, t4, Operand(kHeapObjectTag));
-
-      // Check if a non-empty properties array is needed. Continue with
-      // allocated object if not fall through to runtime call if it is.
-      // a1: constructor function
-      // t4: JSObject
-      // t5: start of next object (not tagged)
-      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-      // The field instance sizes contains both pre-allocated property fields
-      // and in-object properties.
-      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-      __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
-             kBitsPerByte);
-      __ Addu(a3, a3, Operand(t6));
-      __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
-              kBitsPerByte);
-      __ subu(a3, a3, t6);
-
-      // Done if no extra properties are to be allocated.
-      __ Branch(&allocated, eq, a3, Operand(zero_reg));
-      __ Assert(greater_equal, "Property allocation count failed.",
-          a3, Operand(zero_reg));
-
-      // Scale the number of elements by pointer size and add the header for
-      // FixedArrays to the start of the next object calculation from above.
-      // a1: constructor
-      // a3: number of elements in properties array
-      // t4: JSObject
-      // t5: start of next object
-      __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
-      __ AllocateInNewSpace(
-          a0,
-          t5,
-          t6,
-          a2,
-          &undo_allocation,
-          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
-      // Initialize the FixedArray.
-      // a1: constructor
-      // a3: number of elements in properties array (untagged)
-      // t4: JSObject
-      // t5: start of next object
-      __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
-      __ mov(a2, t5);
-      __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
-      __ sll(a0, a3, kSmiTagSize);
-      __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
-      __ Addu(a2, a2, Operand(2 * kPointerSize));
-
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-
-      // Initialize the fields to undefined.
-      // a1: constructor
-      // a2: First element of FixedArray (not tagged)
-      // a3: number of elements in properties array
-      // t4: JSObject
-      // t5: FixedArray (not tagged)
-      __ sll(t3, a3, kPointerSizeLog2);
-      __ addu(t6, a2, t3);  // End of object.
-      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-      { Label loop, entry;
-        if (count_constructions) {
-          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-        } else if (FLAG_debug_code) {
-          __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
-          __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
-        }
-        __ jmp(&entry);
-        __ bind(&loop);
-        __ sw(t7, MemOperand(a2));
-        __ addiu(a2, a2, kPointerSize);
-        __ bind(&entry);
-        __ Branch(&loop, less, a2, Operand(t6));
-      }
-
-      // Store the initialized FixedArray into the properties field of
-      // the JSObject.
-      // a1: constructor function
-      // t4: JSObject
-      // t5: FixedArray (not tagged)
-      __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
-      __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
-
-      // Continue with JSObject being successfully allocated.
-      // a1: constructor function
-      // a4: JSObject
-      __ jmp(&allocated);
-
-      // Undo the setting of the new top so that the heap is verifiable. For
-      // example, the map's unused properties potentially do not match the
-      // allocated objects unused properties.
-      // t4: JSObject (previous new top)
-      __ bind(&undo_allocation);
-      __ UndoAllocationInNewSpace(t4, t5);
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(t5, 0));
+      __ addiu(t5, t5, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, Uless, t5, Operand(t6));
     }
 
-    __ bind(&rt_call);
-    // Allocate the new receiver object using the runtime call.
-    // a1: constructor function
-    __ push(a1);  // Argument for Runtime_NewObject.
-    __ CallRuntime(Runtime::kNewObject, 1);
-    __ mov(t4, v0);
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    __ Addu(t4, t4, Operand(kHeapObjectTag));
 
-    // Receiver for constructor call allocated.
+    // Check if a non-empty properties array is needed. Continue with allocated
+    // object if not fall through to runtime call if it is.
+    // a1: constructor function
     // t4: JSObject
-    __ bind(&allocated);
-    __ push(t4);
-    __ push(t4);
+    // t5: start of next object (not tagged)
+    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+    // The field instance sizes contains both pre-allocated property fields and
+    // in-object properties.
+    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+    __ And(t6,
+           a0,
+           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+    __ Addu(a3, a3, Operand(t0));
+    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+    __ subu(a3, a3, t0);
 
-    // Reload the number of arguments from the stack.
-    // sp[0]: receiver
-    // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
-    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-    __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+    // Done if no extra properties are to be allocated.
+    __ Branch(&allocated, eq, a3, Operand(zero_reg));
+    __ Assert(greater_equal, "Property allocation count failed.",
+        a3, Operand(zero_reg));
 
-    // Set up pointer to last argument.
-    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // a1: constructor
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: start of next object
+    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    __ AllocateInNewSpace(
+        a0,
+        t5,
+        t6,
+        a2,
+        &undo_allocation,
+        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
 
-    // Set up number of arguments for function call below.
-    __ srl(a0, a3, kSmiTagSize);
+    // Initialize the FixedArray.
+    // a1: constructor
+    // a3: number of elements in properties array (un-tagged)
+    // t4: JSObject
+    // t5: start of next object
+    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+    __ mov(a2, t5);
+    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+    __ sll(a0, a3, kSmiTagSize);
+    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+    __ Addu(a2, a2, Operand(2 * kPointerSize));
 
-    // Copy arguments and receiver to the expression stack.
-    // a0: number of arguments
-    // a1: constructor function
-    // a2: address of last argument (caller sp)
-    // a3: number of arguments (smi-tagged)
-    // sp[0]: receiver
-    // sp[1]: receiver
-    // sp[2]: constructor function
-    // sp[3]: number of arguments (smi-tagged)
-    Label loop, entry;
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(t0, a2, Operand(t0));
-    __ lw(t1, MemOperand(t0));
-    __ push(t1);
-    __ bind(&entry);
-    __ Addu(a3, a3, Operand(-2));
-    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
 
-    // Call the function.
-    // a0: number of arguments
-    // a1: constructor function
-    if (is_api_function) {
-      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      ParameterCount expected(0);
-      __ InvokeCode(code, expected, expected,
-                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-    } else {
-      ParameterCount actual(a0);
-      __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
+    // Initialize the fields to undefined.
+    // a1: constructor
+    // a2: First element of FixedArray (not tagged)
+    // a3: number of elements in properties array
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ sll(t3, a3, kPointerSizeLog2);
+    __ addu(t6, a2, t3);  // End of object.
+    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+    { Label loop, entry;
+      if (count_constructions) {
+        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+      } else if (FLAG_debug_code) {
+        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+      }
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ sw(t7, MemOperand(a2));
+      __ addiu(a2, a2, kPointerSize);
+      __ bind(&entry);
+      __ Branch(&loop, less, a2, Operand(t6));
     }
 
-    // Store offset of return address for deoptimizer.
-    if (!is_api_function && !count_constructions) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
-    }
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject.
+    // a1: constructor function
+    // t4: JSObject
+    // t5: FixedArray (not tagged)
+    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
 
-    // Restore context from the frame.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Continue with JSObject being successfully allocated.
+    // a1: constructor function
+    // a4: JSObject
+    __ jmp(&allocated);
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
-
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    // v0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: constructor function
-    // sp[2]: number of arguments (smi-tagged)
-    __ JumpIfSmi(v0, &use_receiver);
-
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    __ GetObjectType(v0, a3, a3);
-    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ lw(v0, MemOperand(sp));
-
-    // Remove receiver from the stack, remove caller arguments, and
-    // return.
-    __ bind(&exit);
-    // v0: result
-    // sp[0]: receiver (newly allocated object)
-    // sp[1]: constructor function
-    // sp[2]: number of arguments (smi-tagged)
-    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-
-    // Leave construct frame.
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated objects unused properties.
+    // t4: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(t4, t5);
   }
 
+  __ bind(&rt_call);
+  // Allocate the new receiver object using the runtime call.
+  // a1: constructor function
+  __ push(a1);  // Argument for Runtime_NewObject.
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ mov(t4, v0);
+
+  // Receiver for constructor call allocated.
+  // t4: JSObject
+  __ bind(&allocated);
+  __ push(t4);
+
+  // Push the function and the allocated receiver from the stack.
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, kPointerSize));
+  __ MultiPushReversed(a1.bit() | t4.bit());
+
+  // Reload the number of arguments from the stack.
+  // a1: constructor function
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
+
+  // Setup pointer to last argument.
+  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+  // Setup number of arguments for function call below.
+  __ srl(a0, a3, kSmiTagSize);
+
+  // Copy arguments and receiver to the expression stack.
+  // a0: number of arguments
+  // a1: constructor function
+  // a2: address of last argument (caller sp)
+  // a3: number of arguments (smi-tagged)
+  // sp[0]: receiver
+  // sp[1]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  Label loop, entry;
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ lw(t1, MemOperand(t0));
+  __ push(t1);
+  __ bind(&entry);
+  __ Addu(a3, a3, Operand(-2));
+  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+  // Call the function.
+  // a0: number of arguments
+  // a1: constructor function
+  if (is_api_function) {
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected,
+                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Pop the function from the stack.
+  // v0: result
+  // sp[0]: constructor function
+  // sp[2]: receiver
+  // sp[3]: constructor function
+  // sp[4]: number of arguments (smi-tagged)
+  __ Pop();
+
+  // Restore context from the frame.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ And(t0, v0, Operand(kSmiTagMask));
+  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  __ GetObjectType(v0, a3, a3);
+  __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ lw(v0, MemOperand(sp));
+
+  // Remove receiver from the stack, remove caller arguments, and
+  // return.
+  __ bind(&exit);
+  // v0: result
+  // sp[0]: receiver (newly allocated object)
+  // sp[1]: constructor function
+  // sp[2]: number of arguments (smi-tagged)
+  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+  __ LeaveConstructFrame();
   __ sll(t0, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, t0);
   __ Addu(sp, sp, kPointerSize);
@@ -1082,7 +1022,7 @@
   // ----------- S t a t e -------------
   //  -- a0: code entry
   //  -- a1: function
-  //  -- a2: receiver_pointer
+  //  -- a2: receiver_pointer
   //  -- a3: argc
   //  -- s0: argv
   // -----------------------------------
@@ -1091,57 +1031,59 @@
   __ mov(cp, zero_reg);
 
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Set up the context from the function argument.
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // Set up the context from the function argument.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-    // Push the function and the receiver onto the stack.
-    __ Push(a1, a2);
+  // Set up the roots register.
+  ExternalReference roots_address =
+      ExternalReference::roots_address(masm->isolate());
+  __ li(s6, Operand(roots_address));
 
-    // Copy arguments to the stack in a loop.
-    // a3: argc
-    // s0: argv, i.e. points to first arg
-    Label loop, entry;
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t2, s0, t0);
-    __ b(&entry);
-    __ nop();   // Branch delay slot nop.
-    // t2 points past last arg.
-    __ bind(&loop);
-    __ lw(t0, MemOperand(s0));  // Read next parameter.
-    __ addiu(s0, s0, kPointerSize);
-    __ lw(t0, MemOperand(t0));  // Dereference handle.
-    __ push(t0);  // Push parameter.
-    __ bind(&entry);
-    __ Branch(&loop, ne, s0, Operand(t2));
+  // Push the function and the receiver onto the stack.
+  __ Push(a1, a2);
 
-    // Initialize all JavaScript callee-saved registers, since they will be seen
-    // by the garbage collector as part of handlers.
-    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-    __ mov(s1, t0);
-    __ mov(s2, t0);
-    __ mov(s3, t0);
-    __ mov(s4, t0);
-    __ mov(s5, t0);
-    // s6 holds the root address. Do not clobber.
-    // s7 is cp. Do not init.
+  // Copy arguments to the stack in a loop.
+  // a3: argc
+  // s0: argv, i.e. points to first arg
+  Label loop, entry;
+  __ sll(t0, a3, kPointerSizeLog2);
+  __ addu(t2, s0, t0);
+  __ b(&entry);
+  __ nop();   // Branch delay slot nop.
+  // t2 points past last arg.
+  __ bind(&loop);
+  __ lw(t0, MemOperand(s0));  // Read next parameter.
+  __ addiu(s0, s0, kPointerSize);
+  __ lw(t0, MemOperand(t0));  // Dereference handle.
+  __ push(t0);  // Push parameter.
+  __ bind(&entry);
+  __ Branch(&loop, ne, s0, Operand(t2));
 
-    // Invoke the code and pass argc as a0.
-    __ mov(a0, a3);
-    if (is_construct) {
-      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-      __ CallStub(&stub);
-    } else {
-      ParameterCount actual(a0);
-      __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
-    }
+  // Initialize all JavaScript callee-saved registers, since they will be seen
+  // by the garbage collector as part of handlers.
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ mov(s1, t0);
+  __ mov(s2, t0);
+  __ mov(s3, t0);
+  __ mov(s4, t0);
+  __ mov(s5, t0);
+  // s6 holds the root address. Do not clobber.
+  // s7 is cp. Do not init.
 
-    // Leave internal frame.
+  // Invoke the code and pass argc as a0.
+  __ mov(a0, a3);
+  if (is_construct) {
+    __ Call(masm->isolate()->builtins()->JSConstructCall());
+  } else {
+    ParameterCount actual(a0);
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
+  __ LeaveInternalFrame();
+
   __ Jump(ra);
 }
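
The trampoline re-establishes the generated-code register contract on entry: s6 is pointed at the isolate's roots array (explicitly in 3.6, assumed to be preserved elsewhere later), cp comes from the function, and the remaining JS callee-saved registers are seeded with undefined so the GC never scans stale bits out of them. A sketch of that seeding invariant:

    #include <cstddef>

    // Sketch: why s1..s5 are initialized before entering JS code. The
    // GC may treat callee-saved registers as handler roots, so they
    // must hold a valid value; 'undefined' is the cheapest one.
    void SeedCalleeSaved(void* regs[5], void* undefined_value) {
      for (size_t i = 0; i < 5; i++) {
        regs[i] = undefined_value;  // s1, s2, s3, s4, s5
      }
    }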
 
@@ -1158,28 +1100,27 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Preserve the function.
-    __ push(a1);
-    // Push call kind information.
-    __ push(t1);
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
 
-    // Push the function on the stack as the argument to the runtime function.
-    __ push(a1);
-    // Call the runtime function.
-    __ CallRuntime(Runtime::kLazyCompile, 1);
-    // Calculate the entry point.
-    __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  // Call the runtime function.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
+  // Calculate the entry point.
+  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
 
-    // Restore call kind information.
-    __ pop(t1);
-    // Restore saved function.
-    __ pop(a1);
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
 
-    // Tear down temporary frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
@@ -1188,120 +1129,50 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Preserve the function.
-    __ push(a1);
-    // Push call kind information.
-    __ push(t1);
+  // Preserve the function.
+  __ push(a1);
+  // Push call kind information.
+  __ push(t1);
 
-    // Push the function on the stack as the argument to the runtime function.
-    __ push(a1);
-    __ CallRuntime(Runtime::kLazyRecompile, 1);
-    // Calculate the entry point.
-    __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Push the function on the stack as the argument to the runtime function.
+  __ push(a1);
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
+  // Calculate the entry point.
+  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-    // Restore call kind information.
-    __ pop(t1);
-    // Restore saved function.
-    __ pop(a1);
+  // Restore call kind information.
+  __ pop(t1);
+  // Restore saved function.
+  __ pop(a1);
 
-    // Tear down temporary frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
 }
 
 
-static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
-                                             Deoptimizer::BailoutType type) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    // Pass the function and deoptimization type to the runtime system.
-    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
-    __ push(a0);
-    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  }
-
-  // Get the full codegen state from the stack and untag it -> t2.
-  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
-  __ SmiUntag(t2);
-  // Switch on the state.
-  Label with_tos_register, unknown_state;
-  __ Branch(&with_tos_register,
-            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
-  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
-  __ Ret();
-
-  __ bind(&with_tos_register);
-  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
-  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
-
-  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
-  __ Ret();
-
-  __ bind(&unknown_state);
-  __ stop("no cases left");
-}
-
-
+// These functions are called from C++ but cannot be used in live code.
 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
-  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
-  // For now, we are relying on the fact that Runtime::NotifyOSR
-  // doesn't do any garbage collection which allows us to save/restore
-  // the registers without worrying about which of them contain
-  // pointers. This seems a bit fragile.
-  RegList saved_regs =
-      (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
-  __ MultiPush(saved_regs);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kNotifyOSR, 0);
-  }
-  __ MultiPop(saved_regs);
-  __ Ret();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  CpuFeatures::TryForceFeatureScope scope(VFP3);
-  if (!CpuFeatures::IsSupported(FPU)) {
-    __ Abort("Unreachable code: Cannot optimize without FPU support.");
-    return;
-  }
-
-  // Lookup the function in the JavaScript frame and push it as an
-  // argument to the on-stack replacement function.
-  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(a0);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  }
-
-  // If the result was -1 it means that we couldn't optimize the
-  // function. Just return and continue in the unoptimized version.
-  __ Ret(eq, v0, Operand(Smi::FromInt(-1)));
-
-  // Untag the AST id and push it on the stack.
-  __ SmiUntag(v0);
-  __ push(v0);
-
-  // Generate the code for doing the frame-to-frame translation using
-  // the deoptimizer infrastructure.
-  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
-  generator.Generate();
+  __ Abort("Call to unimplemented function in builtins-mips.cc");
 }
 
 
@@ -1319,19 +1190,19 @@
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
   // a0: actual number of arguments
-  Label slow, non_function;
+  Label non_function;
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(at, sp, at);
   __ lw(a1, MemOperand(at));
-  __ JumpIfSmi(a1, &non_function);
+  __ And(at, a1, Operand(kSmiTagMask));
+  __ Branch(&non_function, eq, at, Operand(zero_reg));
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // 3a. Patch the first argument if necessary when calling a function.
   // a0: actual number of arguments
   // a1: function
   Label shift_arguments;
-  __ li(t0, Operand(0, RelocInfo::NONE));  // Indicate regular JS_FUNCTION.
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1339,13 +1210,13 @@
     // Do not transform the receiver for strict mode functions.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                  kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
 
     // Do not transform the receiver for native (Compilerhints already in a3).
-    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
 
     // Compute the receiver in non-strict mode.
     // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1367,25 +1238,21 @@
     __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
 
     __ bind(&convert_to_object);
-    // Enter an internal frame in order to preserve argument count.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
-      __ push(a0);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+    __ push(a0);
 
-      __ push(a2);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ mov(a2, v0);
+    __ push(a2);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a2, v0);
 
-      __ pop(a0);
-      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
-      // Leave internal frame.
-    }
-    // Restore the function to a1, and the flag to t0.
+    __ pop(a0);
+    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+    __ LeaveInternalFrame();
+    // Restore the function to a1.
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
-    __ li(t0, Operand(0, RelocInfo::NONE));
     __ Branch(&patch_receiver);
 
     // Use the global receiver object from the called function as the
@@ -1406,31 +1273,25 @@
     __ Branch(&shift_arguments);
   }
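
Taken together, the checks above are the classic non-strict receiver fix-up: strict-mode and native functions keep the receiver untouched, null and undefined are replaced by the global receiver, and remaining primitives are boxed through the TO_OBJECT builtin. A toy outline (not V8 types):

    // Toy sketch of the receiver patching above.
    enum class Kind { Undefined, Null, Object, Primitive };

    Kind FixReceiver(Kind receiver, bool strict_or_native) {
      if (strict_or_native) return receiver;         // leave as-is
      if (receiver == Kind::Undefined || receiver == Kind::Null)
        return Kind::Object;                         // global receiver
      if (receiver == Kind::Primitive)
        return Kind::Object;                         // TO_OBJECT boxes it
      return receiver;                               // already an object
    }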
 
-  // 3b. Check for function proxy.
-  __ bind(&slow);
-  __ li(t0, Operand(1, RelocInfo::NONE));  // Indicate function proxy.
-  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-
-  __ bind(&non_function);
-  __ li(t0, Operand(2, RelocInfo::NONE));  // Indicate non-function.
-
-  // 3c. Patch the first argument when calling a non-function.  The
+  // 3b. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
   // a0: actual number of arguments
   // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ bind(&non_function);
+  // Restore the function in case it has been modified.
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(a2, sp, at);
   __ sw(a1, MemOperand(a2, -kPointerSize));
+  // Clear a1 to indicate a non-function being called.
+  __ mov(a1, zero_reg);
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
   // a0: actual number of arguments
   // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
@@ -1448,26 +1309,14 @@
     __ Pop();
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
-  //     or a function proxy via CALL_FUNCTION_PROXY.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
   // a0: actual number of arguments
   // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
-  { Label function, non_proxy;
-    __ Branch(&function, eq, t0, Operand(zero_reg));
-    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
-    __ mov(a2, zero_reg);
-    __ SetCallKind(t1, CALL_AS_METHOD);
-    __ Branch(&non_proxy, ne, t0, Operand(1));
-
-    __ push(a1);  // Re-add proxy object as additional argument.
-    __ Addu(a0, a0, Operand(1));
-    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
-    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
-
-    __ bind(&non_proxy);
+  { Label function;
+    __ Branch(&function, ne, a1, Operand(zero_reg));
+    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
     __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+    __ SetCallKind(t1, CALL_AS_METHOD);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
@@ -1501,158 +1350,134 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
-    __ push(a0);
-    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
-    __ push(a0);
-    // Returns (in v0) number of arguments to copy to stack as Smi.
-    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+  __ EnterInternalFrame();
 
-    // Check the stack for overflow. We are not trying to catch
-    // interruptions (e.g. debug break and preemption) here, so the "real stack
-    // limit" is checked.
-    Label okay;
-    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
-    // Make a2 the space we have left. The stack might already be overflowed
-    // here which will cause a2 to become negative.
-    __ subu(a2, sp, a2);
-    // Check if the arguments will overflow the stack.
-    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
-    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
+  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+  __ push(a0);
+  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+  __ push(a0);
+  // Returns (in v0) number of arguments to copy to stack as Smi.
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-    // Out of stack space.
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
-    __ push(a1);
-    __ push(v0);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    // End of stack check.
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+  // Make a2 the space we have left. The stack might already have
+  // overflowed here, which will cause a2 to become negative.
+  __ subu(a2, sp, a2);
+  // Check if the arguments will overflow the stack.
+  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
+  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
 
-    // Push current limit and index.
-    __ bind(&okay);
-    __ push(v0);  // Limit.
-    __ mov(a1, zero_reg);  // Initial index.
-    __ push(a1);
+  // Out of stack space.
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ push(a1);
+  __ push(v0);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  // End of stack check.
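
The check works in raw bytes: a2 is the space left above the real stack limit, and the Smi-tagged argument count in v0 becomes a byte count with a single shift, because a Smi already carries one left shift. A minimal sketch assuming the 32-bit constants (kSmiTagSize == 1, kPointerSizeLog2 == 2):

    #include <cstdint>

    bool ArgumentsFit(uint32_t sp, uint32_t real_stack_limit,
                      int32_t smi_argc) {
      const int kSmiTagSize = 1, kPointerSizeLog2 = 2;
      // The stack may already have overflowed, so keep this signed.
      int32_t space_left = static_cast<int32_t>(sp - real_stack_limit);
      // smi_argc == argc << 1; one more shift gives argc * kPointerSize.
      int32_t bytes_needed = smi_argc << (kPointerSizeLog2 - kSmiTagSize);
      return space_left > bytes_needed;
    }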
 
-    // Get the receiver.
-    __ lw(a0, MemOperand(fp, kRecvOffset));
+  // Push current limit and index.
+  __ bind(&okay);
+  __ push(v0);  // Limit.
+  __ mov(a1, zero_reg);  // Initial index.
+  __ push(a1);
 
-    // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver;
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
-    __ GetObjectType(a1, a2, a2);
-    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
+  // Change context eagerly to get the right global object if necessary.
+  __ lw(a0, MemOperand(fp, kFunctionOffset));
+  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
+  // Load the shared function info while the function is still in a0.
+  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
 
-    // Change context eagerly to get the right global object if necessary.
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-    // Load the shared function info while the function is still in a1.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  // Compute the receiver.
+  Label call_to_object, use_global_receiver, push_receiver;
+  __ lw(a0, MemOperand(fp, kRecvOffset));
 
-    // Compute the receiver.
-    // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
-    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                                 kSmiTagSize)));
-    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+  // Do not transform the receiver for strict mode functions.
+  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                               kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
 
-    // Do not transform the receiver for native (Compilerhints already in a2).
-    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+  // Do not transform the receiver for native (Compilerhints already in a2).
+  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
 
-    // Compute the receiver in non-strict mode.
-    __ JumpIfSmi(a0, &call_to_object);
-    __ LoadRoot(a1, Heap::kNullValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+  // Compute the receiver in non-strict mode.
+  __ And(t0, a0, Operand(kSmiTagMask));
+  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
 
-    // Check if the receiver is already a JavaScript object.
-    // a0: receiver
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ GetObjectType(a0, a1, a1);
-    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+  // Check if the receiver is already a JavaScript object.
+  // a0: receiver
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-    // Convert the receiver to a regular object.
-    // a0: receiver
-    __ bind(&call_to_object);
-    __ push(a0);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
-    __ Branch(&push_receiver);
+  // Convert the receiver to a regular object.
+  // a0: receiver
+  __ bind(&call_to_object);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+  __ Branch(&push_receiver);
 
-    // Use the current global receiver object as the receiver.
-    __ bind(&use_global_receiver);
-    const int kGlobalOffset =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-    __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
-    __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
 
-    // Push the receiver.
-    // a0: receiver
-    __ bind(&push_receiver);
-    __ push(a0);
+  // Push the receiver.
+  // a0: receiver
+  __ bind(&push_receiver);
+  __ push(a0);
 
-    // Copy all arguments from the array to the stack.
-    Label entry, loop;
-    __ lw(a0, MemOperand(fp, kIndexOffset));
-    __ Branch(&entry);
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Branch(&entry);
 
-    // Load the current argument from the arguments array and push it to the
-    // stack.
-    // a0: current argument index
-    __ bind(&loop);
-    __ lw(a1, MemOperand(fp, kArgsOffset));
-    __ push(a1);
-    __ push(a0);
+  // Load the current argument from the arguments array and push it to the
+  // stack.
+  // a0: current argument index
+  __ bind(&loop);
+  __ lw(a1, MemOperand(fp, kArgsOffset));
+  __ push(a1);
+  __ push(a0);
 
-    // Call the runtime to access the property in the arguments array.
-    __ CallRuntime(Runtime::kGetProperty, 2);
-    __ push(v0);
+  // Call the runtime to access the property in the arguments array.
+  __ CallRuntime(Runtime::kGetProperty, 2);
+  __ push(v0);
 
-    // Use inline caching to access the arguments.
-    __ lw(a0, MemOperand(fp, kIndexOffset));
-    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
-    __ sw(a0, MemOperand(fp, kIndexOffset));
+  // Use inline caching to access the arguments.
+  __ lw(a0, MemOperand(fp, kIndexOffset));
+  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+  __ sw(a0, MemOperand(fp, kIndexOffset));
 
-    // Test if the copy loop has finished copying all the elements from the
-    // arguments object.
-    __ bind(&entry);
-    __ lw(a1, MemOperand(fp, kLimitOffset));
-    __ Branch(&loop, ne, a0, Operand(a1));
+  // Test if the copy loop has finished copying all the elements from the
+  // arguments object.
+  __ bind(&entry);
+  __ lw(a1, MemOperand(fp, kLimitOffset));
+  __ Branch(&loop, ne, a0, Operand(a1));
+  // Invoke the function.
+  ParameterCount actual(a0);
+  __ sra(a0, a0, kSmiTagSize);
+  __ lw(a1, MemOperand(fp, kFunctionOffset));
+  __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
-    // Invoke the function.
-    Label call_proxy;
-    ParameterCount actual(a0);
-    __ sra(a0, a0, kSmiTagSize);
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
-    __ GetObjectType(a1, a2, a2);
-    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
-
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-
-    frame_scope.GenerateLeaveFrame();
-    __ Ret(USE_DELAY_SLOT);
-    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
-
-    // Invoke the function proxy.
-    __ bind(&call_proxy);
-    __ push(a1);  // Add function proxy as last argument.
-    __ Addu(a0, a0, Operand(1));
-    __ li(a2, Operand(0, RelocInfo::NONE));
-    __ SetCallKind(t1, CALL_AS_METHOD);
-    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
-    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
-    // Tear down the internal frame and remove function, receiver and args.
-  }
-
-  __ Ret(USE_DELAY_SLOT);
-  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+  // Tear down the internal frame and remove function, receiver and args.
+  __ LeaveInternalFrame();
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 }
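
Note that the copy loop above never leaves Smi space: the index starts at Smi zero, `Addu(a0, a0, Operand(1 << kSmiTagSize))` bumps the raw word by 2 (one in Smi terms), and the limit pushed at kLimitOffset is also a Smi, so the termination compare is like-for-like. The shape of the loop, as a sketch:

    #include <cstdint>

    int32_t SmiFromInt(int32_t n) { return n << 1; }  // kSmiTagSize == 1

    void CopyLoopShape(int32_t smi_limit) {
      for (int32_t smi_index = SmiFromInt(0); smi_index != smi_limit;
           smi_index += 1 << 1) {  // the Addu in the loop body
        // Runtime::kGetProperty(args, smi_index) runs here and its result
        // is pushed; the index is stored back to kIndexOffset still tagged.
      }
    }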
 
 
@@ -1735,6 +1560,8 @@
     __ bind(&too_few);
     EnterArgumentsAdaptorFrame(masm);
 
+    // TODO(MIPS): Optimize these loops.
+
     // Calculate copy start address into a0 and copy end address is fp.
     // a0: actual number of arguments as a smi
     // a1: function
@@ -1756,10 +1583,9 @@
     Label copy;
     __ bind(&copy);
     __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
-    __ Subu(sp, sp, kPointerSize);
+    __ push(t0);
     __ Subu(a0, a0, kPointerSize);
-    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
-    __ sw(t0, MemOperand(sp));  // In the delay slot.
+    __ Branch(&copy, ne, a0, Operand(t3));
 
     // Fill the remaining expected arguments with undefined.
     // a1: function
@@ -1772,9 +1598,8 @@
 
     Label fill;
     __ bind(&fill);
-    __ Subu(sp, sp, kPointerSize);
-    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
-    __ sw(t0, MemOperand(sp));
+    __ push(t0);
+    __ Branch(&fill, ne, sp, Operand(a2));
   }
 
   // Call the entry point.
@@ -1782,9 +1607,6 @@
 
   __ Call(a3);
 
-  // Store offset of return address for deoptimizer.
-  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
   // Exit frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ Ret();
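
The too_few path above copies the actual arguments and then pads with undefined until the expected count is reached, which is all the adaptor frame buys the callee. The same effect in ordinary C++ (a sketch with a placeholder Value type, not V8's):

    #include <cstdint>
    #include <vector>

    using Value = uint32_t;              // placeholder tagged word
    const Value kUndefinedSentinel = 0;  // stand-in for undefined

    std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                      size_t expected) {
      std::vector<Value> frame(actual);       // the copy loop
      while (frame.size() < expected)
        frame.push_back(kUndefinedSentinel);  // the fill loop
      return frame;
    }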
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 1b3242c..c3c3874 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -70,13 +70,13 @@
   // The ToNumber stub takes one argument in a0.
   Label check_heap_number, call_builtin;
   __ JumpIfNotSmi(a0, &check_heap_number);
-  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ Ret();
 
   __ bind(&check_heap_number);
   EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
-  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ Ret();
 
   __ bind(&call_builtin);
   __ push(a0);
@@ -100,9 +100,9 @@
                         &gc,
                         TAG_OBJECT);
 
-  int map_index = (language_mode_ == CLASSIC_MODE)
-      ? Context::FUNCTION_MAP_INDEX
-      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -128,9 +128,9 @@
   // found in the shared function info object.
   __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
 
   // Return result. The argument function info has been popped already.
-  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
   __ Ret();
 
   // Create a new closure through the slower runtime call.
@@ -157,19 +157,21 @@
   // Load the function from the stack.
   __ lw(a3, MemOperand(sp, 0));
 
-  // Set up the object header.
-  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
+  // Set up the object header.
+  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
+  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ li(a2, Operand(Smi::FromInt(length)));
   __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
 
-  // Set up the fixed slots, copy the global object from the previous context.
-  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Set up the fixed slots.
   __ li(a1, Operand(Smi::FromInt(0)));
   __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
   __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+  // Copy the global object from the previous context.
+  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
 
   // Initialize the rest of the slots to undefined.
   __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -179,7 +181,8 @@
 
   // Remove the on-stack argument and return.
   __ mov(cp, v0);
-  __ DropAndRet(1);
+  __ Pop();
+  __ Ret();
 
   // Need to collect. Call into runtime system.
   __ bind(&gc);
@@ -187,123 +190,16 @@
 }
 
 
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: function.
-  // [sp + kPointerSize]: serialized scope info
-
-  // Try to allocate the context in new space.
-  Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-  __ AllocateInNewSpace(FixedArray::SizeFor(length),
-                        v0, a1, a2, &gc, TAG_OBJECT);
-
-  // Load the function from the stack.
-  __ lw(a3, MemOperand(sp, 0));
-
-  // Load the serialized scope info from the stack.
-  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
-
-  // Set up the object header.
-  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
-  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
-  __ li(a2, Operand(Smi::FromInt(length)));
-  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-
-  // If this block context is nested in the global context we get a smi
-  // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
-  // we still have to look up.
-  Label after_sentinel;
-  __ JumpIfNotSmi(a3, &after_sentinel);
-  if (FLAG_debug_code) {
-    const char* message = "Expected 0 as a Smi sentinel";
-    __ Assert(eq, message, a3, Operand(zero_reg));
-  }
-  __ lw(a3, GlobalObjectOperand());
-  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
-  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
-  __ bind(&after_sentinel);
-
-  // Set up the fixed slots, copy the global object from the previous context.
-  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
-  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
-  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-  __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
-
-  // Initialize the rest of the slots to the hole value.
-  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < slots_; i++) {
-    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
-  }
-
-  // Remove the on-stack argument and return.
-  __ mov(cp, v0);
-  __ DropAndRet(2);
-
-  // Need to collect. Call into runtime system.
-  __ bind(&gc);
-  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    Label* fail) {
-  // Registers on entry:
-  // a3: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-  int size = JSArray::kSize + elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size,
-                        v0,
-                        a1,
-                        a2,
-                        fail,
-                        TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ lw(a1, FieldMemOperand(a3, i));
-      __ sw(a1, FieldMemOperand(v0, i));
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ Addu(a2, v0, Operand(JSArray::kSize));
-    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
-    // Copy the elements array.
-    ASSERT((elements_size % kPointerSize) == 0);
-    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
-  }
-}
-
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
-  //
   // [sp]: constant elements.
   // [sp + kPointerSize]: literal index.
   // [sp + (2 * kPointerSize)]: literals array.
 
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
  // Load boilerplate object into a3 and check if we need to create a
   // boilerplate.
   Label slow_case;
@@ -316,42 +212,14 @@
   __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
   __ Branch(&slow_case, eq, a3, Operand(t1));
 
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
-    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
-    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, 0,
-                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
-    // Return and remove the on-stack parameters.
-    __ DropAndRet(3);
-
-    __ bind(&check_fast_elements);
-    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-    __ Branch(&double_elements, ne, v0, Operand(t1));
-    GenerateFastCloneShallowArrayCommon(masm, length_,
-                                        CLONE_ELEMENTS, &slow_case);
-    // Return and remove the on-stack parameters.
-    __ DropAndRet(3);
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode == CLONE_ELEMENTS) {
+    if (mode_ == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -363,56 +231,41 @@
     __ pop(a3);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  // Return new object in v0.
+  __ AllocateInNewSpace(size,
+                        v0,
+                        a1,
+                        a2,
+                        &slow_case,
+                        TAG_OBJECT);
 
-  // Return and remove the on-stack parameters.
-  __ DropAndRet(3);
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ lw(a1, FieldMemOperand(a3, i));
+      __ sw(a1, FieldMemOperand(v0, i));
+    }
+  }
 
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and set up the
+    // elements pointer in the resulting object.
+    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+    __ Addu(a2, v0, Operand(JSArray::kSize));
+    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
 
-
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [sp]: object literal flags.
-  // [sp + kPointerSize]: constant properties.
-  // [sp + (2 * kPointerSize)]: literal index.
-  // [sp + (3 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into a3 and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
-  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
-  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(a3, t0, a3);
-  __ lw(a3, MemOperand(a3));
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&slow_case, eq, a3, Operand(t0));
-
-  // Check that the boilerplate contains only fast properties and we can
-  // statically determine the instance size.
-  int size = JSObject::kHeaderSize + length_ * kPointerSize;
-  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
-  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
-
-  // Allocate the JS object and copy header together with all in-object
-  // properties from the boilerplate.
-  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ lw(a1, FieldMemOperand(a3, i));
-    __ sw(a1, FieldMemOperand(v0, i));
+    // Copy the elements array.
+    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
   }
 
   // Return and remove the on-stack parameters.
-  __ DropAndRet(4);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 
   __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
+  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
 }
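
The stub clones in two steps: copy the JSArray header word for word (skipping the elements field when there are elements), then point the clone at its own elements copy, which lives immediately after the header because both were carved out of one allocation. In outline (plain C++ standing in for the lw/sw pairs; offsets are parameters here, not V8's constants):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CloneShallowArray(uint8_t* clone, const uint8_t* boilerplate,
                           const uint8_t* boilerplate_elements,
                           size_t header_size, size_t elements_size,
                           size_t elements_offset) {
      std::memcpy(clone, boilerplate, header_size);    // JS array part
      if (elements_size > 0) {
        uint8_t* new_elements = clone + header_size;   // same allocation
        std::memcpy(new_elements, boilerplate_elements, elements_size);
        // Fix up the clone's elements pointer to its own copy.
        *reinterpret_cast<uint8_t**>(clone + elements_offset) = new_elements;
      }
    }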
 
 
@@ -473,7 +326,7 @@
   __ And(exponent, source_, Operand(HeapNumber::kSignMask));
   // Subtract from 0 if source was negative.
   __ subu(at, zero_reg, source_);
-  __ Movn(source_, at, exponent);
+  __ movn(source_, at, exponent);
 
   // We have -1, 0 or 1, which we treat specially. Register source_ contains
   // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -485,15 +338,15 @@
       HeapNumber::kExponentBias << HeapNumber::kExponentShift;
   // Safe to use 'at' as dest reg here.
   __ Or(at, exponent, Operand(exponent_word_for_1));
-  __ Movn(exponent, at, source_);  // Write exp when source not 0.
+  __ movn(exponent, at, source_);  // Write exp when source not 0.
   // 1, 0 and -1 all have 0 for the second word.
-  __ Ret(USE_DELAY_SLOT);
   __ mov(mantissa, zero_reg);
+  __ Ret();
 
   __ bind(&not_special);
   // Count leading zeros.
   // Gets the wrong answer for 0, but we already checked for that case above.
-  __ Clz(zeros_, source_);
+  __ clz(zeros_, source_);
   // Compute exponent and or it into the exponent register.
   // We use mantissa as a scratch register here.
   __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
@@ -509,9 +362,9 @@
   __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
   // And the top (top 20 bits).
   __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
-  __ Ret(USE_DELAY_SLOT);
   __ or_(exponent, exponent, source_);
+
+  __ Ret();
 }
 
 
@@ -583,9 +436,7 @@
 
   Label is_smi, done;
 
-  // Smi-check
-  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
-  // Heap number check
+  __ JumpIfSmi(object, &is_smi);
   __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
 
   // Handle loading a double from a heap number.
@@ -612,6 +463,7 @@
   if (CpuFeatures::IsSupported(FPU)) {
     CpuFeatures::Scope scope(FPU);
     // Convert smi to double using FPU instructions.
+    __ SmiUntag(scratch1, object);
     __ mtc1(scratch1, dst);
     __ cvt_d_w(dst, dst);
     if (destination == kCoreRegisters) {
@@ -646,10 +498,11 @@
                            Heap::kHeapNumberMapRootIndex,
                            "HeapNumberMap register clobbered.");
   }
+  Label is_smi;
   Label done;
   Label not_in_int32_range;
 
-  __ UntagAndJumpIfSmi(dst, object, &done);
+  __ JumpIfSmi(object, &is_smi);
   __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
   __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
   __ ConvertToInt32(object,
@@ -669,6 +522,10 @@
                                  scratch2,
                                  scratch3);
 
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ SmiUntag(dst, object);
   __ bind(&done);
 }
 
@@ -713,10 +570,10 @@
     __ Subu(int_scratch, zero_reg, int_scratch);
     __ bind(&skip_sub);
 
-    // Get mantissa[51:20].
+    // Get mantissa[51:20].
 
     // Get the position of the first set bit.
-    __ Clz(dst1, int_scratch);
+    __ clz(dst1, int_scratch);
     __ li(scratch2, 31);
     __ Subu(dst1, scratch2, dst1);
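
This is manual IEEE-754 assembly of a double from an int32: clz locates the highest set bit, 31 minus that count is the unbiased exponent, and the stored exponent adds the bias (1023 for doubles). For example, 5 = 0b101 has its top bit at position 2, so the stored exponent is 1025. A compact sketch using the GCC/Clang clz builtin:

    #include <cstdint>

    // Stored-exponent computation mirroring the clz sequence above.
    uint32_t StoredExponent(uint32_t magnitude) {   // magnitude != 0
      const uint32_t kExponentBias = 1023;          // IEEE-754 double bias
      int top_bit = 31 - __builtin_clz(magnitude);  // position of first 1
      return top_bit + kExponentBias;               // 5 -> 2 + 1023 = 1025
    }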
 
@@ -758,7 +615,7 @@
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  DoubleRegister double_dst,
+                                                  FPURegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -794,16 +651,25 @@
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    Register except_flag = scratch2;
-    __ EmitFPUTruncate(kRoundToZero,
-                       single_scratch,
-                       double_dst,
-                       scratch1,
-                       except_flag,
-                       kCheckForInexactConversion);
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way, so for
+    // now it makes more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(single_scratch, double_dst);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
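
The cfc1/ctc1 bracket reads and rewrites FCSR so the truncation can run with exceptions disabled and its sticky flags inspected afterwards: any flag set means the double was not exactly an int32, and control jumps to not_int32. A portable analogue of the same predicate (a sketch; the stub reads the hardware flags directly rather than re-comparing):

    #include <cmath>
    #include <cstdint>

    // Succeeds only when d truncates to an int32 with nothing lost --
    // the condition the FCSR flag check enforces above.
    bool TruncateIfExact(double d, int32_t* out) {
      if (!(d >= INT32_MIN && d <= INT32_MAX)) return false;  // NaN/overflow
      if (std::trunc(d) != d) return false;  // fractional bits -> inexact
      *out = static_cast<int32_t>(d);        // trunc_w_d analogue
      return true;
    }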
@@ -840,7 +706,7 @@
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            DoubleRegister double_scratch,
+                                            FPURegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -850,7 +716,10 @@
 
   Label done;
 
-  __ UntagAndJumpIfSmi(dst, object, &done);
+  // Untag the object into the destination register.
+  __ SmiUntag(dst, object);
+  // Just return if the object is a smi.
+  __ JumpIfSmi(object, &done);
 
   if (FLAG_debug_code) {
     __ AbortIfNotRootValue(heap_number_map,
@@ -866,19 +735,27 @@
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    FPURegister single_scratch = double_scratch.low();
-    Register except_flag = scratch2;
-    __ EmitFPUTruncate(kRoundToZero,
-                       single_scratch,
-                       double_scratch,
-                       scratch1,
-                       except_flag,
-                       kCheckForInexactConversion);
+    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+    // On MIPS a lot of things cannot be implemented the same way, so for
+    // now it makes more sense to just do things manually.
+
+    // Save FCSR.
+    __ cfc1(scratch1, FCSR);
+    // Disable FPU exceptions.
+    __ ctc1(zero_reg, FCSR);
+    __ trunc_w_d(double_scratch, double_scratch);
+    // Retrieve FCSR.
+    __ cfc1(scratch2, FCSR);
+    // Restore FCSR.
+    __ ctc1(scratch1, FCSR);
+
+    // Check for inexact conversion or exception.
+    __ And(scratch2, scratch2, kFCSRFlagMask);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
     // Get the result in the destination register.
-    __ mfc1(dst, single_scratch);
+    __ mfc1(dst, double_scratch);
 
   } else {
     // Load the double value in the destination registers.
@@ -955,7 +832,7 @@
   // non zero bits left. So we need the (30 - exponent) last bits of the
   // 31 higher bits of the mantissa to be null.
   // Because bits [21:0] are null, we can check instead that the
-  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
+  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
 
   // Get the 32 higher bits of the mantissa in dst.
   __ Ext(dst,
@@ -1004,11 +881,9 @@
     __ Move(f12, a0, a1);
     __ Move(f14, a2, a3);
   }
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
+  // Call C routine that may not cause GC or other trouble.
+  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
+                   4);
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
     CpuFeatures::Scope scope(FPU);
@@ -1020,38 +895,9 @@
     __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
   }
   // Place heap_number_result in v0 and return to the pushed return address.
-  __ pop(ra);
-  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, heap_number_result);
-}
-
-
-bool WriteInt32ToHeapNumberStub::IsPregenerated() {
-  // These variants are compiled ahead of time.  See next method.
-  if (the_int_.is(a1) &&
-      the_heap_number_.is(v0) &&
-      scratch_.is(a2) &&
-      sign_.is(a3)) {
-    return true;
-  }
-  if (the_int_.is(a2) &&
-      the_heap_number_.is(v0) &&
-      scratch_.is(a3) &&
-      sign_.is(a0)) {
-    return true;
-  }
-  // Other register combinations are generated as and when they are needed,
-  // so it is unsafe to call them from stubs (we can't generate a stub while
-  // we are generating a stub).
-  return false;
-}
-
-
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
-  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
-  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
-  stub1.GetCode()->set_is_pregenerated(true);
-  stub2.GetCode()->set_is_pregenerated(true);
+  __ pop(ra);
+  __ Ret();
 }
 
 
@@ -1074,7 +920,7 @@
   __ or_(scratch_, scratch_, sign_);
   // Subtract from 0 if the value was negative.
   __ subu(at, zero_reg, the_int_);
-  __ Movn(the_int_, at, sign_);
+  __ movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
@@ -1158,7 +1004,6 @@
   }
 
   __ bind(&return_equal);
-
   if (cc == less) {
     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
   } else if (cc == greater) {
@@ -1223,15 +1068,16 @@
          (lhs.is(a1) && rhs.is(a0)));
 
   Label lhs_is_smi;
-  __ JumpIfSmi(lhs, &lhs_is_smi);
+  __ And(t0, lhs, Operand(kSmiTagMask));
+  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
   // Rhs is a Smi.
   // Check whether the non-smi is a heap number.
   __ GetObjectType(lhs, t4, t4);
   if (strict) {
     // If lhs was not a number and rhs was a Smi then strict equality cannot
     // succeed. Return non-equal (lhs is already not zero).
-    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
     __ mov(v0, lhs);
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number. Call
     // the runtime.
@@ -1269,8 +1115,8 @@
   if (strict) {
     // If lhs was not a number and rhs was a Smi then strict equality cannot
     // succeed. Return non-equal.
-    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
     __ li(v0, Operand(1));
+    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number. Call
     // the runtime.
@@ -1350,13 +1196,12 @@
   __ bind(&one_is_nan);
   // NaN comparisons always fail.
   // Load whatever we need in v0 to make the comparison fail.
-
   if (cc == lt || cc == le) {
     __ li(v0, Operand(GREATER));
   } else {
     __ li(v0, Operand(LESS));
   }
-  __ Ret();
+  __ Ret();  // Return.
 
   __ bind(&neither_is_nan);
 }
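
The choice of GREATER versus LESS is not arbitrary: every comparison involving NaN must evaluate to false, so the stub loads a result on the side the pending condition cannot accept (GREATER for lt/le, LESS otherwise). Trivially:

    // Result loaded when one operand is NaN (sketch of the branch above).
    int NanCompareResult(bool cc_is_lt_or_le) {
      const int LESS = -1, GREATER = 1;  // V8's compare-result convention
      return cc_is_lt_or_le ? GREATER : LESS;
    }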
@@ -1405,7 +1250,6 @@
     __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
 
     __ bind(&return_result_equal);
-
     __ li(v0, Operand(EQUAL));
     __ Ret();
   }
@@ -1414,7 +1258,7 @@
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(0, 2, t4);
+    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
     if (!IsMipsSoftFloatABI) {
       // We are not using MIPS FPU instructions, and parameters for the runtime
      // function call are prepared in a0-a3 registers, but function we are
@@ -1424,20 +1268,21 @@
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
-       0, 2);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ BranchF(&equal, NULL, eq, f12, f14);
-    __ BranchF(&less_than, NULL, lt, f12, f14);
+    __ c(EQ, D, f12, f14);
+    __ bc1t(&equal);
+    __ nop();
+
+    __ c(OLT, D, f12, f14);
+    __ bc1t(&less_than);
+    __ nop();
 
     // Not equal, not less, not NaN, must be greater.
-
     __ li(v0, Operand(GREATER));
     __ Ret();
 
@@ -1458,7 +1303,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into a2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1468,8 +1313,8 @@
     // Return non-zero.
     Label return_not_equal;
     __ bind(&return_not_equal);
-    __ Ret(USE_DELAY_SLOT);
     __ li(v0, Operand(1));
+    __ Ret();
 
     __ bind(&first_non_object);
     // Check for oddballs: true, false, null, undefined.
@@ -1548,8 +1393,8 @@
 
   // Both are symbols. We already checked they weren't the same pointer
   // so they are not equal.
-  __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(1));   // Non-zero indicates not equal.
+  __ Ret();
 
   __ bind(&object_test);
   __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
@@ -1564,8 +1409,8 @@
   __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
   __ and_(a0, a2, a3);
   __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
-  __ Ret(USE_DELAY_SLOT);
-  __ xori(v0, a0, 1 << Map::kIsUndetectable);
+  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
+  __ Ret();
 }
 
 
@@ -1628,7 +1473,9 @@
       __ JumpIfSmi(probe, not_found);
       __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
       __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+      __ c(EQ, D, f12, f14);
+      __ bc1t(&load_result_from_cache);
+      __ nop();   // bc1t() requires explicit fill of branch delay slot.
       __ Branch(not_found);
     } else {
       // Note that there is no cache check for non-FPU case, even though
@@ -1672,7 +1519,8 @@
 
   // Generate code to lookup number in the number string cache.
   GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
-  __ DropAndRet(1);
+  __ Addu(sp, sp, Operand(1 * kPointerSize));
+  __ Ret();
 
   __ bind(&runtime);
   // Handle number to string in the runtime system if not found in the cache.
@@ -1694,8 +1542,8 @@
     __ JumpIfNotSmi(a2, &not_two_smis);
     __ sra(a1, a1, 1);
     __ sra(a0, a0, 1);
-    __ Ret(USE_DELAY_SLOT);
-    __ subu(v0, a1, a0);
+    __ Subu(v0, a1, a0);
+    __ Ret();
     __ bind(&not_two_smis);
   } else if (FLAG_debug_code) {
     __ Or(a2, a1, a0);
@@ -1743,20 +1591,22 @@
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ BranchF(NULL, &nan, eq, f12, f14);
+    __ c(UN, D, f12, f14);
+    __ bc1t(&nan);
+    __ nop();
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(OLT, D, f12, f14);
-    __ Movt(v0, t0);
+    __ movt(v0, t0);
    // Use the previous check to conditionally store to v0 the opposite
    // condition (GREATER). If rhs is equal to lhs, this will be corrected
    // in the next check.
-    __ Movf(v0, t1);
+    __ movf(v0, t1);
     // Check if EQUAL condition is satisfied. If true, move conditionally
     // result to v0.
     __ c(EQ, D, f12, f14);
-    __ Movt(v0, t2);
+    __ movt(v0, t2);
 
     __ Ret();
 
@@ -1861,144 +1711,88 @@
 }
 
 
-// The stub expects its argument in the tos_ register and returns its result in
-// it, too: zero for false, and a non-zero value for true.
+// The stub returns zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label patch;
-  const Register map = t5.is(tos_) ? t3 : t5;
+  Label false_result;
+  Label not_heap_number;
+  Register scratch0 = t5.is(tos_) ? t3 : t5;
 
-  // undefined -> false.
-  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
+  // undefined -> false
+  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
 
-  // Boolean -> its value.
-  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
-  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
+  // Boolean -> its value
+  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the equal condition is satisfied.
+  __ Ret(eq, tos_, Operand(scratch0));
 
-  // 'null' -> false.
-  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
+  // Smis: 0 -> false, all other -> true
+  __ And(scratch0, tos_, tos_);
+  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
+  __ And(scratch0, tos_, Operand(kSmiTagMask));
+  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
+  // return true if the not equal condition is satisfied.
+  __ Ret(eq, scratch0, Operand(zero_reg));
 
-  if (types_.Contains(SMI)) {
-    // Smis: 0 -> false, all other -> true
-    __ And(at, tos_, kSmiTagMask);
-    // tos_ contains the correct return value already
-    __ Ret(eq, at, Operand(zero_reg));
-  } else if (types_.NeedsMap()) {
-    // If we need a map later and have a Smi -> patch.
-    __ JumpIfSmi(tos_, &patch);
-  }
+  // 'null' -> false
+  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, tos_, Operand(scratch0));
 
-  if (types_.NeedsMap()) {
-    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  // HeapNumber => false if +0, -0, or NaN.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
 
-    if (types_.CanBeUndetectable()) {
-      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
-      __ And(at, at, Operand(1 << Map::kIsUndetectable));
-      // Undetectable -> false.
-      __ Movn(tos_, zero_reg, at);
-      __ Ret(ne, at, Operand(zero_reg));
-    }
-  }
+  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+  __ fcmp(f12, 0.0, UEQ);
 
-  if (types_.Contains(SPEC_OBJECT)) {
-    // Spec object -> true.
-    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-    // tos_ contains the correct non-zero return value already.
-    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
-  }
+  // "tos_" is a register, and contains a non zero value by default.
+  // Hence we only need to overwrite "tos_" with zero to return false for
+  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+  __ movt(tos_, zero_reg);
+  __ Ret();
 
-  if (types_.Contains(STRING)) {
-    // String value -> false iff empty.
-    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-    Label skip;
-    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
-    __ Ret(USE_DELAY_SLOT);  // the string length is OK as the return value
-    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-    __ bind(&skip);
-  }
+  __ bind(&not_heap_number);
 
-  if (types_.Contains(HEAP_NUMBER)) {
-    // Heap number -> false iff +0, -0, or NaN.
-    Label not_heap_number;
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    __ Branch(&not_heap_number, ne, map, Operand(at));
-    Label zero_or_nan, number;
-    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
-    // "tos_" is a register, and contains a non zero value by default.
-    // Hence we only need to overwrite "tos_" with zero to return false for
-    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-    __ bind(&zero_or_nan);
-    __ mov(tos_, zero_reg);
-    __ bind(&number);
-    __ Ret();
-    __ bind(&not_heap_number);
-  }
+  // It can be an undetectable object.
+  // Undetectable => false.
+  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
+  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
 
-  __ bind(&patch);
-  GenerateTypeTransition(masm);
-}
+  // JavaScript object => true.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
 
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-void ToBooleanStub::CheckOddball(MacroAssembler* masm,
-                                 Type type,
-                                 Heap::RootListIndex value,
-                                 bool result) {
-  if (types_.Contains(type)) {
-    // If we see an expected oddball, return its ToBoolean value tos_.
-    __ LoadRoot(at, value);
-    __ Subu(at, at, tos_);  // This is a check for equality for the movz below.
-    // The value of a root is never NULL, so we can avoid loading a non-null
-    // value into tos_ when we want to return 'true'.
-    if (!result) {
-      __ Movz(tos_, zero_reg, at);
-    }
-    __ Ret(eq, at, Operand(zero_reg));
-  }
-}
+  // Check for string.
+  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  // "tos_" is a register and contains a non-zero value.
+  // Hence we implicitly return true if the greater than
+  // condition is satisfied.
+  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
 
+  // String value => false iff empty, i.e., length is zero.
+  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+  // If length is zero, "tos_" contains zero ==> false.
+  // If length is not zero, "tos_" contains a non-zero value ==> true.
+  __ Ret();
 
-void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
-  __ Move(a3, tos_);
-  __ li(a2, Operand(Smi::FromInt(tos_.code())));
-  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
-  __ Push(a3, a2, a1);
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
-      3,
-      1);
-}
-
-
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
-  // We don't allow a GC during a store buffer overflow so there is no need to
-  // store the registers in any particular way, but we do have to store and
-  // restore them.
-  __ MultiPush(kJSCallerSaved | ra.bit());
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(FPU);
-    __ MultiPushFPU(kCallerSavedFPU);
-  }
-  const int argument_count = 1;
-  const int fp_argument_count = 0;
-  const Register scratch = a1;
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
-  __ li(a0, Operand(ExternalReference::isolate_address()));
-  __ CallCFunction(
-      ExternalReference::store_buffer_overflow_function(masm->isolate()),
-      argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
-    CpuFeatures::Scope scope(FPU);
-    __ MultiPopFPU(kCallerSavedFPU);
-  }
-
-  __ MultiPop(kJSCallerSaved | ra.bit());
+  // Return 0 in "tos_" for false.
+  __ bind(&false_result);
+  __ mov(tos_, zero_reg);
   __ Ret();
 }
 
@@ -2090,8 +1884,8 @@
   __ Branch(slow, eq, t0, Operand(zero_reg));
 
   // Return '0 - value'.
-  __ Ret(USE_DELAY_SLOT);
-  __ subu(v0, zero_reg, a0);
+  __ Subu(v0, zero_reg, a0);
+  __ Ret();
 }
 
 
@@ -2157,13 +1951,12 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(a0);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a1, v0);
-      __ pop(a0);
-    }
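+    // An internal frame is entered so the stack can be safely walked during
+    // the Runtime::kNumberAlloc call; a0 is preserved across the call by
+    // pushing it.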
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a1, v0);
+    __ pop(a0);
+    __ LeaveInternalFrame();
 
     __ bind(&heapnumber_allocated);
     __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -2205,14 +1998,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(v0);  // Push the heap number, not the untagged int32.
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ mov(a2, v0);  // Move the new heap number into a2.
-      // Get the heap number into v0, now that the new heap number is in a2.
-      __ pop(v0);
-    }
+    __ EnterInternalFrame();
+    __ push(v0);  // Push the heap number, not the untagged int32.
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ mov(a2, v0);  // Move the new heap number into a2.
+    // Get the heap number into v0, now that the new heap number is in a2.
+    __ pop(v0);
+    __ LeaveInternalFrame();
 
     // Convert the heap number in v0 to an untagged integer in a1.
     // This can't go slow-case because it's the same number we already
@@ -2323,9 +2115,6 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -2421,8 +2210,8 @@
       // Negating it results in 'lt'.
       __ Branch(&skip, lt, scratch2, Operand(zero_reg));
       ASSERT(Smi::FromInt(0) == 0);
-      __ Ret(USE_DELAY_SLOT);
-      __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
+      __ mov(v0, zero_reg);
+      __ Ret();  // Return smi 0 if the non-zero one was positive.
       __ bind(&skip);
       // We fall through here if we multiplied a negative number with 0, because
       // that would mean we should produce -0.
@@ -2477,23 +2266,23 @@
       }
       break;
     case Token::BIT_OR:
-      __ Ret(USE_DELAY_SLOT);
-      __ or_(v0, left, right);
+      __ Or(v0, left, Operand(right));
+      __ Ret();
       break;
     case Token::BIT_AND:
-      __ Ret(USE_DELAY_SLOT);
-      __ and_(v0, left, right);
+      __ And(v0, left, Operand(right));
+      __ Ret();
       break;
     case Token::BIT_XOR:
-      __ Ret(USE_DELAY_SLOT);
-      __ xor_(v0, left, right);
+      __ Xor(v0, left, Operand(right));
+      __ Ret();
       break;
     case Token::SAR:
       // Remove tags from right operand.
       __ GetLeastBitsFromSmi(scratch1, right, 5);
       __ srav(scratch1, left, scratch1);
       // Smi tag result.
-      __ And(v0, scratch1, ~kSmiTagMask);
+      __ And(v0, scratch1, Operand(~kSmiTagMask));
       __ Ret();
       break;
     case Token::SHR:
@@ -2605,8 +2394,8 @@
         // kValueOffset. On MIPS this workaround is built into sdc1 so
         // there's no point in generating even more instructions.
         __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
-        __ Ret(USE_DELAY_SLOT);
         __ mov(v0, result);
+        __ Ret();
       } else {
         // Call the C function to handle the double operation.
         FloatingPointHelper::CallCCodeForDoubleOperation(masm,
@@ -2928,16 +2717,26 @@
           // Otherwise return a heap number if allowed, or jump to type
           // transition.
 
-          Register except_flag = scratch2;
-          __ EmitFPUTruncate(kRoundToZero,
-                             single_scratch,
-                             f10,
-                             scratch1,
-                             except_flag);
+          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
+          // On MIPS much of that cannot be implemented the same way, so for
+          // now it is simpler to do things manually.
+
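+          // The FCSR is saved and FPU exceptions are disabled so that
+          // trunc_w_d can record an inexact or invalid conversion in the
+          // FCSR flag bits without trapping; those bits are read back into
+          // scratch2 before the original FCSR is restored.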
+          // Save FCSR.
+          __ cfc1(scratch1, FCSR);
+          // Disable FPU exceptions.
+          __ ctc1(zero_reg, FCSR);
+          __ trunc_w_d(single_scratch, f10);
+          // Retrieve FCSR.
+          __ cfc1(scratch2, FCSR);
+          // Restore FCSR.
+          __ ctc1(scratch1, FCSR);
+
+          // Check for inexact conversion or exception.
+          __ And(scratch2, scratch2, kFCSRFlagMask);
 
           if (result_type_ <= BinaryOpIC::INT32) {
-            // If except_flag != 0, result does not fit in a 32-bit integer.
-            __ Branch(&transition, ne, except_flag, Operand(zero_reg));
+            // If scratch2 != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
           }
 
           // Check if the result fits in a smi.
@@ -3130,9 +2929,9 @@
         __ Ret();
       } else {
         // Tail call that writes the int32 in a2 to the heap number in v0, using
-        // a3 and a0 as scratch. v0 is preserved and returned.
+        // a3 and a1 as scratch. v0 is preserved and returned.
         __ mov(a0, t1);
-        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
+        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
         __ TailCallStub(&stub);
       }
 
@@ -3426,12 +3225,10 @@
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
+    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
-    Counters* counters = masm->isolate()->counters();
-    __ IncrementCounter(
-        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
     if (tagged) {
       // Pop input value from stack and load result into v0.
       __ Drop(1);
@@ -3444,9 +3241,6 @@
   }  // if (CpuFeatures::IsSupported(FPU))
 
   __ bind(&calculate);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(
-      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
   if (tagged) {
     __ bind(&invalid_cache);
     __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
@@ -3465,13 +3259,13 @@
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ Push(cache_entry, a2, a3);
+    __ push(cache_entry);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ Pop(cache_entry, a2, a3);
+    __ pop(cache_entry);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3480,8 +3274,8 @@
     __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
     __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
 
-    __ Ret(USE_DELAY_SLOT);
     __ mov(v0, cache_entry);
+    __ Ret();
 
     __ bind(&invalid_cache);
     // The cache is invalid. Call runtime which will recreate the
@@ -3489,11 +3283,10 @@
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
     __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(a0);
-      __ CallRuntime(RuntimeFunction(), 1);
-    }
+    __ EnterInternalFrame();
+    __ push(a0);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
     __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3506,15 +3299,14 @@
 
     // We return the value in f4 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      // Allocate an aligned object larger than a HeapNumber.
-      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-      __ li(scratch0, Operand(4 * kPointerSize));
-      __ push(scratch0);
-      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    }
+    // Allocate an aligned object larger than a HeapNumber.
+    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+    __ li(scratch0, Operand(4 * kPointerSize));
+    __ push(scratch0);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
     __ Ret();
   }
 }
@@ -3525,31 +3317,22 @@
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(a0, a1, f4);
+    __ Move(v0, v1, f4);
   } else {
     __ mov_d(f12, f4);
   }
-  AllowExternalCallThatCantCauseGC scope(masm);
-  Isolate* isolate = masm->isolate();
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(isolate),
-          0, 1);
+          ExternalReference::math_sin_double_function(masm->isolate()), 2);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(isolate),
-          0, 1);
-      break;
-    case TranscendentalCache::TAN:
-      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
-          0, 1);
+          ExternalReference::math_cos_double_function(masm->isolate()), 2);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(isolate),
-          0, 1);
+          ExternalReference::math_log_double_function(masm->isolate()), 2);
       break;
     default:
       UNIMPLEMENTED();
@@ -3564,7 +3347,6 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -3578,224 +3360,106 @@
 }
 
 
-void InterruptStub::Generate(MacroAssembler* masm) {
-  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
 void MathPowStub::Generate(MacroAssembler* masm) {
-  CpuFeatures::Scope fpu_scope(FPU);
-  const Register base = a1;
-  const Register exponent = a2;
-  const Register heapnumbermap = t1;
-  const Register heapnumber = v0;
-  const DoubleRegister double_base = f2;
-  const DoubleRegister double_exponent = f4;
-  const DoubleRegister double_result = f0;
-  const DoubleRegister double_scratch = f6;
-  const FPURegister single_scratch = f8;
-  const Register scratch = t5;
-  const Register scratch2 = t3;
+  Label call_runtime;
 
-  Label call_runtime, done, int_exponent;
-  if (exponent_type_ == ON_STACK) {
-    Label base_is_smi, unpack_exponent;
-    // The exponent and base are supplied as arguments on the stack.
-    // This can only happen if the stub is called from non-optimized code.
-    // Load input parameters from stack to double registers.
+  if (CpuFeatures::IsSupported(FPU)) {
+    CpuFeatures::Scope scope(FPU);
+
+    Label base_not_smi;
+    Label exponent_not_smi;
+    Label convert_exponent;
+
+    const Register base = a0;
+    const Register exponent = a2;
+    const Register heapnumbermap = t1;
+    const Register heapnumber = s0;  // Callee-saved register.
+    const Register scratch = t2;
+    const Register scratch2 = t3;
+
+    // Allocate FP values in the ABI-parameter-passing regs.
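+    // (Under the MIPS o32 hard-float ABI the first two double arguments of
+    // a C call are passed in f12 and f14, so no extra moves are needed
+    // before the CallCFunction calls below.)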
+    const DoubleRegister double_base = f12;
+    const DoubleRegister double_exponent = f14;
+    const DoubleRegister double_result = f0;
+    const DoubleRegister double_scratch = f2;
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
     __ lw(base, MemOperand(sp, 1 * kPointerSize));
     __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
 
-    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+    // Convert base to double value and store it in double_base (f12).
+    __ JumpIfNotSmi(base, &base_not_smi);
+    // Base is a Smi. Untag and convert it.
+    __ SmiUntag(base);
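+    // mtc1 moves the untagged integer into an FPU register and cvt_d_w then
+    // converts the 32-bit word to a double.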
+    __ mtc1(base, double_scratch);
+    __ cvt_d_w(double_base, double_scratch);
+    __ Branch(&convert_exponent);
 
-    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+    __ bind(&base_not_smi);
     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-
+    // Base is a heap number. Load it into a double register.
     __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
-    __ jmp(&unpack_exponent);
 
-    __ bind(&base_is_smi);
-    __ mtc1(scratch, single_scratch);
-    __ cvt_d_w(double_base, single_scratch);
-    __ bind(&unpack_exponent);
+    __ bind(&convert_exponent);
+    __ JumpIfNotSmi(exponent, &exponent_not_smi);
+    __ SmiUntag(exponent);
 
-    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
-    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
-    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
-    __ ldc1(double_exponent,
-            FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
-    // Base is already in double_base.
-    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
-    __ ldc1(double_exponent,
-            FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  }
-
-  if (exponent_type_ != INTEGER) {
-    Label int_exponent_convert;
-    // Detect integer exponents stored as double.
-    __ EmitFPUTruncate(kRoundToMinusInf,
-                       single_scratch,
-                       double_exponent,
-                       scratch,
-                       scratch2,
-                       kCheckForInexactConversion);
-    // scratch2 == 0 means there was no conversion error.
-    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
-
-    if (exponent_type_ == ON_STACK) {
-      // Detect square root case.  Crankshaft detects constant +/-0.5 at
-      // compile time and uses DoMathPowHalf instead.  We then skip this check
-      // for non-constant cases of +/-0.5 as these hardly occur.
-      Label not_plus_half;
-
-      // Test for 0.5.
-      __ Move(double_scratch, 0.5);
-      __ BranchF(USE_DELAY_SLOT,
-                 &not_plus_half,
-                 NULL,
-                 ne,
-                 double_exponent,
-                 double_scratch);
-      // double_scratch can be overwritten in the delay slot.
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
-      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
-      __ neg_d(double_result, double_scratch);
-
-      // Add +0 to convert -0 to +0.
-      __ add_d(double_scratch, double_base, kDoubleRegZero);
-      __ sqrt_d(double_result, double_scratch);
-      __ jmp(&done);
-
-      __ bind(&not_plus_half);
-      __ Move(double_scratch, -0.5);
-      __ BranchF(USE_DELAY_SLOT,
-                 &call_runtime,
-                 NULL,
-                 ne,
-                 double_exponent,
-                 double_scratch);
-      // double_scratch can be overwritten in the delay slot.
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
-      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
-      __ Move(double_result, kDoubleRegZero);
-
-      // Add +0 to convert -0 to +0.
-      __ add_d(double_scratch, double_base, kDoubleRegZero);
-      __ Move(double_result, 1);
-      __ sqrt_d(double_scratch, double_scratch);
-      __ div_d(double_result, double_result, double_scratch);
-      __ jmp(&done);
-    }
-
+    // The base is in a double register and the exponent is
+    // an untagged smi. Allocate a heap number and call a
+    // C function for integer exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
     __ push(ra);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(0, 2, scratch);
-      __ SetCallCDoubleArguments(double_base, double_exponent);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()),
-          0, 2);
-    }
+    __ PrepareCallCFunction(3, scratch);
+    __ SetCallCDoubleArguments(double_base, exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(masm->isolate()), 3);
     __ pop(ra);
     __ GetCFunctionDoubleResult(double_result);
-    __ jmp(&done);
-
-    __ bind(&int_exponent_convert);
-    __ mfc1(scratch, single_scratch);
-  }
-
-  // Calculate power with integer exponent.
-  __ bind(&int_exponent);
-
-  // Get two copies of exponent in the registers scratch and exponent.
-  if (exponent_type_ == INTEGER) {
-    __ mov(scratch, exponent);
-  } else {
-    // Exponent has previously been stored into scratch as untagged integer.
-    __ mov(exponent, scratch);
-  }
-
-  __ mov_d(double_scratch, double_base);  // Back up base.
-  __ Move(double_result, 1.0);
-
-  // Get absolute value of exponent.
-  Label positive_exponent;
-  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
-  __ Subu(scratch, zero_reg, scratch);
-  __ bind(&positive_exponent);
-
-  Label while_true, no_carry, loop_end;
-  __ bind(&while_true);
-
-  __ And(scratch2, scratch, 1);
-
-  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
-  __ mul_d(double_result, double_result, double_scratch);
-  __ bind(&no_carry);
-
-  __ sra(scratch, scratch, 1);
-
-  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
-  __ mul_d(double_scratch, double_scratch, double_scratch);
-
-  __ Branch(&while_true);
-
-  __ bind(&loop_end);
-
-  __ Branch(&done, ge, exponent, Operand(zero_reg));
-  __ Move(double_scratch, 1.0);
-  __ div_d(double_result, double_scratch, double_result);
-  // Test whether result is zero.  Bail out to check for subnormal result.
-  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
-  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
-
-  // double_exponent may not contain the exponent value if the input was a
-  // smi.  We set it with exponent value before bailing out.
-  __ mtc1(exponent, single_scratch);
-  __ cvt_d_w(double_exponent, single_scratch);
-
-  // Returning or bailing out.
-  Counters* counters = masm->isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
-    // The arguments are still on the stack.
-    __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
-
-    // The stub is called from non-optimized code, which expects the result
-    // as heap number in exponent.
-    __ bind(&done);
-    __ AllocateHeapNumber(
-        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    ASSERT(heapnumber.is(v0));
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
-    __ DropAndRet(2);
-  } else {
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
+
+    __ bind(&exponent_not_smi);
+    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    // Exponent is a heap number. Load it into a double register.
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+
+    // The base and the exponent are in double registers.
+    // Allocate a heap number and call a C function for
+    // double exponents. The register containing
+    // the heap number is callee-saved.
+    __ AllocateHeapNumber(heapnumber,
+                          scratch,
+                          scratch2,
+                          heapnumbermap,
+                          &call_runtime);
     __ push(ra);
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(0, 2, scratch);
-      __ SetCallCDoubleArguments(double_base, double_exponent);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()),
-          0, 2);
-    }
+    __ PrepareCallCFunction(4, scratch);
+    // ABI (o32) for func(double a, double b): a in f12, b in f14.
+    ASSERT(double_base.is(f12));
+    ASSERT(double_exponent.is(f14));
+    __ SetCallCDoubleArguments(double_base, double_exponent);
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(masm->isolate()), 4);
     __ pop(ra);
     __ GetCFunctionDoubleResult(double_result);
-
-    __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
-    __ Ret();
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    __ mov(v0, heapnumber);
+    __ DropAndRet(2 * kPointerSize);
   }
+
+  __ bind(&call_runtime);
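+  // Fall back to the runtime, which consumes the two stack arguments (base
+  // and exponent) and returns a single result.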
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
 
 
@@ -3804,34 +3468,14 @@
 }
 
 
-bool CEntryStub::IsPregenerated() {
-  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
-          result_size_ == 1;
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  __ Throw(v0);
 }
 
 
-void CodeStub::GenerateStubsAheadOfTime() {
-  CEntryStub::GenerateAheadOfTime();
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
-  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
-  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  Handle<Code> code = save_doubles.GetCode();
-  code->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub(kSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  code->GetIsolate()->set_fp_stubs_generated(true);
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
-  CEntryStub stub(1, kDontSaveFPRegs);
-  Handle<Code> code = stub.GetCode();
-  code->set_is_pregenerated(true);
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  __ ThrowUncatchable(type, v0);
 }
 
 
@@ -3846,17 +3490,16 @@
   // s1: pointer to the first argument          (C callee-saved)
   // s2: pointer to builtin function            (C callee-saved)
 
-  Isolate* isolate = masm->isolate();
-
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, 0, a1);
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+    __ PrepareCallCFunction(1, a1);
+    __ CallCFunction(
+        ExternalReference::perform_gc_function(masm->isolate()), 1);
   }
 
   ExternalReference scope_depth =
-      ExternalReference::heap_always_allocate_scope_depth(isolate);
+      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
   if (always_allocate) {
     __ li(a0, Operand(scope_depth));
     __ lw(a1, MemOperand(a0));
@@ -3864,10 +3507,9 @@
     __ sw(a1, MemOperand(a0));
   }
 
-  // Prepare arguments for C routine.
-  // a0 = argc
+  // Prepare arguments for C routine: a0 = argc, a1 = argv
   __ mov(a0, s0);
-  // a1 = argv (set in the delay slot after find_ra below).
+  __ mov(a1, s1);
 
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
   // also need to reserve the 4 argument slots on the stack.
@@ -3887,28 +3529,30 @@
     // coverage code can interfere with the proper calculation of ra.
     Label find_ra;
     masm->bal(&find_ra);  // bal exposes branch delay slot.
-    masm->mov(a1, s1);
+    masm->nop();  // Branch delay slot nop.
     masm->bind(&find_ra);
 
     // Adjust the value in ra to point to the correct return location, 2nd
     // instruction past the real call into C code (the jalr(t9)), and push it.
     // This is the return address of the exit frame.
-    const int kNumInstructionsToJump = 5;
+    const int kNumInstructionsToJump = 6;
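+    // Those six are: Addu(ra), sw(ra), Subu(sp), mov(t9), jalr(t9) and its
+    // delay-slot nop (each macro expands to a single instruction here), so
+    // ra ends up pointing just past the delay slot; the ASSERT_EQ below
+    // checks the count.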
     masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
     masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
-    // Stack space reservation moved to the branch delay slot below.
+    masm->Subu(sp, sp, kCArgsSlotsSize);
     // Stack is still aligned.
 
     // Call the C routine.
     masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
     masm->jalr(t9);
-    // Set up sp in the delay slot.
-    masm->addiu(sp, sp, -kCArgsSlotsSize);
+    masm->nop();    // Branch delay slot nop.
     // Make sure the stored 'ra' points to this position.
     ASSERT_EQ(kNumInstructionsToJump,
               masm->InstructionsGeneratedSince(&find_ra));
   }
 
+  // Restore stack (remove arg slots).
+  __ Addu(sp, sp, kCArgsSlotsSize);
+
   if (always_allocate) {
     // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
     __ li(a2, Operand(scope_depth));
@@ -3922,16 +3566,14 @@
   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   __ addiu(a2, v0, 1);
   __ andi(t0, a2, kFailureTagMask);
-  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
-  // Restore stack (remove arg slots) in branch delay slot.
-  __ addiu(sp, sp, kCArgsSlotsSize);
-
+  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
 
   // Exit C frame and return.
   // v0:v1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame(save_doubles_, s0, true);
+  __ LeaveExitFrame(save_doubles_, s0);
+  __ Ret();
 
   // Check if we should retry or throw exception.
   Label retry;
@@ -3942,25 +3584,22 @@
 
   // Special handling of out of memory exceptions.
   Failure* out_of_memory = Failure::OutOfMemoryException();
-  __ Branch(USE_DELAY_SLOT,
-            throw_out_of_memory_exception,
-            eq,
-            v0,
-            Operand(reinterpret_cast<int32_t>(out_of_memory)));
-  // If we throw the OOM exception, the value of a3 doesn't matter.
-  // Any instruction can be in the delay slot that's not a jump.
+  __ Branch(throw_out_of_memory_exception, eq,
+            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
 
   // Retrieve the pending exception and clear the variable.
-  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a3, MemOperand(t0));
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ lw(v0, MemOperand(t0));
   __ sw(a3, MemOperand(t0));
 
   // Special handling of termination exceptions which are uncatchable
   // by javascript code.
-  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
-  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
+  __ Branch(throw_termination_exception, eq,
+            v0, Operand(masm->isolate()->factory()->termination_exception()));
 
   // Handle normal exception.
   __ jmp(throw_normal_exception);
@@ -3972,9 +3611,8 @@
 
 void CEntryStub::Generate(MacroAssembler* masm) {
   // Called from JavaScript; parameters are on stack as if calling JS function
-  // s0: number of arguments including receiver
-  // s1: size of arguments excluding receiver
-  // s2: pointer to builtin function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
   // fp: frame pointer    (restored after C call)
   // sp: stack pointer    (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
@@ -3984,18 +3622,18 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
-  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
-  // The reason for this is that these arguments would need to be saved anyway
-  // so it's faster to set them up directly.
-  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
   // Compute the argv pointer in a callee-saved register.
+  __ sll(s1, a0, kPointerSizeLog2);
   __ Addu(s1, sp, s1);
+  __ Subu(s1, s1, Operand(kPointerSize));
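+  // s1 = sp + argc * kPointerSize - kPointerSize: the address of the first
+  // argument, i.e. the highest slot of the argument area on the stack.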
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
+  // Setup argc and the builtin function in callee-saved registers.
+  __ mov(s0, a0);
+  __ mov(s2, a1);
+
   // s0: number of arguments (C callee-saved)
   // s1: pointer to first argument (C callee-saved)
   // s2: pointer to builtin function (C callee-saved)
@@ -4031,38 +3669,23 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  // Set external caught exception to false.
-  Isolate* isolate = masm->isolate();
-  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                    isolate);
-  __ li(a0, Operand(false, RelocInfo::NONE));
-  __ li(a2, Operand(external_caught));
-  __ sw(a0, MemOperand(a2));
-
-  // Set pending exception and v0 to out of memory exception.
-  Failure* out_of_memory = Failure::OutOfMemoryException();
-  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate)));
-  __ sw(v0, MemOperand(a2));
-  // Fall through to the next label.
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(v0);
+  GenerateThrowUncatchable(masm, TERMINATION);
 
   __ bind(&throw_normal_exception);
-  __ Throw(v0);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, handler_entry, exit;
-  Isolate* isolate = masm->isolate();
+  Label invoke, exit;
 
   // Registers:
   // a0: entry address
   // a1: function
-  // a2: receiver
+  // a2: receiver
   // a3: argc
   //
   // Stack:
@@ -4076,18 +3699,14 @@
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
-    // Set up the reserved register for 0.0.
-    __ Move(kDoubleRegZero, 0.0);
   }
 
-
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
     offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
   }
 
-  __ InitializeRootRegister();
   __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
 
   // We build an EntryFrame.
@@ -4096,16 +3715,16 @@
   __ li(t2, Operand(Smi::FromInt(marker)));
   __ li(t1, Operand(Smi::FromInt(marker)));
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ lw(t0, MemOperand(t0));
   __ Push(t3, t2, t1, t0);
-  // Set up frame pointer for the frame to be pushed.
+  // Setup frame pointer for the frame to be pushed.
   __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
 
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: receiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -4120,7 +3739,8 @@
 
   // If this is the outermost JS call, set js_entry_sp value.
   Label non_outermost_js;
-  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
+                                masm->isolate());
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ lw(t2, MemOperand(t1));
   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
@@ -4134,35 +3754,35 @@
   __ bind(&cont);
   __ push(t0);
 
-  // Jump to a faked try block that does the invoke, with a faked catch
-  // block that sets the pending exception.
-  __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel.  Coming in here the
-  // fp will be invalid because the PushTryHandler below sets it to 0 to
-  // signal the existence of the JSEntry frame.
+  // Call a faked try-block that does the invoke.
+  __ bal(&invoke);  // bal exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
+  // Coming in here the fp will be invalid because the PushTryHandler below
+  // sets it to 0 to signal the existence of the JSEntry frame.
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
   __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
   __ b(&exit);  // b exposes branch delay slot.
   __ nop();   // Branch delay slot nop.
 
-  // Invoke: Link this frame into the handler chain.  There's only one
-  // handler block in this code object, so its index is 0.
+  // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
-  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bal(&invoke) above, which
   // restores all kCalleeSaved registers (including cp and fp) to their
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
+  __ li(t0,
+        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(t1, MemOperand(t0));
   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ sw(t1, MemOperand(t0));
 
   // Invoke the function by calling through JS entry trampoline builtin.
@@ -4172,7 +3792,7 @@
   // Registers:
   // a0: entry_address
   // a1: function
-  // a2: receiver_pointer
+  // a2: receiver_pointer
   // a3: argc
   // s0: argv
   //
@@ -4185,7 +3805,7 @@
 
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
-                                      isolate);
+                                      masm->isolate());
     __ li(t0, Operand(construct_entry));
   } else {
     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
@@ -4204,9 +3824,7 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(t1);
-  __ Branch(&non_outermost_js_2,
-            ne,
-            t1,
+  __ Branch(&non_outermost_js_2, ne, t1,
             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ sw(zero_reg, MemOperand(t1));
@@ -4215,7 +3833,7 @@
   // Restore the top frame descriptors from the stack.
   __ pop(t1);
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ sw(t1, MemOperand(t0));
 
   // Reset the stack to the callee saved registers.
@@ -4239,10 +3857,11 @@
 // * object: a0 or at sp + 1 * kPointerSize.
 // * function: a1 or at sp.
 //
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register t0.
+// Inlined call site patching is a crankshaft-specific feature that is not
+// implemented on MIPS.
 void InstanceofStub::Generate(MacroAssembler* masm) {
+  // This is a crankshaft-specific feature that has not been implemented yet.
+  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -4256,8 +3875,6 @@
   const Register inline_site = t5;
   const Register scratch = a2;
 
-  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
-
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -4273,10 +3890,10 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(at));
-    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(at));
+    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(t1));
+    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(t1));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -4284,7 +3901,7 @@
   }
 
   // Get the prototype of the function.
-  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(prototype, &slow);
@@ -4296,16 +3913,7 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    ASSERT(HasArgsInRegisters());
-    // Patch the (relocated) inlined map check.
-
-    // The offset was stored in t0 safepoint slot.
-    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
-    __ LoadFromSafepointRegisterSlot(scratch, t0);
-    __ Subu(inline_site, ra, scratch);
-    // Get the map location in scratch and patch it.
-    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
-    __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+    UNIMPLEMENTED_MIPS();
   }
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -4331,16 +3939,7 @@
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    // Patch the call site to return true.
-    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
-    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
-    // Get the boolean result location in scratch and patch it.
-    __ PatchRelocatedValue(inline_site, scratch, v0);
-
-    if (!ReturnTrueFalseObject()) {
-      ASSERT_EQ(Smi::FromInt(0), 0);
-      __ mov(v0, zero_reg);
-    }
+    UNIMPLEMENTED_MIPS();
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -4349,17 +3948,8 @@
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    // Patch the call site to return false.
-    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
-    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
-    // Get the boolean result location in scratch and patch it.
-    __ PatchRelocatedValue(inline_site, scratch, v0);
-
-    if (!ReturnTrueFalseObject()) {
-      __ li(v0, Operand(Smi::FromInt(1)));
-    }
+    UNIMPLEMENTED_MIPS();
   }
-
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
@@ -4371,10 +3961,8 @@
   __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
   // Null is not instance of anything.
-  __ Branch(&object_not_null,
-            ne,
-            scratch,
-            Operand(masm->isolate()->factory()->null_value()));
+  __ Branch(&object_not_null, ne, scratch,
+      Operand(masm->isolate()->factory()->null_value()));
   __ li(v0, Operand(Smi::FromInt(1)));
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -4398,11 +3986,10 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(a0, a1);
-      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    }
+    __ EnterInternalFrame();
+    __ Push(a0, a1);
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    __ LeaveInternalFrame();
     __ mov(a0, v0);
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4479,10 +4066,8 @@
   Label runtime;
   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
-  __ Branch(&runtime,
-            ne,
-            a2,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Branch(&runtime, ne,
+            a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Patch the arguments.length and the parameters pointer in the current frame.
   __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -4514,9 +4099,7 @@
   Label adaptor_frame, try_allocate;
   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
-  __ Branch(&adaptor_frame,
-            eq,
-            a2,
+  __ Branch(&adaptor_frame, eq, a2,
             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // No adaptor, parameter count = argument count.
@@ -4595,7 +4178,7 @@
     __ sw(a3, FieldMemOperand(v0, i));
   }
 
-  // Set up the callee in-object property.
+  // Setup the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ lw(a3, MemOperand(sp, 2 * kPointerSize));
   const int kCalleeOffset = JSObject::kHeaderSize +
@@ -4608,7 +4191,7 @@
       Heap::kArgumentsLengthIndex * kPointerSize;
   __ sw(a2, FieldMemOperand(v0, kLengthOffset));
 
-  // Set up the elements pointer in the allocated arguments object.
+  // Setup the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, t0 will point there, otherwise
   // it will point to the backing store.
   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
@@ -4706,10 +4289,11 @@
   __ Branch(&arguments_loop, lt, t5, Operand(a2));
 
   // Return and remove the on-stack parameters.
-  __ DropAndRet(3);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 
   // Do the runtime call to allocate the arguments object.
-  // a2 = argument count (tagged)
+  // a2 = argument count (tagged)
   __ bind(&runtime);
   __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
@@ -4784,7 +4368,7 @@
   // Get the parameters pointer from the stack.
   __ lw(a2, MemOperand(sp, 1 * kPointerSize));
 
-  // Set up the elements pointer in the allocated arguments object and
+  // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
@@ -4796,7 +4380,7 @@
 
   // Copy the fixed array slots.
   Label loop;
-  // Set up t0 to point to the first array slot.
+  // Setup t0 to point to the first array slot.
   __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ bind(&loop);
   // Pre-decrement a2 with kPointerSize on each iteration.
@@ -4811,7 +4395,8 @@
 
   // Return and remove the on-stack parameters.
   __ bind(&done);
-  __ DropAndRet(3);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
@@ -4826,6 +4411,10 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
 
   // Stack frame on entry.
   //  sp[0]: last_match_info (expected JSArray)
@@ -4838,8 +4427,6 @@
   static const int kSubjectOffset = 2 * kPointerSize;
   static const int kJSRegExpOffset = 3 * kPointerSize;
 
-  Isolate* isolate = masm->isolate();
-
   Label runtime, invoke_regexp;
 
   // Allocation of registers for this function. These are in callee save
@@ -4855,9 +4442,9 @@
   // Ensure that a RegExp stack is allocated.
   ExternalReference address_of_regexp_stack_memory_address =
       ExternalReference::address_of_regexp_stack_memory_address(
-          isolate);
+          masm->isolate());
   ExternalReference address_of_regexp_stack_memory_size =
-      ExternalReference::address_of_regexp_stack_memory_size(isolate);
+      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ li(a0, Operand(address_of_regexp_stack_memory_size));
   __ lw(a0, MemOperand(a0, 0));
   __ Branch(&runtime, eq, a0, Operand(zero_reg));
@@ -4921,7 +4508,8 @@
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
   __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(a0, &runtime);
+  __ And(at, a0, Operand(kSmiTagMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
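+  // A smi has zero tag bits, so a non-zero result of the And means the
+  // index is not a smi and we bail out to the runtime.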
   __ Branch(&runtime, ls, a3, Operand(a0));
 
   // a2: Number of capture registers
@@ -4937,7 +4525,7 @@
          FieldMemOperand(a0, JSArray::kElementsOffset));
   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
   __ Branch(&runtime, ne, a0, Operand(
-      isolate->factory()->fixed_array_map()));
+      masm->isolate()->factory()->fixed_array_map()));
   // Check that the last match info has space for the capture registers and the
   // additional information.
   __ lw(a0,
@@ -4954,38 +4542,25 @@
   Label seq_string;
   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  // First check for flat string.  None of the following string type tests will
-  // succeed if subject is not a string or a short external string.
-  __ And(a1,
-         a0,
-         Operand(kIsNotStringMask |
-                 kStringRepresentationMask |
-                 kShortExternalStringMask));
+  // First check for flat string.
+  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
 
   // subject: Subject string
   // a0: instance type if Subject string
   // regexp_data: RegExp data (FixedArray)
-  // a1: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, external_string, check_encoding;
+  Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
-  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
-  __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
-
-  // Catch non-string subject or short external string.
-  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
-  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
-  __ Branch(&runtime, ne, at, Operand(zero_reg));
+  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
 
   // String is sliced.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
@@ -5005,7 +4580,7 @@
   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSeqStringTag == 0);
   __ And(at, a0, Operand(kStringRepresentationMask));
-  __ Branch(&external_string, ne, at, Operand(zero_reg));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
 
   __ bind(&seq_string);
   // subject: Subject string
@@ -5015,11 +4590,11 @@
   STATIC_ASSERT(kAsciiStringTag == 4);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   // Find the code object based on the assumptions above.
-  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
-  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
+  __ sra(a3, a0, 2);  // a3 is 1 for ascii, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
-  __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
@@ -5041,7 +4616,7 @@
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
-  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
+  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
                       1, a0, a2);
 
   // Isolates: note we add an additional parameter here (isolate pointer).
@@ -5081,12 +4656,13 @@
 
   // Argument 5: static offsets vector buffer.
   __ li(a0, Operand(
-        ExternalReference::address_of_static_offsets_vector(isolate)));
+        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
-  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -5127,18 +4703,23 @@
   // Check the result.
 
   Label success;
-  __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
+  __ Branch(&success, eq,
+            v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
   Label failure;
-  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ Branch(&failure, eq,
+            v0, Operand(NativeRegExpMacroAssembler::FAILURE));
   // If not exception it can only be retry. Handle that in the runtime system.
-  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  __ Branch(&runtime, ne,
+            v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
   // Result must now be exception. If there is no pending exception already a
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ li(a1, Operand(isolate->factory()->the_hole_value()));
+  __ li(a1, Operand(
+      ExternalReference::the_hole_value_location(masm->isolate())));
+  __ lw(a1, MemOperand(a1, 0));
   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
-                                      isolate)));
+                                      masm->isolate())));
   __ lw(v0, MemOperand(a2, 0));
   __ Branch(&runtime, eq, v0, Operand(a1));
 
@@ -5149,15 +4730,16 @@
   Label termination_exception;
   __ Branch(&termination_exception, eq, v0, Operand(a0));
 
-  __ Throw(v0);
+  __ Throw(v0);  // Expects thrown value in v0.
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(v0);
+  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
 
   __ bind(&failure);
   // For failure and exception return null.
-  __ li(v0, Operand(isolate->factory()->null_value()));
-  __ DropAndRet(4);
+  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
 
   // Process the result from the native regexp code.
   __ bind(&success);
@@ -5175,29 +4757,20 @@
   __ sw(a2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
+  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ mov(a2, subject);
-  __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastSubjectOffset,
-                      a2,
-                      t3,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
   __ sw(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ RecordWriteField(last_match_info_elements,
-                      RegExpImpl::kLastInputOffset,
-                      subject,
-                      t3,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs);
+  __ mov(a3, last_match_info_elements);
+  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
-      ExternalReference::address_of_static_offsets_vector(isolate);
+      ExternalReference::address_of_static_offsets_vector(masm->isolate());
   __ li(a2, Operand(address_of_static_offsets_vector));
 
   // a1: number of capture registers
@@ -5218,36 +4791,14 @@
   __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
   __ sw(a3, MemOperand(a0, 0));
   __ Branch(&next_capture, USE_DELAY_SLOT);
-  __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
+  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
 
   __ bind(&done);
 
   // Return last match info.
   __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
-  __ DropAndRet(4);
-
-  // External string.  Short external strings have already been ruled out.
-  // a0: scratch
-  __ bind(&external_string);
-  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
-  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ And(at, a0, Operand(kIsIndirectStringMask));
-    __ Assert(eq,
-              "external string expected, but not found",
-              at,
-              Operand(zero_reg));
-  }
-  __ lw(subject,
-        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ Subu(subject,
-          subject,
-          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-  __ jmp(&seq_string);
+  __ Addu(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
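
The capture loop above smi-tags each offset with a one-bit shift before storing it. A sketch of 32-bit smi encoding as the surrounding asserts pin it down (kSmiTag == 0, kSmiTagSize == 1):

    #include <cstdint>

    // Low bit 0 marks a small integer; the value lives in the upper 31 bits.
    inline int32_t SmiTag(int32_t value) { return value << 1; }  // the sll above
    inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // the sra used elsewhere
    inline bool IsSmi(int32_t word) { return (word & 1) == 0; }  // JumpIfSmi's test
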
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
@@ -5301,11 +4852,11 @@
 
   // Set input, index and length fields from arguments.
   __ lw(a1, MemOperand(sp, kPointerSize * 0));
-  __ lw(a2, MemOperand(sp, kPointerSize * 1));
-  __ lw(t2, MemOperand(sp, kPointerSize * 2));
   __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
-  __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
-  __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 1));
+  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+  __ lw(a1, MemOperand(sp, kPointerSize * 2));
+  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
 
   // Fill out the elements FixedArray.
   // v0: JSArray, tagged.
@@ -5336,59 +4887,16 @@
   __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
 
   __ bind(&done);
-  __ DropAndRet(3);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 
   __ bind(&slowcase);
   __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
 }
 
 
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // a1 : the function to call
-  // a2 : cache cell for call target
-  Label done;
-
-  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
-            masm->isolate()->heap()->undefined_value());
-  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
-            masm->isolate()->heap()->the_hole_value());
-
-  // Load the cache state into a3.
-  __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ Branch(&done, eq, a3, Operand(a1));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done, eq, a3, Operand(at));
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
-  __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
-  // An uninitialized cache is patched with the function.
-  // Store a1 in the delay slot. This may or may not get overwritten depending
-  // on the result of the comparison.
-  __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-  // No need for a write barrier here - cells are rescanned.
-
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-
-  __ bind(&done);
-}
-
-
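
For context, the GenerateRecordCallTarget helper deleted above drives a three-state cache held in a global property cell: the hole means uninitialized, a JSFunction means monomorphic, and undefined means megamorphic. A sketch of that state machine, with plain pointers standing in for heap values:

    struct Value {};
    static Value kTheHole;    // uninitialized sentinel
    static Value kUndefined;  // megamorphic sentinel

    // Mirrors the branches in the deleted stub code.
    void RecordCallTarget(Value** cell, Value* function) {
      Value* state = *cell;
      if (state == function || state == &kUndefined) return;  // hit, or already megamorphic
      *cell = (state == &kTheHole) ? function                 // first call: go monomorphic
                                   : &kUndefined;             // miss: go megamorphic
    }
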
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // a1 : the function to call
-  // a2 : cache cell for call target
-  Label slow, non_function;
+  Label slow;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
@@ -5402,15 +4910,19 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
     __ Branch(&call, ne, t0, Operand(at));
     // Patch the receiver on the stack with the global receiver object.
-    __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
-    __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
+    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
+    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
     __ bind(&call);
   }
 
+  // Get the function to call from the stack.
+  // function, receiver [, arguments]
+  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
   // Check that the function is really a JavaScript function.
   // a1: pushed function (to be verified)
-  __ JumpIfSmi(a1, &non_function);
+  __ JumpIfSmi(a1, &slow);
   // Get the map of the function object.
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -5438,24 +4950,10 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
-  // Check for function proxy.
-  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ push(a1);  // Put proxy as additional argument.
-  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
-  __ li(a2, Operand(0, RelocInfo::NONE));
-  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
-  __ SetCallKind(t1, CALL_AS_METHOD);
-  {
-    Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
-    __ Jump(adaptor, RelocInfo::CODE_TARGET);
-  }
-
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
-  __ bind(&non_function);
   __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
-  __ li(a0, Operand(argc_));  // Set up the number of arguments.
+  __ li(a0, Operand(argc_));  // Setup the number of arguments.
   __ mov(a2, zero_reg);
   __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
   __ SetCallKind(t1, CALL_AS_METHOD);
@@ -5464,48 +4962,6 @@
 }
 
 
-void CallConstructStub::Generate(MacroAssembler* masm) {
-  // a0 : number of arguments
-  // a1 : the function to call
-  // a2 : cache cell for call target
-  Label slow, non_function_call;
-
-  // Check that the function is not a smi.
-  __ JumpIfSmi(a1, &non_function_call);
-  // Check that the function is a JSFunction.
-  __ GetObjectType(a1, a3, a3);
-  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
-
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
-  }
-
-  // Jump to the function-specific construct stub.
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
-  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // a0: number of arguments
-  // a1: called object
-  // a3: object type
-  Label do_call;
-  __ bind(&slow);
-  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
-
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing r0).
-  __ li(a2, Operand(0, RelocInfo::NONE));
-  __ SetCallKind(t1, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
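
The deleted CallConstructStub fast path reaches the function-specific construct stub through SharedFunctionInfo and jumps just past the Code header. A sketch of that address computation (field names simplified; constants as named in the code):

    #include <cstdint>

    struct Code {};  // header followed by instructions
    struct SharedFunctionInfo { Code* construct_stub; };
    struct JSFunction { SharedFunctionInfo* shared; };

    // Mirrors "Addu(at, a2, Code::kHeaderSize - kHeapObjectTag); Jump(at)".
    uintptr_t ConstructEntry(JSFunction* f, int kHeaderSize, int kHeapObjectTag) {
      return reinterpret_cast<uintptr_t>(f->shared->construct_stub)
             + kHeaderSize - kHeapObjectTag;  // first instruction past the header
    }
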
 // Unfortunately you have to run without snapshots to see most of these
 // names in the profile since most compare stubs end up in the snapshot.
 void CompareStub::PrintName(StringStream* stream) {
@@ -5552,6 +5008,7 @@
   Label got_char_code;
   Label sliced_string;
 
+  ASSERT(!t0.is(scratch_));
   ASSERT(!t0.is(index_));
   ASSERT(!t0.is(result_));
   ASSERT(!t0.is(object_));
@@ -5569,41 +5026,102 @@
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
 
+  // Put smi-tagged index into scratch register.
+  __ mov(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
-  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
+  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
 
-  __ sra(index_, index_, kSmiTagSize);
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
 
-  StringCharLoadGenerator::Generate(masm,
-                                    object_,
-                                    index_,
-                                    result_,
-                                    &call_runtime_);
+  // Handle non-flat strings.
+  __ And(result_, result_, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
+  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
 
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case, we would rather go to the runtime system now to flatten
+  // the string.
+  Label assure_seq_string;
+  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+  __ Branch(&call_runtime_, ne, result_, Operand(t0));
+
+  // Get the first of the two strings and load its instance type.
+  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
+  __ addu(scratch_, scratch_, result_);
+  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
+
+  // Assure that we are dealing with a sequential string. Go to runtime if not.
+  __ bind(&assure_seq_string);
+  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // Check that parent is not an external string. Go to runtime otherwise.
+  STATIC_ASSERT(kSeqStringTag == 0);
+
+  __ And(t0, result_, Operand(kStringRepresentationMask));
+  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ And(t0, result_, Operand(kStringEncodingMask));
+  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register. We can
+  // add without shifting since the smi tag size is the log2 of the
+  // number of bytes in a two-byte character.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  __ Addu(scratch_, object_, Operand(scratch_));
+  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+  __ Branch(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+
+  __ srl(t0, scratch_, kSmiTagSize);
+  __ Addu(scratch_, object_, t0);
+
+  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
+
+  __ bind(&got_char_code);
   __ sll(result_, result_, kSmiTagSize);
   __ bind(&exit_);
 }
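
The fast path restored above dispatches on string shape before loading a character: a cons string is handled only when its second part is empty, a sliced string contributes its offset, and anything still non-sequential afterwards goes to the runtime. The same logic in C++, with simplified stand-in shapes (the real code dispatches on instance-type bits):

    #include <cstdint>

    struct Str {
      enum Kind { kSeqAscii, kSeqTwoByte, kCons, kSliced, kExternal } kind;
      Str* first;             // cons: left part
      Str* second;            // cons: right part (nullptr stands in for empty)
      Str* parent;            // slice: underlying string
      int offset;             // slice: start offset
      const uint8_t* bytes;   // sequential ASCII payload
      const uint16_t* chars;  // sequential two-byte payload
    };

    // Mirrors the fast path; returns -1 where the stub would call the runtime.
    int CharCodeAt(Str* s, int index) {
      if (s->kind == Str::kCons) {
        if (s->second != nullptr) return -1;  // not actually flat: runtime flattens
        s = s->first;
      } else if (s->kind == Str::kSliced) {
        index += s->offset;                   // unpack and add offset
        s = s->parent;
      }
      if (s->kind != Str::kSeqAscii && s->kind != Str::kSeqTwoByte)
        return -1;                            // parent not sequential: runtime
      return s->kind == Str::kSeqAscii ? s->bytes[index] : s->chars[index];
    }
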
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   // Index is not a smi.
   __ bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
   __ CheckMap(index_,
-              result_,
+              scratch_,
               Heap::kHeapNumberMapRootIndex,
               index_not_number_,
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   // Consumed by runtime conversion function:
-  __ Push(object_, index_);
+  __ Push(object_, index_, index_);
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
@@ -5615,14 +5133,16 @@
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
 
-  __ Move(index_, v0);
+  __ Move(scratch_, v0);
+
+  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(index_, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ Branch(&got_smi_index_);
 
@@ -5631,7 +5151,6 @@
   // is too complex (e.g., when the string needs to be flattened).
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
-  __ sll(index_, index_, kSmiTagSize);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
 
@@ -5675,8 +5194,7 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -5702,13 +5220,76 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersLong adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     bool ascii);
+
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharactersLong(MacroAssembler* masm,
+                                         Register dest,
+                                         Register src,
+                                         Register count,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Register scratch3,
+                                         Register scratch4,
+                                         Register scratch5,
+                                         int flags);
+
+
+  // Probe the symbol table for a two character string. If the string is
+  // not found by probing, a jump to the label not_found is performed. This
+  // jump does not guarantee that the string is not in the symbol table. If
+  // the string is found, the code falls through with the string in register
+  // v0. Contents of both c1 and c2 registers are modified. At the exit c1 is
+  // guaranteed to contain a halfword with the low and high bytes equal to the
+  // initial contents of c1 and c2 respectively.
+  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                   Register c1,
+                                                   Register c2,
+                                                   Register scratch1,
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4,
+                                                   Register scratch5,
+                                                   Label* not_found);
+
+  // Generate string hash.
+  static void GenerateHashInit(MacroAssembler* masm,
+                               Register hash,
+                               Register character);
+
+  static void GenerateHashAddCharacter(MacroAssembler* masm,
+                                       Register hash,
+                                       Register character);
+
+  static void GenerateHashGetHash(MacroAssembler* masm,
+                                  Register hash);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
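
Per the contract above, the probe works on two character codes packed into one little-endian halfword, which is also what lets a two-character result string be written with a single sh store elsewhere in this file. The packing step in isolation:

    #include <cstdint>

    // Low byte from c1, high byte from c2 (little endian), matching the
    // contract documented for GenerateTwoCharacterSymbolTableProbe.
    uint16_t PackTwoChars(uint8_t c1, uint8_t c2) {
      return static_cast<uint16_t>(c1 | (c2 << 8));
    }
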
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                           Register dest,
                                           Register src,
@@ -5884,8 +5465,10 @@
   __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
   __ Or(c1, c1, scratch1);
   __ bind(&tmp);
-  __ Branch(
-      not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
+  __ Branch(not_found,
+            Uless_equal,
+            scratch,
+            Operand(static_cast<int>('9' - '0')));
 
   __ bind(&not_array_index);
   // Calculate the two character string hash.
@@ -5957,10 +5540,10 @@
     __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
 
     __ Branch(not_found, eq, undefined, Operand(candidate));
-    // Must be the hole (deleted entry).
+    // Must be null (deleted entry).
     if (FLAG_debug_code) {
-      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-      __ Assert(eq, "oddball in symbol table is not undefined or the hole",
+      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+      __ Assert(eq, "oddball in symbol table is not undefined or null",
           scratch, Operand(candidate));
     }
     __ jmp(&next_probe[i]);
@@ -6004,7 +5587,7 @@
   __ sll(at, hash, 10);
   __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ srl(at, hash, 6);
+  __ sra(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
@@ -6018,7 +5601,7 @@
   __ sll(at, hash, 10);
   __ addu(hash, hash, at);
   // hash ^= hash >> 6;
-  __ srl(at, hash, 6);
+  __ sra(at, hash, 6);
   __ xor_(hash, hash, at);
 }
 
@@ -6029,23 +5612,20 @@
   __ sll(at, hash, 3);
   __ addu(hash, hash, at);
   // hash ^= hash >> 11;
-  __ srl(at, hash, 11);
+  __ sra(at, hash, 11);
   __ xor_(hash, hash, at);
   // hash += hash << 15;
   __ sll(at, hash, 15);
   __ addu(hash, hash, at);
 
-  __ li(at, Operand(String::kHashBitMask));
-  __ and_(hash, hash, at);
-
   // if (hash == 0) hash = 27;
-  __ ori(at, zero_reg, StringHasher::kZeroHash);
-  __ Movz(hash, at, hash);
+  __ ori(at, zero_reg, 27);
+  __ movz(hash, at, hash);
 }
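
The three hash generators above emit exactly the steps their inline comments name. The same computation in plain C++, assuming the unsigned arithmetic those comments describe:

    #include <cstdint>

    // One step per character (GenerateHashInit / GenerateHashAddCharacter).
    uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    // Finalization (GenerateHashGetHash); 27 replaces a zero hash.
    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash == 0 ? 27 : hash;
    }

Note that the revert also switches the shifts from srl back to sra; for hash values with a clear sign bit the two agree, which is what the unsigned model above assumes.
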
 
 
 void SubStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
+  Label sub_string_runtime;
   // Stack frame on entry.
   //  ra: return address
   //  sp[0]: to
@@ -6063,31 +5643,53 @@
   static const int kFromOffset = 1 * kPointerSize;
   static const int kStringOffset = 2 * kPointerSize;
 
-  __ lw(a2, MemOperand(sp, kToOffset));
-  __ lw(a3, MemOperand(sp, kFromOffset));
+  Register to = t2;
+  Register from = t3;
+
+  // Check bounds and smi-ness.
+  __ lw(to, MemOperand(sp, kToOffset));
+  __ lw(from, MemOperand(sp, kFromOffset));
   STATIC_ASSERT(kFromOffset == kToOffset + 4);
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
 
-  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
-  // safe in this case.
-  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
-  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
-  // Both a2 and a3 are untagged integers.
+  __ JumpIfNotSmi(from, &sub_string_runtime);
+  __ JumpIfNotSmi(to, &sub_string_runtime);
 
-  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
+  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
+  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
 
-  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
-  __ Subu(a2, a2, a3);
+  // a3: from index (untagged smi)
+  // t5: to index (untagged smi)
 
-  // Make sure first argument is a string.
+  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
+
+  __ subu(a2, t5, a3);
+  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
+
+  // Special handling of sub-strings of length 1 and 2. One character strings
+  // are handled in the runtime system (looked up in the single character
+  // cache). Two character strings are looked up in the symbol cache in
+  // generated code.
+  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
+
+  // Both to and from are smis.
+
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Make sure first argument is a sequential (or flat) string.
   __ lw(v0, MemOperand(sp, kStringOffset));
-  __ JumpIfSmi(v0, &runtime);
+  __ Branch(&sub_string_runtime, eq, v0, Operand(kSmiTagMask));
+
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ And(t0, a1, Operand(kIsNotStringMask));
+  __ And(t4, v0, Operand(kIsNotStringMask));
 
-  __ Branch(&runtime, ne, t0, Operand(zero_reg));
+  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
 
   // Short-cut for the case of trivial substring.
   Label return_v0;
@@ -6097,16 +5699,74 @@
   __ sra(t0, t0, 1);
   __ Branch(&return_v0, eq, a2, Operand(t0));
 
+  Label create_slice;
+  if (FLAG_string_slices) {
+    __ Branch(&create_slice, ge, a2, Operand(SlicedString::kMinLength));
+  }
+
+  // v0: original string
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  Label seq_string;
+  __ And(t0, a1, Operand(kStringRepresentationMask));
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kConsStringTag < kSlicedStringTag);
+
+  // Slices and external strings go to runtime.
+  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
+
+  // Sequential strings are handled directly.
+  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
+
+  // Cons string. Try to recurse (once) on the first substring.
+  // (This adds a little more generality than necessary to handle flattened
+  // cons strings, but not much.)
+  __ lw(v0, FieldMemOperand(v0, ConsString::kFirstOffset));
+  __ lw(t0, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  // Cons, slices and external strings go to runtime.
+  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
+
+  // Definitely a sequential string.
+  __ bind(&seq_string);
+
+  // v0: original string
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t2: (a.k.a. to): to (smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
+  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
+  to = no_reg;
+
+  // v0: original string or left hand side of the original cons string.
+  // a1: instance type
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t3: (a.k.a. from): from offset (smi)
+  // t5: to index (untagged smi)
+
+  // Check for flat ASCII string.
+  Label non_ascii_flat;
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+
+  __ And(t4, a1, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
 
   Label result_longer_than_two;
-  // Check for special case of two character ASCII string, in which case
-  // we do a lookup in the symbol table first.
-  __ li(t0, 2);
-  __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
-  __ Branch(&runtime, lt, a2, Operand(t0));
+  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
 
-  __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
-
+  // Sub string of length 2 requested.
   // Get the two characters forming the sub string.
   __ Addu(v0, v0, Operand(a3));
   __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
@@ -6116,126 +5776,31 @@
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
       masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
+  Counters* counters = masm->isolate()->counters();
   __ jmp(&return_v0);
 
   // a2: result string length.
   // a3: two characters combined into halfword in little endian byte order.
   __ bind(&make_two_character_string);
-  __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
+  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
   __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
   __ jmp(&return_v0);
 
   __ bind(&result_longer_than_two);
 
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into t1.
-  // v0: original string
-  // a1: instance type
-  // a2: length
-  // a3: from index (untagged)
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ And(t0, a1, Operand(kIsIndirectStringMask));
-  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
-  // t0 is used as a scratch register and can be overwritten in either case.
-  __ And(t0, a1, Operand(kSlicedNotConsMask));
-  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
-  // Cons string.  Check whether it is flat, then fetch first part.
-  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
-  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
-  __ Branch(&runtime, ne, t1, Operand(t0));
-  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
-  // Update instance type.
-  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
+  // Locate 'from' character of string.
+  __ Addu(t1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ sra(t4, from, 1);
+  __ Addu(t1, t1, t4);
 
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-  __ sra(t0, t0, 1);  // Add offset to index.
-  __ Addu(a3, a3, t0);
-  // Update instance type.
-  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
-  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked);
+  // Allocate the result.
+  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
 
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the expected register.
-  __ mov(t1, v0);
-
-  __ bind(&underlying_unpacked);
-
-  if (FLAG_string_slices) {
-    Label copy_routine;
-    // t1: underlying subject string
-    // a1: instance type of underlying subject string
-    // a2: length
-    // a3: adjusted start index (untagged)
-    // Short slice.  Copy instead of slicing.
-    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
-    // Allocate new sliced string.  At this point we do not reload the instance
-    // type including the string encoding because we simply rely on the info
-    // provided by the original string.  It does not matter if the original
-    // string's encoding is wrong because we always have to recheck encoding of
-    // the newly created string's parent anyways due to externalized strings.
-    Label two_byte_slice, set_slice_header;
-    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-    __ And(t0, a1, Operand(kStringEncodingMask));
-    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
-    __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
-    __ jmp(&set_slice_header);
-    __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
-    __ bind(&set_slice_header);
-    __ sll(a3, a3, 1);
-    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
-    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
-    __ jmp(&return_v0);
-
-    __ bind(&copy_routine);
-  }
-
-  // t1: underlying subject string
-  // a1: instance type of underlying subject string
-  // a2: length
-  // a3: adjusted start index (untagged)
-  Label two_byte_sequential, sequential_string, allocate_result;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t0, a1, Operand(kExternalStringTag));
-  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ And(t0, a1, Operand(kShortExternalStringTag));
-  __ Branch(&runtime, ne, t0, Operand(zero_reg));
-  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
-  // t1 already points to the first character of underlying string.
-  __ jmp(&allocate_result);
-
-  __ bind(&sequential_string);
-  // Locate first character of underlying subject string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&allocate_result);
-  // Sequential acii string.  Allocate the result.
-  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
-  __ And(t0, a1, Operand(kStringEncodingMask));
-  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
-
-  // Allocate and copy the resulting ASCII string.
-  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
-
-  // Locate first character of substring to copy.
-  __ Addu(t1, t1, a3);
-
+  // v0: result string
+  // a2: result string length
+  // a3: from index (untagged smi)
+  // t1: first character of substring to copy
+  // t3: (a.k.a. from): from offset (smi)
   // Locate first character of result.
   __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
 
@@ -6248,17 +5813,30 @@
       masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ jmp(&return_v0);
 
-  // Allocate and copy the resulting two-byte string.
-  __ bind(&two_byte_sequential);
-  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
+  __ bind(&non_ascii_flat);
+  // a2: result string length
+  // t1: string
+  // t3: (a.k.a. from): from offset (smi)
+  // Check for flat two byte string.
 
-  // Locate first character of substring to copy.
+  // Locate 'from' character of string.
+  __ Addu(t1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // As "from" is a smi it is 2 times the value which matches the size of a two
+  // byte character.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  __ sll(t0, a3, 1);
-  __ Addu(t1, t1, t0);
+  __ Addu(t1, t1, Operand(from));
+
+  // Allocate the result.
+  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
+
+  // v0: result string
+  // a2: result string length
+  // t1: first character of substring to copy
   // Locate first character of result.
   __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
+  from = no_reg;
+
   // v0: result string.
   // a1: first character of result.
   // a2: result length.
@@ -6266,14 +5844,77 @@
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(
       masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
+  __ jmp(&return_v0);
+
+  if (FLAG_string_slices) {
+    __ bind(&create_slice);
+    // v0: original string
+    // a1: instance type
+    // a2: length
+    // a3: from index (untagged smi)
+    // t2 (a.k.a. to): to (smi)
+    // t3 (a.k.a. from): from offset (smi)
+    Label allocate_slice, sliced_string, seq_string;
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ And(t4, a1, Operand(kStringRepresentationMask));
+    __ Branch(&seq_string, eq, t4, Operand(zero_reg));
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ And(t4, a1, Operand(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
+
+    __ And(t4, a1, Operand(kSlicedNotConsMask));
+    __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
+    __ LoadRoot(t5, Heap::kEmptyStringRootIndex);
+    __ Branch(&sub_string_runtime, ne, t1, Operand(t5));
+    __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ addu(t3, t3, t1);
+    __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ mov(t1, v0);
+
+    __ bind(&allocate_slice);
+    // a1: instance type of original string
+    // a2: length
+    // t1: underlying subject string
+    // t3 (a.k.a. from): from offset (smi)
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyway due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ And(t4, a1, Operand(kStringEncodingMask));
+    __ Branch(&two_byte_slice, eq, t4, Operand(zero_reg));
+    __ AllocateAsciiSlicedString(v0, a2, a3, t0, &sub_string_runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(v0, a2, a3, t0, &sub_string_runtime);
+    __ bind(&set_slice_header);
+    __ sw(t3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+  }
 
   __ bind(&return_v0);
-  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
-  __ DropAndRet(3);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
 
   // Just jump to runtime to create the sub string.
-  __ bind(&runtime);
+  __ bind(&sub_string_runtime);
   __ TailCallRuntime(Runtime::kSubString, 3, 1);
 }
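
Summarizing the control flow of SubStringStub after this revert: lengths below two go to the runtime, a long enough result becomes a slice when FLAG_string_slices is on, length two tries the symbol table, and everything else copies characters. As a decision function (a sketch; thresholds as named in the code):

    enum class SubStringPath { kRuntime, kTwoCharProbe, kSlice, kCopy };

    // Mirrors the branch order generated above.
    SubStringPath Classify(int length, int kMinSliceLength, bool string_slices) {
      if (length < 2) return SubStringPath::kRuntime;        // 0/1 chars: runtime cache
      if (string_slices && length >= kMinSliceLength)
        return SubStringPath::kSlice;                        // SlicedString::kMinLength
      if (length == 2) return SubStringPath::kTwoCharProbe;  // symbol table probe
      return SubStringPath::kCopy;                           // flat copy
    }
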
 
@@ -6330,7 +5971,7 @@
   __ Subu(scratch3, scratch1, Operand(scratch2));
   Register length_delta = scratch3;
   __ slt(scratch4, scratch2, scratch1);
-  __ Movn(scratch1, scratch2, scratch4);
+  __ movn(scratch1, scratch2, scratch4);
   Register min_length = scratch1;
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
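
The slt/movn pair above is a branchless minimum of the two string lengths. The equivalent C, register names kept for reference:

    #include <cstdint>

    int32_t MinLength(int32_t scratch1, int32_t scratch2) {
      int32_t scratch4 = scratch2 < scratch1;  // slt
      if (scratch4 != 0) scratch1 = scratch2;  // movn
      return scratch1;                         // min of the two lengths
    }
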
@@ -6412,7 +6053,8 @@
   STATIC_ASSERT(kSmiTag == 0);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
   __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
 
   __ bind(&not_same);
 
@@ -6430,7 +6072,7 @@
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label call_runtime, call_builtin;
+  Label string_add_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   Counters* counters = masm->isolate()->counters();
@@ -6445,7 +6087,7 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfEitherSmi(a0, a1, &call_runtime);
+    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
     // Load instance types.
     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
@@ -6455,7 +6097,7 @@
     // If either is not a string, go to runtime.
     __ Or(t4, t0, Operand(t1));
     __ And(t4, t4, Operand(kIsNotStringMask));
-    __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
+    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -6487,14 +6129,15 @@
     __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
     __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
     __ mov(v0, a0);       // Assume we'll return first string (from a0).
-    __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
+    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
     __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
     __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
     __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
     __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
 
     __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-    __ DropAndRet(2);
+    __ Addu(sp, sp, Operand(2 * kPointerSize));
+    __ Ret();
 
     __ bind(&strings_not_empty);
   }
@@ -6527,7 +6170,7 @@
     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
   }
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
-                                                 &call_runtime);
+                                                 &string_add_runtime);
 
   // Get the two characters forming the sub string.
   __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
@@ -6537,9 +6180,10 @@
   // just allocate a new one.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
+      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
 
   __ bind(&make_two_character_string);
   // Resulting string has length 2 and first chars of two strings
@@ -6548,19 +6192,21 @@
   // halfword store instruction (which assumes that processor is
   // in a little endian mode).
   __ li(t2, Operand(2));
-  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
+  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
   __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
+  __ Branch(&string_add_flat_result, lt, t2,
+            Operand(String::kMinNonFlatLength));
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
-  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
+  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
 
   // If result is not supposed to be flat, allocate a cons string object.
   // If both strings are ASCII the result is an ASCII cons string.
@@ -6572,20 +6218,22 @@
   }
   Label non_ascii, allocated, ascii_data;
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
+  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
   __ And(t4, t0, Operand(t1));
   __ And(t4, t4, Operand(kStringEncodingMask));
   __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
 
   // Allocate an ASCII cons string.
   __ bind(&ascii_data);
-  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
+  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
-  __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
-  __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
+  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
+  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
+  __ mov(v0, t3);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
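
The cons-string path above avoids copying entirely: two stores record the halves, and flattening is deferred until the characters are actually needed. The shape being filled in, illustratively (the real ConsString has more header fields):

    // Illustrative layout only; the two sw instructions above fill first/second.
    struct ConsStringShape {
      void* map;     // set by AllocateAsciiConsString / AllocateTwoByteConsString
      int length;    // sum of the two lengths (t2)
      void* first;   // a0: left string
      void* second;  // a1: right string
    };
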
 
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
@@ -6603,13 +6251,11 @@
   __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
 
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
+  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
   __ Branch(&allocated);
 
-  // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
-  // Handle creating a flat result from either external or sequential strings.
-  // Locate the first characters' locations.
+  // Handle creating a flat result. First check that both strings are
+  // sequential and that they have the same encoding.
   // a0: first string
   // a1: second string
   // a2: length of first string
@@ -6617,7 +6263,6 @@
   // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
   // t2: sum of lengths.
-  Label first_prepared, second_prepared;
   __ bind(&string_add_flat_result);
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
@@ -6625,86 +6270,101 @@
     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
   }
-  // Check whether both strings have same encoding
-  __ Xor(t3, t0, Operand(t1));
-  __ And(t3, t3, Operand(kStringEncodingMask));
-  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
-
+  // Check that both strings are sequential, meaning that we
+  // branch to runtime if either string tag is non-zero.
   STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t4, t0, Operand(kStringRepresentationMask));
+  __ Or(t4, t0, Operand(t1));
+  __ And(t4, t4, Operand(kStringRepresentationMask));
+  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
 
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  Label skip_first_add;
-  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
-  __ Branch(USE_DELAY_SLOT, &first_prepared);
-  __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
-  __ bind(&skip_first_add);
-  // External string: rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ And(t4, t0, Operand(kShortExternalStringMask));
-  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
-  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
-  __ bind(&first_prepared);
-
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(t4, t1, Operand(kStringRepresentationMask));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  Label skip_second_add;
-  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
-  __ Branch(USE_DELAY_SLOT, &second_prepared);
-  __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
-  __ bind(&skip_second_add);
-  // External string: rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ And(t4, t1, Operand(kShortExternalStringMask));
-  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
-  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
-  __ bind(&second_prepared);
-
-  Label non_ascii_string_add_flat_result;
-  // t3: first character of first string
-  // a1: first character of second string
+  // Now check if both strings have the same encoding (ASCII/Two-byte).
+  // a0: first string
+  // a1: second string
   // a2: length of first string
   // a3: length of second string
+  // t0: first string instance type
+  // t1: second string instance type
   // t2: sum of lengths.
-  // Both strings have the same encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ And(t4, t1, Operand(kStringEncodingMask));
-  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
+  Label non_ascii_string_add_flat_result;
+  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
+  __ xor_(t3, t1, t0);
+  __ And(t3, t3, Operand(kStringEncodingMask));
+  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
+  // And see if it's ASCII (encoding bit set) or two-byte (bit clear).
+  __ And(t3, t0, Operand(kStringEncodingMask));
+  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
 
-  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
-  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  // v0: result string.
-  // t3: first character of first string.
-  // a1: first character of second string
+  // Both strings are sequential ASCII strings. We also know that they are
+  // short (since the sum of the lengths is less than kMinNonFlatLength).
+  // t2: length of resulting flat string
+  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a0: first character of first string.
+  // a1: second string.
   // a2: length of first string.
   // a3: length of second string.
   // t2: first character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
 
-  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
+  // Load second argument and locate first character.
+  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // a1: first character of second string.
+  // a3: length of second string.
   // t2: next character of result.
+  // t3: result string.
   StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
+  __ mov(v0, t3);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
 
   __ bind(&non_ascii_string_add_flat_result);
-  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
-  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  // v0: result string.
-  // t3: first character of first string.
-  // a1: first character of second string.
+  // Both strings are sequential two byte strings.
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t2: sum of length of strings.
+  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
+  // a0: first string.
+  // a1: second string.
+  // a2: length of first string.
+  // a3: length of second string.
+  // t3: result string.
+
+  // Locate first character of result.
+  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a0: first character of first string.
+  // a1: second string.
   // a2: length of first string.
   // a3: length of second string.
   // t2: first character of result.
-  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
-  // t2: next character of result.
+  // t3: result string.
+  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
+
+  // Locate first character of second argument.
+  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // a1: first character of second string.
+  // a3: length of second string.
+  // t2: next character of result (after copy of first string).
+  // t3: result string.
   StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
 
+  __ mov(v0, t3);
   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
-  __ DropAndRet(2);
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
 
   // Just jump to runtime to add the two strings.
-  __ bind(&call_runtime);
+  __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -6786,15 +6446,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label unordered;
   Label miss;
   __ And(a2, a1, Operand(a0));
   __ JumpIfSmi(a2, &generic_stub);
 
   __ GetObjectType(a0, a2, a2);
-  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
 
   // Inlining the double comparison and falling back to the general compare
   // stub if NaN is involved or FPU is unsupported.
@@ -6807,18 +6467,28 @@
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
-    Label fpu_eq, fpu_lt;
-    // Test if equal, and also handle the unordered/NaN case.
-    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+    Label fpu_eq, fpu_lt, fpu_gt;
+    // Compare operands (test if unordered).
+    __ c(UN, D, f0, f2);
+    // Don't base result on status bits when a NaN is involved.
+    __ bc1t(&unordered);
+    __ nop();
 
-    // Test if less (unordered case is already handled).
-    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+    // Test if equal.
+    __ c(EQ, D, f0, f2);
+    __ bc1t(&fpu_eq);
+    __ nop();
 
-    // Otherwise it's greater, so just fall thru, and return.
-    __ li(v0, Operand(GREATER));
-    __ Ret();
+    // Test if unordered or less (unordered case is already handled).
+    __ c(ULT, D, f0, f2);
+    __ bc1t(&fpu_lt);
+    __ nop();
 
+    // Otherwise it's greater.
+    __ bc1f(&fpu_gt);
+    __ nop();
+
+    // Return a result of -1, 0, or 1.
     __ bind(&fpu_eq);
     __ li(v0, Operand(EQUAL));
     __ Ret();
@@ -6826,29 +6496,18 @@
     __ bind(&fpu_lt);
     __ li(v0, Operand(LESS));
     __ Ret();
-  }
 
-  __ bind(&unordered);
+    __ bind(&fpu_gt);
+    __ li(v0, Operand(GREATER));
+    __ Ret();
+
+    __ bind(&unordered);
+  }
 
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
   __ bind(&generic_stub);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
-  __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(&miss, ne, a0, Operand(at));
-    __ GetObjectType(a1, a2, a2);
-    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
-    __ jmp(&unordered);
-  }
-
-  __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(&unordered, eq, a1, Operand(at));
-  }
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
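
The reverted sequence tests one FPU predicate at a time (UN, EQ, ULT) and reads the condition bit with bc1t/bc1f instead of the newer BranchF helper. The outcome ladder in C++:

    // -1/0/+1 mirrors LESS/EQUAL/GREATER; NaN takes the unordered exit.
    int CompareDoubles(double a, double b, bool* unordered) {
      *unordered = (a != a) || (b != b);  // c(UN, D, f0, f2)
      if (*unordered) return 0;           // falls through to the generic stub
      if (a == b) return 0;               // EQUAL
      return (a < b) ? -1 : 1;            // LESS / GREATER
    }
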
@@ -6896,8 +6555,6 @@
   ASSERT(state_ == CompareIC::STRINGS);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
-
   // Registers containing left and right operands respectively.
   Register left = a1;
   Register right = a0;
@@ -6925,52 +6582,41 @@
   Label left_ne_right;
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
-  __ Branch(&left_ne_right, ne, left, Operand(right));
-  __ Ret(USE_DELAY_SLOT);
+  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
   __ mov(v0, zero_reg);  // In the delay slot.
+  __ Ret();
   __ bind(&left_ne_right);
 
   // Handle not identical strings.
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  if (equality) {
-    ASSERT(GetCondition() == eq);
-    STATIC_ASSERT(kSymbolTag != 0);
-    __ And(tmp3, tmp1, Operand(tmp2));
-    __ And(tmp5, tmp3, Operand(kIsSymbolMask));
-    Label is_symbol;
-    __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
-    // Make sure a0 is non-zero. At this point input operands are
-    // guaranteed to be non-zero.
-    ASSERT(right.is(a0));
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a0);  // In the delay slot.
-    __ bind(&is_symbol);
-  }
+  ASSERT(GetCondition() == eq);
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ And(tmp3, tmp1, Operand(tmp2));
+  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+  Label is_symbol;
+  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
+  __ mov(v0, a0);  // In the delay slot.
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(a0));
+  __ Ret();
+  __ bind(&is_symbol);
 
   // Check that both strings are sequential ASCII.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
-      tmp1, tmp2, tmp3, tmp4, &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
+                                                  &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2, tmp3);
-  } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3, tmp4);
-  }
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2, tmp3);
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
   __ Push(left, right);
-  if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
-  } else {
-    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-  }
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
 
   __ bind(&miss);
   GenerateMiss(masm);
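
The symbol test above leans on interning: two symbols with equal contents are the same object, so once the identity check has failed, an equality compare can answer "not equal" without touching characters. In C++ terms (interning invariant assumed):

    // Interned strings compare by pointer; distinct pointers imply distinct
    // contents, so no character comparison is needed.
    bool SymbolsEqual(const void* left, const void* right) {
      return left == right;
    }
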
@@ -6989,48 +6635,33 @@
   __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
 
   ASSERT(GetCondition() == eq);
-  __ Ret(USE_DELAY_SLOT);
-  __ subu(v0, a0, a1);
+  __ Subu(v0, a0, Operand(a1));
+  __ Ret();
 
   __ bind(&miss);
   GenerateMiss(masm);
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
-  Label miss;
-  __ And(a2, a1, a0);
-  __ JumpIfSmi(a2, &miss);
-  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
-  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, a2, Operand(known_map_));
-  __ Branch(&miss, ne, a3, Operand(known_map_));
-
-  __ Ret(USE_DELAY_SLOT);
-  __ subu(v0, a0, a1);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  {
-    // Call the runtime system in a fresh internal frame.
-    ExternalReference miss =
-        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Push(a1, a0);
-    __ push(ra);
-    __ Push(a1, a0);
-    __ li(t0, Operand(Smi::FromInt(op_)));
-    __ addiu(sp, sp, -kPointerSize);
-    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
-    __ sw(t0, MemOperand(sp));  // In the delay slot.
-    // Compute the entry point of the rewritten stub.
-    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-    // Restore registers.
-    __ Pop(a1, a0, ra);
-  }
+  __ Push(a1, a0);
+  __ push(ra);
+
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                             masm->isolate());
+  __ EnterInternalFrame();
+  __ Push(a1, a0);
+  __ li(t0, Operand(Smi::FromInt(op_)));
+  __ push(t0);
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
+  // Compute the entry point of the rewritten stub.
+  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  // Restore registers.
+  __ pop(ra);
+  __ pop(a0);
+  __ pop(a1);
   __ Jump(a2);
 }
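
The Addu above relies on V8's tagged-pointer scheme. A minimal sketch, assuming a stand-in kHeaderSize (kHeapObjectTag really is 1 in V8): the first instruction of the returned Code object lives at code + Code::kHeaderSize - kHeapObjectTag.

#include <cassert>
#include <cstdint>

const intptr_t kHeapObjectTag = 1;
const intptr_t kHeaderSize = 32;  // Assumed for illustration, not Code's layout.

intptr_t EntryPoint(intptr_t tagged_code_pointer) {
  return tagged_code_pointer + kHeaderSize - kHeapObjectTag;
}

int main() {
  intptr_t raw = 0x1000;                   // Untagged object start.
  intptr_t tagged = raw + kHeapObjectTag;  // How the heap hands it out.
  assert(EntryPoint(tagged) == raw + kHeaderSize);
  return 0;
}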
 
@@ -7041,7 +6672,7 @@
   // The saved ra is after the reserved stack space for the 4 args.
   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
 
-  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+  if (FLAG_debug_code && EnableSlowAsserts()) {
     // In case of an error the return address may point to a memory area
     // filled with kZapValue by the GC.
     // Dereference the address and check for this.
@@ -7082,10 +6713,8 @@
   // Push return address (accessible to GC through exit frame pc).
   // This spot for ra was reserved in EnterExitFrame.
   masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
-  masm->li(ra,
-           Operand(reinterpret_cast<intptr_t>(GetCode().location()),
-                   RelocInfo::CODE_TARGET),
-           CONSTANT_SIZE);
+  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
+                       RelocInfo::CODE_TARGET), true);
   // Call the function.
   masm->Jump(t9);
   // Make sure the stored 'ra' points to this position.
@@ -7093,18 +6722,19 @@
 }
 
 
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
-                                                        Label* miss,
-                                                        Label* done,
-                                                        Register receiver,
-                                                        Register properties,
-                                                        Handle<String> name,
-                                                        Register scratch0) {
-  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register receiver,
+    Register properties,
+    String* name,
+    Register scratch0) {
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the hole value).
+  // (their names are the null value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // scratch0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -7113,17 +6743,20 @@
     __ lw(index, FieldMemOperand(properties, kCapacityOffset));
     __ Subu(index, index, Operand(1));
     __ And(index, index, Operand(
-        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
+         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    __ sll(at, index, 1);
+    // index *= 3.
+    __ mov(at, index);
+    __ sll(index, index, 1);
     __ Addu(index, index, at);
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
     ASSERT_EQ(kSmiTagSize, 1);
     Register tmp = properties;
+
     __ sll(scratch0, index, 1);
     __ Addu(tmp, properties, scratch0);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
@@ -7133,15 +6766,9 @@
     __ Branch(done, eq, entity_name, Operand(tmp));
 
     if (i != kInlinedProbes - 1) {
-      // Load the hole ready for use below:
-      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
-
       // Stop if found the property.
       __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
 
-      Label the_hole;
-      __ Branch(&the_hole, eq, entity_name, Operand(tmp));
-
       // Check if the entry name is not a symbol.
       __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
       __ lbu(entity_name,
@@ -7149,8 +6776,6 @@
       __ And(scratch0, entity_name, Operand(kIsSymbolMask));
       __ Branch(miss, eq, scratch0, Operand(zero_reg));
 
-      __ bind(&the_hole);
-
       // Restore the properties.
       __ lw(properties,
             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
@@ -7159,18 +6784,19 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
-       a2.bit() | a1.bit() | a0.bit() | v0.bit());
+       a2.bit() | a1.bit() | a0.bit());
 
   __ MultiPush(spill_mask);
   __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ li(a1, Operand(Handle<String>(name)));
   StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ mov(at, v0);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
   __ MultiPop(spill_mask);
 
-  __ Branch(done, eq, at, Operand(zero_reg));
-  __ Branch(miss, ne, at, Operand(zero_reg));
+  __ Branch(done, eq, v0, Operand(zero_reg));
+  __ Branch(miss, ne, v0, Operand(zero_reg));
+  return result;
 }
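
The masked-index computation above is quadratic probing. A standalone sketch, assuming StringDictionary::GetProbeOffset(i) is the i-th triangular number (i + i*i)/2, which visits every slot of a power-of-two table exactly once; Probe and the capacity are stand-ins, not V8 declarations:

#include <cstdint>
#include <cstdio>

uint32_t Probe(uint32_t hash, uint32_t i, uint32_t capacity) {
  // Masked index: (hash + probe offset) & (capacity - 1).
  return (hash + (i + i * i) / 2) & (capacity - 1);
}

int main() {
  const uint32_t kCapacity = 8;  // Small table for demonstration.
  for (uint32_t i = 0; i < kCapacity; i++) {
    printf("probe %u -> slot %u\n", i, Probe(0x1234u, i, kCapacity));
  }
  return 0;
}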
 
 
@@ -7185,11 +6811,6 @@
                                                         Register name,
                                                         Register scratch1,
                                                         Register scratch2) {
-  ASSERT(!elements.is(scratch1));
-  ASSERT(!elements.is(scratch2));
-  ASSERT(!name.is(scratch1));
-  ASSERT(!name.is(scratch2));
-
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -7220,7 +6841,8 @@
     ASSERT(StringDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
 
-    __ sll(at, scratch2, 1);
+    __ mov(at, scratch2);
+    __ sll(scratch2, scratch2, 1);
     __ Addu(scratch2, scratch2, at);
 
     // Check if the key is identical to the name.
@@ -7232,32 +6854,23 @@
 
   const int spill_mask =
       (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
-       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
+       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
       ~(scratch1.bit() | scratch2.bit());
 
   __ MultiPush(spill_mask);
-  if (name.is(a0)) {
-    ASSERT(!elements.is(a1));
-    __ Move(a1, name);
-    __ Move(a0, elements);
-  } else {
-    __ Move(a0, elements);
-    __ Move(a1, name);
-  }
+  __ Move(a0, elements);
+  __ Move(a1, name);
   StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
   __ CallStub(&stub);
   __ mov(scratch2, a2);
-  __ mov(at, v0);
   __ MultiPop(spill_mask);
 
-  __ Branch(done, ne, at, Operand(zero_reg));
-  __ Branch(miss, eq, at, Operand(zero_reg));
+  __ Branch(done, ne, v0, Operand(zero_reg));
+  __ Branch(miss, eq, v0, Operand(zero_reg));
 }
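
The mov/sll/Addu triple above is the usual MIPS multiply-by-3 idiom for scaling a dictionary index by kEntrySize == 3 without a multiply instruction. A trivial sketch of the identity it relies on:

#include <cassert>

// index * 3 == (index << 1) + index, mirroring the sll/Addu pair above.
int TimesEntrySize(int index) {
  return (index << 1) + index;
}

int main() {
  for (int i = 0; i < 16; i++) assert(TimesEntrySize(i) == i * 3);
  return 0;
}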
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  a1: key
@@ -7337,354 +6950,17 @@
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
   if (mode_ == POSITIVE_LOOKUP) {
-    __ Ret(USE_DELAY_SLOT);
     __ mov(result, zero_reg);
+    __ Ret();
   }
 
   __ bind(&in_dictionary);
-  __ Ret(USE_DELAY_SLOT);
   __ li(result, 1);
+  __ Ret();
 
   __ bind(&not_in_dictionary);
-  __ Ret(USE_DELAY_SLOT);
   __ mov(result, zero_reg);
-}
-
-
-struct AheadOfTimeWriteBarrierStubList {
-  Register object, value, address;
-  RememberedSetAction action;
-};
-
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
-  // Used in RegExpExecStub.
-  { s2, s0, t3, EMIT_REMEMBERED_SET },
-  { s2, a2, t3, EMIT_REMEMBERED_SET },
-  // Used in CompileArrayPushCall.
-  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
-  // Also used in KeyedStoreIC::GenerateGeneric.
-  { a3, t0, t1, EMIT_REMEMBERED_SET },
-  // Used in CompileStoreGlobal.
-  { t0, a1, a2, OMIT_REMEMBERED_SET },
-  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { a1, a2, a3, EMIT_REMEMBERED_SET },
-  { a3, a2, a1, EMIT_REMEMBERED_SET },
-  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { a2, a1, a3, EMIT_REMEMBERED_SET },
-  { a3, a1, a2, EMIT_REMEMBERED_SET },
-  // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { a3, a2, t0, EMIT_REMEMBERED_SET },
-  { a2, a3, t0, EMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
-  // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { a2, a3, t5, EMIT_REMEMBERED_SET },
-  { a2, a3, t5, OMIT_REMEMBERED_SET },
-  // ElementsTransitionGenerator::GenerateDoubleToObject
-  { t2, a2, a0, EMIT_REMEMBERED_SET },
-  { a2, t2, t5, EMIT_REMEMBERED_SET },
-  // StoreArrayLiteralElementStub::Generate
-  { t1, a0, t2, EMIT_REMEMBERED_SET },
-  // Null termination.
-  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
-};
-
-
-bool RecordWriteStub::IsPregenerated() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    if (object_.is(entry->object) &&
-        value_.is(entry->value) &&
-        address_.is(entry->address) &&
-        remembered_set_action_ == entry->action &&
-        save_fp_regs_mode_ == kDontSaveFPRegs) {
-      return true;
-    }
-  }
-  return false;
-}
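
A hedged sketch of the sentinel-terminated table walk being deleted here; Entry and kNoReg are stand-ins for the real Register/no_reg types:

#include <cstdio>

struct Entry { int object, value, address; };
const int kNoReg = -1;
const Entry kTable[] = {
  {2, 0, 7},
  {3, 4, 5},
  {kNoReg, kNoReg, kNoReg},  // Null termination, as in kAheadOfTime.
};

bool Contains(int object, int value, int address) {
  // Walk until the sentinel entry, exactly like IsPregenerated above.
  for (const Entry* e = kTable; e->object != kNoReg; e++) {
    if (e->object == object && e->value == value && e->address == address)
      return true;
  }
  return false;
}

int main() {
  printf("%d %d\n", Contains(3, 4, 5), Contains(9, 9, 9));
  return 0;
}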
-
-
-bool StoreBufferOverflowStub::IsPregenerated() {
-  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
-  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
-  stub1.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    RecordWriteStub stub(entry->object,
-                         entry->value,
-                         entry->address,
-                         entry->action,
-                         kDontSaveFPRegs);
-    stub.GetCode()->set_is_pregenerated(true);
-  }
-}
-
-
-// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed.  The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  Label skip_to_incremental_noncompacting;
-  Label skip_to_incremental_compacting;
-
-  // The first two branch+nop instructions are generated with labels so as to
-  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
-  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
-  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
-  // incremental heap marking.
-  // See RecordWriteStub::Patch for details.
-  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
-  __ nop();
-  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
-  __ nop();
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  }
   __ Ret();
-
-  __ bind(&skip_to_incremental_noncompacting);
-  GenerateIncremental(masm, INCREMENTAL);
-
-  __ bind(&skip_to_incremental_compacting);
-  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
-  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
-  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-
-  PatchBranchIntoNop(masm, 0);
-  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
-}
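
A sketch of the patching idea the deleted stub describes: with rs == rt == zero_reg, BEQ always branches and BNE never does (an effective nop), so flipping the opcode toggles the stub between store-buffer-only and incremental modes. The opcode values follow the MIPS32 encoding (BEQ = 4, BNE = 5); everything else is illustrative:

#include <cstdint>
#include <cstdio>

const uint32_t kOpcodeShift = 26;
const uint32_t kImm16Mask = 0xffff;
const uint32_t BEQ = 4u << kOpcodeShift;
const uint32_t BNE = 5u << kOpcodeShift;

uint32_t PatchNopIntoBranch(uint32_t instr) {
  return BEQ | (instr & kImm16Mask);  // Keep the offset, make it taken.
}

uint32_t PatchBranchIntoNop(uint32_t instr) {
  return BNE | (instr & kImm16Mask);  // Keep the offset, never taken.
}

int main() {
  uint32_t nop_like = BNE | 0x0010;  // bne zero_reg, zero_reg, +0x10
  printf("%08x -> %08x\n", nop_like, PatchNopIntoBranch(nop_like));
  return 0;
}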
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
-  regs_.Save(masm);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    Label dont_need_remembered_set;
-
-    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
-    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
-                           regs_.scratch0(),
-                           &dont_need_remembered_set);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     ne,
-                     &dont_need_remembered_set);
-
-    // First notify the incremental marker if necessary, then update the
-    // remembered set.
-    CheckNeedsToInformIncrementalMarker(
-        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
-    InformIncrementalMarker(masm, mode);
-    regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-
-    __ bind(&dont_need_remembered_set);
-  }
-
-  CheckNeedsToInformIncrementalMarker(
-      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
-  InformIncrementalMarker(masm, mode);
-  regs_.Restore(masm);
-  __ Ret();
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-  int argument_count = 3;
-  __ PrepareCallCFunction(argument_count, regs_.scratch0());
-  Register address =
-      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(a0));
-  __ Move(address, regs_.address());
-  __ Move(a0, regs_.object());
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ Move(a1, address);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ lw(a1, MemOperand(address, 0));
-  }
-  __ li(a2, Operand(ExternalReference::isolate_address()));
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ CallCFunction(
-        ExternalReference::incremental_evacuation_record_write_function(
-            masm->isolate()),
-        argument_count);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ CallCFunction(
-        ExternalReference::incremental_marking_record_write_function(
-            masm->isolate()),
-        argument_count);
-  }
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
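
A sketch of the argument-shuffling concern in the deleted InformIncrementalMarker: if the outgoing argument register (a0) already holds the address operand, the address must be parked in a scratch register before a0 is overwritten with the object. Registers are modeled as indices into a plain array here, an assumption for illustration:

#include <cassert>

void MoveArgs(int regs[], int object_idx, int address_idx,
              int scratch_idx, int a0_idx, int a1_idx) {
  // Pick a safe home for the address: scratch if a0 would clobber it.
  int address_home = (a0_idx == address_idx) ? scratch_idx : address_idx;
  regs[address_home] = regs[address_idx];
  regs[a0_idx] = regs[object_idx];    // a0 <- object
  regs[a1_idx] = regs[address_home];  // a1 <- address
}

int main() {
  int regs[8] = {0};
  regs[4] = 111;  // object lives in r4
  regs[0] = 222;  // address lives in r0, which is also a0
  MoveArgs(regs, 4, 0, 7, 0, 1);
  assert(regs[0] == 111 && regs[1] == 222);
  return 0;
}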
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
-    MacroAssembler* masm,
-    OnNoNeedToInformIncrementalMarker on_no_need,
-    Mode mode) {
-  Label on_black;
-  Label need_incremental;
-  Label need_incremental_pop_scratch;
-
-  // Let's look at the color of the object:  If it is not black we don't have
-  // to inform the incremental marker.
-  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ Ret();
-  }
-
-  __ bind(&on_black);
-
-  // Get the value from the slot.
-  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
-
-  if (mode == INCREMENTAL_COMPACTION) {
-    Label ensure_not_white;
-
-    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kEvacuationCandidateMask,
-                     eq,
-                     &ensure_not_white);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
-                     eq,
-                     &need_incremental);
-
-    __ bind(&ensure_not_white);
-  }
-
-  // We need extra registers for this, so we push the object and the address
-  // register temporarily.
-  __ Push(regs_.object(), regs_.address());
-  __ EnsureNotWhite(regs_.scratch0(),  // The value.
-                    regs_.scratch1(),  // Scratch.
-                    regs_.object(),  // Scratch.
-                    regs_.address(),  // Scratch.
-                    &need_incremental_pop_scratch);
-  __ Pop(regs_.object(), regs_.address());
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ Ret();
-  }
-
-  __ bind(&need_incremental_pop_scratch);
-  __ Pop(regs_.object(), regs_.address());
-
-  __ bind(&need_incremental);
-
-  // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : element value to store
-  //  -- a1    : array literal
-  //  -- a2    : map of array literal
-  //  -- a3    : element index as smi
-  //  -- t0    : array literal index in function as smi
-  // -----------------------------------
-
-  Label element_done;
-  Label double_elements;
-  Label smi_element;
-  Label slow_elements;
-  Label fast_elements;
-
-  __ CheckFastElements(a2, t1, &double_elements);
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-  __ JumpIfSmi(a0, &smi_element);
-  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
-
-  // Store into the array literal requires a elements transition. Call into
-  // the runtime.
-  __ bind(&slow_elements);
-  // call.
-  __ Push(a1, a3, a0);
-  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
-  __ Push(t1, t0);
-  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-  __ bind(&fast_elements);
-  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, t1, t2);
-  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sw(a0, MemOperand(t2, 0));
-  // Update the write barrier for the array store.
-  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
-  __ bind(&smi_element);
-  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t2, t1, t2);
-  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-
-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-  __ bind(&double_elements);
-  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
-                                 &slow_elements);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
 }
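
The sll above scales a smi-tagged index straight to a byte offset. A minimal sketch of that arithmetic: a smi stores value << 1 (kSmiTagSize == 1, tag bit 0), so indexing 4-byte elements needs only one extra shift.

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;
const int kPointerSizeLog2 = 2;  // 32-bit MIPS: 4-byte pointers.

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

// offset = smi << (kPointerSizeLog2 - kSmiTagSize) == value * 4.
int32_t ElementOffset(int32_t smi_index) {
  return smi_index << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  assert(ElementOffset(SmiTag(5)) == 5 * 4);
  return 0;
}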
 
 
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index e0954d8..aa224bc 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -59,25 +59,6 @@
 };
 
 
-class StoreBufferOverflowStub: public CodeStub {
- public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -137,7 +118,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -236,7 +217,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -245,70 +226,6 @@
 };
 
 
-class StringHelper : public AllStatic {
- public:
-  // Generate code for copying characters using a simple loop. This should only
-  // be used in places where the number of characters is small and the
-  // additional setup and checking in GenerateCopyCharactersLong adds too much
-  // overhead. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     bool ascii);
-
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharactersLong(MacroAssembler* masm,
-                                         Register dest,
-                                         Register src,
-                                         Register count,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         int flags);
-
-
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register r0.
-  // Contents of both c1 and c2 registers are modified. At the exit c1 is
-  // guaranteed to contain halfword with low and high bytes equal to
-  // initial contents of c1 and c2 respectively.
-  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
-                                                   Register c1,
-                                                   Register c2,
-                                                   Register scratch1,
-                                                   Register scratch2,
-                                                   Register scratch3,
-                                                   Register scratch4,
-                                                   Register scratch5,
-                                                   Label* not_found);
-
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
@@ -407,15 +324,7 @@
       : the_int_(the_int),
         the_heap_number_(the_heap_number),
         scratch_(scratch),
-        sign_(scratch2) {
-    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
-    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
-    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
-    ASSERT(SignRegisterBits::is_valid(sign_.code()));
-  }
-
-  bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
+        sign_(scratch2) { }
 
  private:
   Register the_int_;
@@ -427,15 +336,13 @@
   class IntRegisterBits: public BitField<int, 0, 4> {};
   class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
   class ScratchRegisterBits: public BitField<int, 8, 4> {};
-  class SignRegisterBits: public BitField<int, 12, 4> {};
 
   Major MajorKey() { return WriteInt32ToHeapNumber; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return IntRegisterBits::encode(the_int_.code())
            | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code())
-           | SignRegisterBits::encode(sign_.code());
+           | ScratchRegisterBits::encode(scratch_.code());
   }
 
   void Generate(MacroAssembler* masm);
@@ -468,208 +375,6 @@
 };
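
MinorKey above packs the register codes into one integer via BitField. A minimal stand-in for that template (an assumption for illustration, not the V8 header): each field occupies bits [shift, shift + size) of the key.

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

typedef BitField<int, 0, 4> IntRegisterBits;
typedef BitField<int, 4, 4> HeapNumberRegisterBits;

int main() {
  uint32_t key = IntRegisterBits::encode(3) | HeapNumberRegisterBits::encode(9);
  assert(IntRegisterBits::decode(key) == 3);
  assert(HeapNumberRegisterBits::decode(key) == 9);
  return 0;
}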
 
 
-class RecordWriteStub: public CodeStub {
- public:
-  RecordWriteStub(Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action,
-                  SaveFPRegsMode fp_mode)
-      : object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
-        regs_(object,   // An input reg.
-              address,  // An input reg.
-              value) {  // One scratch reg.
-  }
-
-  enum Mode {
-    STORE_BUFFER_ONLY,
-    INCREMENTAL,
-    INCREMENTAL_COMPACTION
-  };
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
-  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
-    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
-    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
-        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
-    ASSERT(Assembler::IsBne(masm->instr_at(pos)));
-  }
-
-  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
-    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
-    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
-        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
-    ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
-  }
-
-  static Mode GetMode(Code* stub) {
-    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
-    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
-                                                   2 * Assembler::kInstrSize);
-
-    if (Assembler::IsBeq(first_instruction)) {
-      return INCREMENTAL;
-    }
-
-    ASSERT(Assembler::IsBne(first_instruction));
-
-    if (Assembler::IsBeq(second_instruction)) {
-      return INCREMENTAL_COMPACTION;
-    }
-
-    ASSERT(Assembler::IsBne(second_instruction));
-
-    return STORE_BUFFER_ONLY;
-  }
-
-  static void Patch(Code* stub, Mode mode) {
-    MacroAssembler masm(NULL,
-                        stub->instruction_start(),
-                        stub->instruction_size());
-    switch (mode) {
-      case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
-               GetMode(stub) == INCREMENTAL_COMPACTION);
-        PatchBranchIntoNop(&masm, 0);
-        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
-        break;
-      case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, 0);
-        break;
-      case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
-        break;
-    }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
-  }
-
- private:
-  // This is a helper class for freeing up 3 scratch registers.  The input is
-  // two registers that must be preserved and one scratch register provided by
-  // the caller.
-  class RegisterAllocation {
-   public:
-    RegisterAllocation(Register object,
-                       Register address,
-                       Register scratch0)
-        : object_(object),
-          address_(address),
-          scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
-      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
-    }
-
-    void Save(MacroAssembler* masm) {
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      // We don't have to save scratch0_ because it was given to us as
-      // a scratch register.
-      masm->push(scratch1_);
-    }
-
-    void Restore(MacroAssembler* masm) {
-      masm->pop(scratch1_);
-    }
-
-    // If we have to call into C then we need to save and restore all caller-
-    // saved registers that were not already preserved.  The scratch registers
-    // will be restored by other means so we don't bother pushing them here.
-    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(FPU);
-        masm->MultiPushFPU(kCallerSavedFPU);
-      }
-    }
-
-    inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
-                                           SaveFPRegsMode mode) {
-      if (mode == kSaveFPRegs) {
-        CpuFeatures::Scope scope(FPU);
-        masm->MultiPopFPU(kCallerSavedFPU);
-      }
-      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
-    }
-
-    inline Register object() { return object_; }
-    inline Register address() { return address_; }
-    inline Register scratch0() { return scratch0_; }
-    inline Register scratch1() { return scratch1_; }
-
-   private:
-    Register object_;
-    Register address_;
-    Register scratch0_;
-    Register scratch1_;
-
-    Register GetRegThatIsNotOneOf(Register r1,
-                                  Register r2,
-                                  Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
-        Register candidate = Register::FromAllocationIndex(i);
-        if (candidate.is(r1)) continue;
-        if (candidate.is(r2)) continue;
-        if (candidate.is(r3)) continue;
-        return candidate;
-      }
-      UNREACHABLE();
-      return no_reg;
-    }
-    friend class RecordWriteStub;
-  };
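
A sketch of the allocation trick in the deleted GetRegThatIsNotOneOf: scan a small fixed register file and return the first entry that aliases none of the inputs. Registers are modeled as plain ints here, an assumption for illustration:

#include <cassert>

const int kNumAllocatable = 8;

int GetRegThatIsNotOneOf(int r1, int r2, int r3) {
  for (int i = 0; i < kNumAllocatable; i++) {
    if (i == r1 || i == r2 || i == r3) continue;
    return i;  // First candidate free of all three.
  }
  return -1;  // Unreachable while kNumAllocatable > 3.
}

int main() {
  int scratch = GetRegThatIsNotOneOf(0, 1, 2);
  assert(scratch == 3);
  return 0;
}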
-
-  enum OnNoNeedToInformIncrementalMarker {
-    kReturnOnNoNeedToInformIncrementalMarker,
-    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  };
-
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
-  void CheckNeedsToInformIncrementalMarker(
-      MacroAssembler* masm,
-      OnNoNeedToInformIncrementalMarker on_no_need,
-      Mode mode);
-  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
-  void Activate(Code* code) {
-    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-  }
-
-  class ObjectBits: public BitField<int, 0, 5> {};
-  class ValueBits: public BitField<int, 5, 5> {};
-  class AddressBits: public BitField<int, 10, 5> {};
-  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
-  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
-
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
-  Label slow_;
-  RegisterAllocation regs_;
-};
-
-
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM and MIPS.
@@ -856,13 +561,14 @@
 
   void Generate(MacroAssembler* masm);
 
-  static void GenerateNegativeLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register receiver,
-                                     Register properties,
-                                     Handle<String> name,
-                                     Register scratch0);
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register receiver,
+      Register properties,
+      String* name,
+      Register scratch0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -872,8 +578,6 @@
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -886,7 +590,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryLookup; }
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 9acccdc..4400b64 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,423 +30,22 @@
 #if defined(V8_TARGET_ARCH_MIPS)
 
 #include "codegen.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm)
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
-  switch (type) {
-    case TranscendentalCache::SIN: return &sin;
-    case TranscendentalCache::COS: return &cos;
-    case TranscendentalCache::TAN: return &tan;
-    case TranscendentalCache::LOG: return &log;
-    default: UNIMPLEMENTED();
-  }
-  return NULL;
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
-  return &sqrt;
-}
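
A hedged sketch of the function-pointer dispatch deleted above: map a cache type tag to the matching libm routine. UnaryMathFunction and the enum are local stand-ins for the V8 declarations:

#include <cmath>
#include <cstdio>

typedef double (*UnaryMathFunction)(double);
enum TranscendentalType { SIN, COS, TAN, LOG };

UnaryMathFunction CreateTranscendentalFunction(TranscendentalType type) {
  switch (type) {
    case SIN: return &sin;
    case COS: return &cos;
    case TAN: return &tan;
    case LOG: return &log;
  }
  return NULL;  // Unreachable for the tags above.
}

int main() {
  UnaryMathFunction f = CreateTranscendentalFunction(LOG);
  printf("%f\n", f(1.0));  // log(1.0) == 0.0
  return 0;
}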
-
 // -------------------------------------------------------------------------
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
-  masm->set_has_frame(true);
+  masm->EnterInternalFrame();
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
-  masm->set_has_frame(false);
+  masm->LeaveInternalFrame();
 }
 
-// -------------------------------------------------------------------------
-// Code generators
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
-  // Set transitioned map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
-                      HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  bool fpu_supported = CpuFeatures::IsSupported(FPU);
-
-  Register scratch = t6;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
-
-  __ push(ra);
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ sll(scratch, t1, 2);
-  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
-  // t2: destination FixedDoubleArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
-  // Update receiver's map.
-
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
-                      HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Addu(a3, t2, Operand(kHeapObjectTag));
-  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
-                      JSObject::kElementsOffset,
-                      a3,
-                      t5,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-
-  // Prepare for conversion loop.
-  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
-  __ sll(t2, t1, 2);
-  __ Addu(t2, t2, t3);
-  __ li(t0, Operand(kHoleNanLower32));
-  __ li(t1, Operand(kHoleNanUpper32));
-  // t0: kHoleNanLower32
-  // t1: kHoleNanUpper32
-  // t2: end of destination FixedDoubleArray, not tagged
-  // t3: begin of FixedDoubleArray element fields, not tagged
-
-  if (!fpu_supported) __ Push(a1, a0);
-
-  __ Branch(&entry);
-
-  __ bind(&only_change_map);
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
-                      HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Branch(&done);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(ra);
-  __ Branch(fail);
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ lw(t5, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  // t5: current element
-  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  if (fpu_supported) {
-    CpuFeatures::Scope scope(FPU);
-    __ mtc1(t5, f0);
-    __ cvt_d_w(f0, f0);
-    __ sdc1(f0, MemOperand(t3));
-    __ Addu(t3, t3, kDoubleSize);
-  } else {
-    FloatingPointHelper::ConvertIntToDouble(masm,
-                                            t5,
-                                            FloatingPointHelper::kCoreRegisters,
-                                            f0,
-                                            a0,
-                                            a1,
-                                            t7,
-                                            f0);
-    __ sw(a0, MemOperand(t3));  // mantissa
-    __ sw(a1, MemOperand(t3, kIntSize));  // exponent
-    __ Addu(t3, t3, kDoubleSize);
-  }
-  __ Branch(&entry);
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ SmiTag(t5);
-    __ Or(t5, t5, Operand(1));
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, "object found in smi-only array", at, Operand(t5));
-  }
-  __ sw(t0, MemOperand(t3));  // mantissa
-  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
-  __ Addu(t3, t3, kDoubleSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, t3, Operand(t2));
-
-  if (!fpu_supported) __ Pop(a1, a0);
-  __ pop(ra);
-  __ bind(&done);
-}
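
A sketch of the hole representation the deleted conversion loop writes: an element hole in a FixedDoubleArray is a NaN with a fixed bit pattern, stored as two 32-bit words (mantissa word first on little-endian MIPS, then the exponent word), and the reverse loop in GenerateDoubleToObject compares only the upper word. The constants and the little-endian layout here are assumptions for illustration, not the exact V8 values:

#include <cstdint>
#include <cstdio>
#include <cstring>

const uint32_t kHoleNanLower32 = 0xFFFFFFFFu;  // Assumed mantissa word.
const uint32_t kHoleNanUpper32 = 0x7FFFFFFFu;  // Assumed exponent word.

int main() {
  uint32_t words[2] = { kHoleNanLower32, kHoleNanUpper32 };
  double hole;
  memcpy(&hole, words, sizeof(hole));
  // NaN is the only value that compares unequal to itself.
  printf("is NaN: %d\n", hole != hole ? 1 : 0);
  return 0;
}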
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
-  Label entry, loop, convert_hole, gc_required, only_change_map;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
-
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
-
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  __ sll(a0, t1, 1);
-  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
-  // t2: destination FixedArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
-
-  // Prepare for conversion loop.
-  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
-  __ Addu(t2, t2, Operand(kHeapObjectTag));
-  __ sll(t1, t1, 1);
-  __ Addu(t1, a3, t1);
-  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
-  // Using offsetted addresses.
-  // a3: begin of destination FixedArray element fields, not tagged
-  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
-  // t1: end of destination FixedArray, not tagged
-  // t2: destination FixedArray
-  // t3: the-hole pointer
-  // t5: heap number map
-  __ Branch(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
-
-  __ Branch(fail);
-
-  __ bind(&loop);
-  __ lw(a1, MemOperand(t0));
-  __ Addu(t0, t0, kDoubleSize);
-  // a1: current element's upper 32 bit
-  // t0: address of next element's upper 32 bit
-  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
-  // a2: new heap number
-  __ lw(a0, MemOperand(t0, -12));
-  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
-  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
-  __ mov(a0, a3);
-  __ sw(a2, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  __ RecordWrite(t2,
-                 a0,
-                 a2,
-                 kRAHasBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Branch(&entry);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ sw(t3, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, a3, Operand(t1));
-
-  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
-                      JSObject::kElementsOffset,
-                      t2,
-                      t5,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(ra);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
-                      HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
-                                       Register string,
-                                       Register index,
-                                       Register result,
-                                       Label* call_runtime) {
-  // Fetch the instance type of the receiver into result register.
-  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ And(at, result, Operand(kIsIndirectStringMask));
-  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ And(at, result, Operand(kSlicedNotConsMask));
-  __ Branch(&cons_string, eq, at, Operand(zero_reg));
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
-  __ sra(at, result, kSmiTagSize);
-  __ Addu(index, index, at);
-  __ jmp(&indirect_string_loaded);
-
-  // Handle cons strings.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
-  __ LoadRoot(at, Heap::kEmptyStringRootIndex);
-  __ Branch(call_runtime, ne, result, Operand(at));
-  // Get the first of the two strings and load its instance type.
-  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
-  // Distinguish sequential and external strings. Only these two string
-  // representations can reach here (slices and flat cons strings have been
-  // reduced to the underlying sequential or external string).
-  Label external_string, check_encoding;
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ And(at, result, Operand(kStringRepresentationMask));
-  __ Branch(&external_string, ne, at, Operand(zero_reg));
-
-  // Prepare sequential strings
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ Addu(string,
-          string,
-          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-  __ jmp(&check_encoding);
-
-  // Handle external strings.
-  __ bind(&external_string);
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ And(at, result, Operand(kIsIndirectStringMask));
-    __ Assert(eq, "external string expected, but not found",
-        at, Operand(zero_reg));
-  }
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ And(at, result, Operand(kShortExternalStringMask));
-  __ Branch(call_runtime, ne, at, Operand(zero_reg));
-  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
-
-  Label ascii, done;
-  __ bind(&check_encoding);
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ And(at, result, Operand(kStringEncodingMask));
-  __ Branch(&ascii, ne, at, Operand(zero_reg));
-  // Two-byte string.
-  __ sll(at, index, 1);
-  __ Addu(at, string, at);
-  __ lhu(result, MemOperand(at));
-  __ jmp(&done);
-  __ bind(&ascii);
-  // Ascii string.
-  __ Addu(at, string, index);
-  __ lbu(result, MemOperand(at));
-  __ bind(&done);
-}
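
A hedged sketch of the dispatch the deleted generator performs: slices add their offset and redirect to the parent, cons strings (assumed flat) redirect to their first half, and only sequential storage is then indexed directly. The types are illustrative stand-ins; external and two-byte strings are omitted:

#include <cstdio>

enum Shape { SEQUENTIAL, CONS, SLICED };
struct Str {
  Shape shape;
  const char* chars;   // For SEQUENTIAL.
  const Str* parent;   // For CONS (first half) and SLICED.
  int offset;          // For SLICED.
};

char CharAt(const Str* s, int index) {
  while (s->shape != SEQUENTIAL) {
    if (s->shape == SLICED) {
      index += s->offset;  // Handle slices: adjust index, load parent.
    }
    s = s->parent;         // Cons: assume flat, take the first string.
  }
  return s->chars[index];
}

int main() {
  Str base = {SEQUENTIAL, "hello world", NULL, 0};
  Str slice = {SLICED, NULL, &base, 6};
  printf("%c\n", CharAt(&slice, 0));  // 'w'
  return 0;
}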
-
-#undef __
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index e704c4f..a8de9c8 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -31,6 +31,7 @@
 
 
 #include "ast.h"
+#include "code-stubs-mips.h"
 #include "ic-inl.h"
 
 namespace v8 {
@@ -70,26 +71,26 @@
                               int pos,
                               bool right_here = false);
 
+  // Constants related to patching of inlined load/store.
+  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+    // This is in correlation with the padding in MacroAssembler::Abort.
+    return FLAG_debug_code ? 45 : 20;
+  }
+
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 13;
+
+  static int GetInlinedNamedStoreInstructionsAfterPatch() {
+    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+    // Magic number 5: instruction count after patched map load:
+    //  li: 2 (liu & ori), Branch : 2 (bne & nop), sw : 1
+    return Isolate::Current()->inlined_write_barrier_size() + 5;
+  }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
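
A sketch of the instruction accounting behind the "magic number 5" comment above: on MIPS a 32-bit immediate load (li) expands to lui+ori, a Branch macro to bne+nop (delay slot), and the store itself is one sw.

#include <cassert>

const int kLiInstructions = 2;      // lui + ori
const int kBranchInstructions = 2;  // bne + nop
const int kStoreInstructions = 1;   // sw

int InstructionsAfterPatchedMapLoad(int inlined_write_barrier_size) {
  return inlined_write_barrier_size +
         kLiInstructions + kBranchInstructions + kStoreInstructions;
}

int main() {
  assert(InstructionsAfterPatchedMapLoad(7) == 12);
  return 0;
}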
 
 
-class StringCharLoadGenerator : public AllStatic {
- public:
-  // Generates the code for handling different string types and loading the
-  // indexed character into |result|.  We expect |index| as untagged input and
-  // |result| as untagged output.
-  static void Generate(MacroAssembler* masm,
-                       Register string,
-                       Register index,
-                       Register result,
-                       Label* call_runtime);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 7d654f6..d0a7af5 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -302,7 +302,7 @@
           return kRegisterType;
       };
       break;
-    // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
+    // 16 bits Immediate type instructions. eg: addi dest, src, imm16.
     case REGIMM:
     case BEQ:
     case BNE:
@@ -337,7 +337,7 @@
     case SWC1:
     case SDC1:
       return kImmediateType;
-    // 26 bits immediate type instructions. e.g.: j imm26.
+    // 26 bits immediate type instructions. eg: j imm26.
     case J:
     case JAL:
       return kJumpType;
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index fd04722..d76ae59 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -39,33 +39,24 @@
 
 #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
 
-enum ArchVariants {
-  kMips32r2,
-  kMips32r1,
-  kLoongson
-};
 
 #ifdef _MIPS_ARCH_MIPS32R2
-  static const ArchVariants kArchVariant = kMips32r2;
-#elif _MIPS_ARCH_LOONGSON
-// The loongson flag refers to the LOONGSON architectures based on MIPS-III,
-// which predates (and is a subset of) the mips32r2 and r1 architectures.
-  static const ArchVariants kArchVariant = kLoongson;
+  #define mips32r2 1
 #else
-  static const ArchVariants kArchVariant = kMips32r1;
+  #define mips32r2 0
 #endif
 
 
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
 // -mhard-float is passed to the compiler.
-const bool IsMipsSoftFloatABI = false;
+static const bool IsMipsSoftFloatABI = false;
 #elif(defined(__mips_soft_float) && __mips_soft_float != 0)
 // Not using floating-point coprocessor instructions. This flag is raised when
 // -msoft-float is passed to the compiler.
-const bool IsMipsSoftFloatABI = true;
+static const bool IsMipsSoftFloatABI = true;
 #else
-const bool IsMipsSoftFloatABI = true;
+static const bool IsMipsSoftFloatABI = true;
 #endif
 
 
@@ -83,45 +74,46 @@
 // Registers and FPURegisters.
 
 // Number of general purpose registers.
-const int kNumRegisters = 32;
-const int kInvalidRegister = -1;
+static const int kNumRegisters = 32;
+static const int kInvalidRegister = -1;
 
 // Number of registers with HI, LO, and pc.
-const int kNumSimuRegisters = 35;
+static const int kNumSimuRegisters = 35;
 
 // In the simulator, the PC register is simulated as the 34th register.
-const int kPCRegister = 34;
+static const int kPCRegister = 34;
 
 // Number coprocessor registers.
-const int kNumFPURegisters = 32;
-const int kInvalidFPURegister = -1;
+static const int kNumFPURegisters = 32;
+static const int kInvalidFPURegister = -1;
 
 // FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
-const int kFCSRRegister = 31;
-const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+static const int kFCSRRegister = 31;
+static const int kInvalidFPUControlRegister = -1;
+static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
 
 // FCSR constants.
-const uint32_t kFCSRInexactFlagBit = 2;
-const uint32_t kFCSRUnderflowFlagBit = 3;
-const uint32_t kFCSROverflowFlagBit = 4;
-const uint32_t kFCSRDivideByZeroFlagBit = 5;
-const uint32_t kFCSRInvalidOpFlagBit = 6;
+static const uint32_t kFCSRInexactFlagBit = 2;
+static const uint32_t kFCSRUnderflowFlagBit = 3;
+static const uint32_t kFCSROverflowFlagBit = 4;
+static const uint32_t kFCSRDivideByZeroFlagBit = 5;
+static const uint32_t kFCSRInvalidOpFlagBit = 6;
 
-const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
-const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
-const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
-const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
-const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+static const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+static const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+static const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+static const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+static const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
 
-const uint32_t kFCSRFlagMask =
+static const uint32_t kFCSRFlagMask =
     kFCSRInexactFlagMask |
     kFCSRUnderflowFlagMask |
     kFCSROverflowFlagMask |
     kFCSRDivideByZeroFlagMask |
     kFCSRInvalidOpFlagMask;
 
-const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+static const uint32_t kFCSRExceptionFlagMask =
+    kFCSRFlagMask ^ kFCSRInexactFlagMask;
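
As a usage sketch of the FCSR masks above (the two mask constants are copied from this hunk; the surrounding code is illustrative, not V8 source): after an FPU operation, the accumulated flag bits are tested with a single AND.

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kFCSRInexactFlagMask      = 1 << 2;
    static const uint32_t kFCSRDivideByZeroFlagMask = 1 << 5;

    int main() {
      uint32_t fcsr = kFCSRDivideByZeroFlagMask;  // pretend an FPU op set this
      if (fcsr & kFCSRDivideByZeroFlagMask) std::puts("divide-by-zero flagged");
      if ((fcsr & kFCSRInexactFlagMask) == 0) std::puts("result was exact");
      return 0;
    }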
 
 // Helper functions for converting between register numbers and names.
 class Registers {
@@ -134,7 +126,7 @@
 
   struct RegisterAlias {
     int reg;
-    const char* name;
+    const char *name;
   };
 
   static const int32_t kMaxValue = 0x7fffffff;
@@ -156,7 +148,7 @@
 
   struct RegisterAlias {
     int creg;
-    const char* name;
+    const char *name;
   };
 
  private:
@@ -185,66 +177,67 @@
 //   instructions (see Assembler::stop()).
 // - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
 //   debugger.
-const uint32_t kMaxWatchpointCode = 31;
-const uint32_t kMaxStopCode = 127;
+static const uint32_t kMaxWatchpointCode = 31;
+static const uint32_t kMaxStopCode = 127;
 STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
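
To make the three-way split described in the comment concrete, here is a hypothetical classifier over the two limits (Classify is invented for illustration and the labels paraphrase the comment; the limit values are copied from this hunk):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    static const uint32_t kMaxWatchpointCode = 31;
    static const uint32_t kMaxStopCode = 127;

    static const char* Classify(uint32_t code) {
      if (code <= kMaxWatchpointCode) return "watchpoint";
      if (code <= kMaxStopCode) return "stop (see Assembler::stop())";
      return "plain break (drops into the debugger)";
    }

    int main() {
      for (uint32_t code : {7u, 64u, 200u})
        std::printf("break code %3u -> %s\n", code, Classify(code));
      return 0;
    }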
 
 
 // ----- Fields offset and length.
-const int kOpcodeShift   = 26;
-const int kOpcodeBits    = 6;
-const int kRsShift       = 21;
-const int kRsBits        = 5;
-const int kRtShift       = 16;
-const int kRtBits        = 5;
-const int kRdShift       = 11;
-const int kRdBits        = 5;
-const int kSaShift       = 6;
-const int kSaBits        = 5;
-const int kFunctionShift = 0;
-const int kFunctionBits  = 6;
-const int kLuiShift      = 16;
+static const int kOpcodeShift   = 26;
+static const int kOpcodeBits    = 6;
+static const int kRsShift       = 21;
+static const int kRsBits        = 5;
+static const int kRtShift       = 16;
+static const int kRtBits        = 5;
+static const int kRdShift       = 11;
+static const int kRdBits        = 5;
+static const int kSaShift       = 6;
+static const int kSaBits        = 5;
+static const int kFunctionShift = 0;
+static const int kFunctionBits  = 6;
+static const int kLuiShift      = 16;
 
-const int kImm16Shift = 0;
-const int kImm16Bits  = 16;
-const int kImm26Shift = 0;
-const int kImm26Bits  = 26;
-const int kImm28Shift = 0;
-const int kImm28Bits  = 28;
+static const int kImm16Shift = 0;
+static const int kImm16Bits  = 16;
+static const int kImm26Shift = 0;
+static const int kImm26Bits  = 26;
+static const int kImm28Shift = 0;
+static const int kImm28Bits  = 28;
 
 // In branches and jumps immediate fields point to words, not bytes,
 // and are therefore shifted by 2.
-const int kImmFieldShift = 2;
+static const int kImmFieldShift = 2;
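
A worked example of the shift-by-2 rule in the comment above (addresses are made up; the arithmetic mirrors how a MIPS beq target is formed from a signed 16-bit word offset relative to the delay slot):

    #include <cstdint>
    #include <cstdio>

    static const int kImmFieldShift = 2;

    int main() {
      int16_t imm16 = 3;            // signed offset, in words
      uint32_t branch_pc = 0x1000;  // address of the branch instruction
      // Target = address of the delay slot + (imm16 << 2).
      uint32_t target =
          (branch_pc + 4) + (static_cast<int32_t>(imm16) << kImmFieldShift);
      std::printf("branch at 0x%x -> target 0x%x\n", branch_pc, target);  // 0x1010
      return 0;
    }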
 
-const int kFsShift       = 11;
-const int kFsBits        = 5;
-const int kFtShift       = 16;
-const int kFtBits        = 5;
-const int kFdShift       = 6;
-const int kFdBits        = 5;
-const int kFCccShift     = 8;
-const int kFCccBits      = 3;
-const int kFBccShift     = 18;
-const int kFBccBits      = 3;
-const int kFBtrueShift   = 16;
-const int kFBtrueBits    = 1;
+static const int kFsShift       = 11;
+static const int kFsBits        = 5;
+static const int kFtShift       = 16;
+static const int kFtBits        = 5;
+static const int kFdShift       = 6;
+static const int kFdBits        = 5;
+static const int kFCccShift     = 8;
+static const int kFCccBits      = 3;
+static const int kFBccShift     = 18;
+static const int kFBccBits      = 3;
+static const int kFBtrueShift   = 16;
+static const int kFBtrueBits    = 1;
 
 // ----- Miscellaneous useful masks.
 // Instruction bit masks.
-const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
-const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
-const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
-const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
-const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
-const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
-const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
-const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
-const int  kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+static const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+static const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+static const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+static const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
+static const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+static const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+static const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+static const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+static const int  kFunctionFieldMask =
+    ((1 << kFunctionBits) - 1) << kFunctionShift;
 // Misc masks.
-const int  kHiMask       =   0xffff << 16;
-const int  kLoMask       =   0xffff;
-const int  kSignMask     =   0x80000000;
-const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+static const int  kHiMask       =   0xffff << 16;
+static const int  kLoMask       =   0xffff;
+static const int  kSignMask     =   0x80000000;
+static const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
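
A decoding sketch using the masks and shifts above (constants copied from this hunk; the instruction word is the standard MIPS encoding of addu v0, a0, a1, independent of V8):

    #include <cstdint>
    #include <cstdio>

    static const int kOpcodeShift = 26, kOpcodeBits = 6;
    static const int kRsShift = 21, kRsBits = 5;
    static const int kOpcodeMask  = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
    static const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift;

    int main() {
      uint32_t instr = 0x00851021;  // addu v0, a0, a1
      uint32_t opcode = (instr & kOpcodeMask) >> kOpcodeShift;
      uint32_t rs = (instr & kRsFieldMask) >> kRsShift;
      std::printf("opcode=%u rs=%u\n", opcode, rs);  // opcode=0 (SPECIAL), rs=4 (a0)
      return 0;
    }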
 
 // ----- MIPS Opcodes and Function Fields.
 // We use this presentation to stay close to the table representation in
@@ -536,7 +529,7 @@
   kRoundToMinusInf = RM
 };
 
-const uint32_t kFPURoundingModeMask = 3 << 0;
+static const uint32_t kFPURoundingModeMask = 3 << 0;
 
 enum CheckForInexactConversion {
   kCheckForInexactConversion,
@@ -756,7 +749,7 @@
 
   // Say if the instruction should not be used in a branch delay slot.
   bool IsForbiddenInBranchDelay() const;
-  // Say if the instruction 'links'. e.g. jal, bal.
+  // Say if the instruction 'links', e.g. jal, bal.
   bool IsLinkingInstruction() const;
   // Say if the instruction is a break or a trap.
   bool IsTrap() const;
@@ -779,18 +772,18 @@
 // MIPS assembly various constants.
 
 // C/C++ argument slots size.
-const int kCArgSlotCount = 4;
-const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
+static const int kCArgSlotCount = 4;
+static const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize;
 // JS argument slots size.
-const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
 // Assembly builtins argument slots size.
-const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
+static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
 
-const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
 
-const int kDoubleAlignmentBits = 3;
-const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
-const int kDoubleAlignmentMask = kDoubleAlignment - 1;
+static const int kDoubleAlignmentBits = 3;
+static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
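
The alignment trio above follows the usual round-up idiom. A standalone sketch, assuming 8-byte double alignment as defined in this hunk:

    #include <cstdint>
    #include <cstdio>

    static const int kDoubleAlignmentBits = 3;
    static const int kDoubleAlignment = 1 << kDoubleAlignmentBits;  // 8
    static const int kDoubleAlignmentMask = kDoubleAlignment - 1;   // 0x7

    int main() {
      uintptr_t addr = 0x1003;
      // Round up to the next 8-byte boundary: add the mask, clear the low bits.
      uintptr_t aligned =
          (addr + kDoubleAlignmentMask) & ~uintptr_t(kDoubleAlignmentMask);
      std::printf("0x%lx -> 0x%lx\n",
                  (unsigned long)addr, (unsigned long)aligned);  // 0x1008
      return 0;
    }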
 
 
 } }   // namespace v8::internal
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 93ebeda..26e95fb 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 namespace internal {
 
 
-void CPU::SetUp() {
+void CPU::Setup() {
   CpuFeatures::Probe();
 }
 
@@ -64,19 +64,15 @@
   }
 
 #if !defined (USE_SIMULATOR)
-#if defined(ANDROID)
-  // Bionic cacheflush can typically run in userland, avoiding kernel call.
-  char *end = reinterpret_cast<char *>(start) + size;
-  cacheflush(
-    reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
-#else  // ANDROID
   int res;
+
   // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
   res = syscall(__NR_cacheflush, start, size, ICACHE);
+
   if (res) {
     V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
   }
-#endif  // ANDROID
+
 #else  // USE_SIMULATOR.
   // Not generating mips instructions for C-code. This means that we are
   // building a mips emulator based target.  We should notify the simulator
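
For reference, a hedged standalone sketch of the Linux/MIPS path this hunk keeps. FlushInstructionCache here is an illustrative wrapper, not the V8 function; the syscall number and the ICACHE constant come from <sys/syscall.h> and <asm/cachectl.h>, matching the syscall shown above.

    #include <cstddef>

    #ifdef __mips__
    #include <asm/cachectl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #endif

    // Returns true on success. Only meaningful on a MIPS Linux target.
    static bool FlushInstructionCache(void* start, size_t size) {
    #ifdef __mips__
      // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
      return syscall(__NR_cacheflush, start, size, ICACHE) == 0;
    #else
      (void)start;
      (void)size;
      return true;  // no-op off-target so the sketch stays compilable
    #endif
    }

    int main() {
      char buffer[64] = {0};  // stand-in for freshly generated code
      return FlushInstructionCache(buffer, sizeof(buffer)) ? 0 : 1;
    }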
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index 83f5f50..e323c50 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -124,59 +124,56 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non object values
-    // are stored as a smi causing it to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
-    if ((object_regs | non_object_regs) != 0) {
-      for (int i = 0; i < kNumJSCallerSaved; i++) {
-        int r = JSCallerSavedCode(i);
-        Register reg = { r };
-        if ((non_object_regs & (1 << r)) != 0) {
-          if (FLAG_debug_code) {
-            __ And(at, reg, 0xc0000000);
-            __ Assert(
-                eq, "Unable to encode value as smi", at, Operand(zero_reg));
-          }
-          __ sll(reg, reg, kSmiTagSize);
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non object values
+  // are stored as a smi causing it to be untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  if ((object_regs | non_object_regs) != 0) {
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ And(at, reg, 0xc0000000);
+          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
         }
+        __ sll(reg, reg, kSmiTagSize);
       }
-      __ MultiPush(object_regs | non_object_regs);
     }
+    __ MultiPush(object_regs | non_object_regs);
+  }
 
 #ifdef DEBUG
-    __ RecordComment("// Calling from debug break to runtime - come in - over");
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-    __ PrepareCEntryArgs(0);  // No arguments.
-    __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
+  __ mov(a0, zero_reg);  // No arguments.
+  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-    CEntryStub ceb(1);
-    __ CallStub(&ceb);
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
 
-    // Restore the register values from the expression stack.
-    if ((object_regs | non_object_regs) != 0) {
-      __ MultiPop(object_regs | non_object_regs);
-      for (int i = 0; i < kNumJSCallerSaved; i++) {
-        int r = JSCallerSavedCode(i);
-        Register reg = { r };
-        if ((non_object_regs & (1 << r)) != 0) {
-          __ srl(reg, reg, kSmiTagSize);
-        }
-        if (FLAG_debug_code &&
-            (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-          __ li(reg, kDebugZapValue);
-        }
+  // Restore the register values from the expression stack.
+  if ((object_regs | non_object_regs) != 0) {
+    __ MultiPop(object_regs | non_object_regs);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ srl(reg, reg, kSmiTagSize);
+      }
+      if (FLAG_debug_code &&
+          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
+        __ li(reg, kDebugZapValue);
       }
     }
-
-    // Leave the internal frame.
   }
 
+  __ LeaveInternalFrame();
+
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
   // overwritten by the address of DebugBreakXXX.
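
The smi trick the comments above describe can be shown in isolation: a raw register value whose top two bits are clear is shifted left by the smi tag size, stored as a smi the GC will not touch, and shifted back on the way out. A sketch assuming kSmiTagSize == 1, as on V8's 32-bit targets; this is not the actual assembler code.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const int kSmiTagSize = 1;  // 31-bit smis on 32-bit targets

    int main() {
      uint32_t raw = 0x12345678;
      assert((raw & 0xc0000000) == 0);       // same check the Assert above emits
      uint32_t as_smi = raw << kSmiTagSize;  // low bit 0 marks a smi
      uint32_t restored = as_smi >> kSmiTagSize;
      std::printf("restored 0x%x == original 0x%x\n", restored, raw);
      return 0;
    }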
@@ -243,6 +240,14 @@
 }
 
 
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+  // Calling convention for construct call (from builtins-mips.cc).
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
+}
+
+
 void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
   // In places other than IC call sites it is expected that v0 is TOS which
   // is an object - this is not generally the case so this should be used with
@@ -251,43 +256,11 @@
 }
 
 
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-mips.cc).
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a1 : function
+  //  No registers used on entry.
   // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-mips.cc).
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- a2 : cache cell for call target
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
-  // Calling convention for CallConstructStub (from code-stubs-mips.cc).
-  // ----------- S t a t e -------------
-  //  -- a0     : number of arguments (not smi)
-  //  -- a1     : constructor function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, a1.bit() , a0.bit());
-}
-
-
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
-  // Calling convention for CallConstructStub (from code-stubs-mips.cc).
-  // ----------- S t a t e -------------
-  //  -- a0     : number of arguments (not smi)
-  //  -- a1     : constructor function
-  //  -- a2     : cache cell for call target
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+  Generate_DebugBreakCallHelper(masm, 0, 0);
 }
 
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 51c2e46..18b6231 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -32,961 +32,65 @@
 #include "full-codegen.h"
 #include "safepoint-table.h"
 
+// Note: this file was taken from the X64 version. ARM has a partially
+// working lithium implementation, but it has not yet been ported to MIPS.
+
 namespace v8 {
 namespace internal {
 
 
+const int Deoptimizer::table_entry_size_ = 10;
+
+
 int Deoptimizer::patch_size() {
-  const int kCallInstructionSizeInWords = 4;
+  const int kCallInstructionSizeInWords = 3;
   return kCallInstructionSizeInWords * Assembler::kInstrSize;
 }
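
A quick arithmetic check of the restored patch_size(): with 4-byte MIPS instructions, three words leave room for a 12-byte call sequence. A standalone sketch; Assembler::kInstrSize is replaced by a local constant here.

    #include <cstdio>

    static const int kInstrSize = 4;  // every MIPS instruction is 32 bits

    static int patch_size() {
      const int kCallInstructionSizeInWords = 3;
      return kCallInstructionSizeInWords * kInstrSize;
    }

    int main() {
      std::printf("patch size: %d bytes\n", patch_size());  // 12
      return 0;
    }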
 
 
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  HandleScope scope;
-  AssertNoAllocation no_allocation;
-
-  if (!function->IsOptimized()) return;
-
-  // Get the optimized code.
-  Code* code = function->code();
-  Address code_start_address = code->instruction_start();
-
-  // Invalidate the relocation information, as it will become invalid by the
-  // code patching below, and is not needed any more.
-  code->InvalidateRelocation();
-
-  // For each LLazyBailout instruction insert a call to the corresponding
-  // deoptimization entry.
-  DeoptimizationInputData* deopt_data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-#ifdef DEBUG
-  Address prev_call_address = NULL;
-#endif
-  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
-    if (deopt_data->Pc(i)->value() == -1) continue;
-    Address call_address = code_start_address + deopt_data->Pc(i)->value();
-    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
-    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
-                                                      RelocInfo::NONE);
-    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
-    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
-    ASSERT(call_size_in_bytes <= patch_size());
-    CodePatcher patcher(call_address, call_size_in_words);
-    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
-    ASSERT(prev_call_address == NULL ||
-           call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
-
-#ifdef DEBUG
-    prev_call_address = call_address;
-#endif
-  }
-
-  Isolate* isolate = code->GetIsolate();
-
-  // Add the deoptimizing code to the list.
-  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = isolate->deoptimizer_data();
-  node->set_next(data->deoptimizing_code_list_);
-  data->deoptimizing_code_list_ = node;
-
-  // We might be in the middle of incremental marking with compaction.
-  // Tell collector to treat this code object in a special way and
-  // ignore all slots that might have been recorded on it.
-  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
-  // Set the code for the function to non-optimized version.
-  function->ReplaceCode(function->shared()->code());
-
-  if (FLAG_trace_deopt) {
-    PrintF("[forced deoptimization: ");
-    function->PrintName();
-    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
-  }
+  UNIMPLEMENTED();
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
-  const int kInstrSize = Assembler::kInstrSize;
-  // This structure comes from FullCodeGenerator::EmitStackCheck.
-  // The call of the stack guard check has the following form:
-  // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
-  // beq at, zero_reg, ok
-  // lui t9, <stack guard address> upper
-  // ori t9, <stack guard address> lower
-  // jalr t9
-  // nop
-  // ----- pc_after points here
-
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
-  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  patcher.masm()->addiu(at, zero_reg, 1);
-
-  // Replace the stack check address in the load-immediate (lui/ori pair)
-  // with the entry address of the replacement code.
-  ASSERT(reinterpret_cast<uint32_t>(
-      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-      reinterpret_cast<uint32_t>(check_code->entry()));
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   replacement_code->entry());
-
-  // We patched the code to the following form:
-  // addiu at, zero_reg, 1
-  // beq at, zero_reg, ok  ;; Not changed
-  // lui t9, <on-stack replacement address> upper
-  // ori t9, <on-stack replacement address> lower
-  // jalr t9  ;; Not changed
-  // nop  ;; Not changed
-  // ----- pc_after points here
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
+  UNIMPLEMENTED();
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
-                                         Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
-  // Exact opposite of the function above.
-  const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Assembler::IsAddImmediate(
-      Assembler::instr_at(pc_after - 6 * kInstrSize)));
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
-  // Restore the sltu instruction so beq can be taken again.
-  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
-  if (FLAG_count_based_interrupts) {
-    patcher.masm()->slt(at, a3, zero_reg);
-  } else {
-    patcher.masm()->sltu(at, sp, t0);
-  }
-
-  // Replace the on-stack replacement address in the load-immediate (lui/ori
-  // pair) with the entry address of the normal stack-check code.
-  ASSERT(reinterpret_cast<uint32_t>(
-      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
-      reinterpret_cast<uint32_t>(replacement_code->entry()));
-  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
-                                   check_code->entry());
-
-  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
-}
-
-
-static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
-  ByteArray* translations = data->TranslationByteArray();
-  int length = data->DeoptCount();
-  for (int i = 0; i < length; i++) {
-    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
-      TranslationIterator it(translations,  data->TranslationIndex(i)->value());
-      int value = it.Next();
-      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
-      // Read the number of frames.
-      value = it.Next();
-      if (value == 1) return i;
-    }
-  }
-  UNREACHABLE();
-  return -1;
+  UNIMPLEMENTED();
 }
 
 
 void Deoptimizer::DoComputeOsrOutputFrame() {
-  DeoptimizationInputData* data = DeoptimizationInputData::cast(
-      optimized_code_->deoptimization_data());
-  unsigned ast_id = data->OsrAstId()->value();
-
-  int bailout_id = LookupBailoutId(data, ast_id);
-  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
-  ByteArray* translations = data->TranslationByteArray();
-
-  TranslationIterator iterator(translations, translation_index);
-  Translation::Opcode opcode =
-      static_cast<Translation::Opcode>(iterator.Next());
-  ASSERT(Translation::BEGIN == opcode);
-  USE(opcode);
-  int count = iterator.Next();
-  iterator.Skip(1);  // Drop JS frame count.
-  ASSERT(count == 1);
-  USE(count);
-
-  opcode = static_cast<Translation::Opcode>(iterator.Next());
-  USE(opcode);
-  ASSERT(Translation::JS_FRAME == opcode);
-  unsigned node_id = iterator.Next();
-  USE(node_id);
-  ASSERT(node_id == ast_id);
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
-  USE(function);
-  ASSERT(function == function_);
-  unsigned height = iterator.Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  USE(height_in_bytes);
-
-  unsigned fixed_size = ComputeFixedSize(function_);
-  unsigned input_frame_size = input_->GetFrameSize();
-  ASSERT(fixed_size + height_in_bytes == input_frame_size);
-
-  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
-  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
-  unsigned outgoing_size = outgoing_height * kPointerSize;
-  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
-  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.
-
-  if (FLAG_trace_osr) {
-    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
-           reinterpret_cast<intptr_t>(function_));
-    function_->PrintName();
-    PrintF(" => node=%u, frame=%d->%d]\n",
-           ast_id,
-           input_frame_size,
-           output_frame_size);
-  }
-
-  // There's only one output frame in the OSR case.
-  output_count_ = 1;
-  output_ = new FrameDescription*[1];
-  output_[0] = new(output_frame_size) FrameDescription(
-      output_frame_size, function_);
-  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
-
-  // Clear the incoming parameters in the optimized frame to avoid
-  // confusing the garbage collector.
-  unsigned output_offset = output_frame_size - kPointerSize;
-  int parameter_count = function_->shared()->formal_parameter_count() + 1;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_[0]->SetFrameSlot(output_offset, 0);
-    output_offset -= kPointerSize;
-  }
-
-  // Translate the incoming parameters. This may overwrite some of the
-  // incoming argument slots we've just cleared.
-  int input_offset = input_frame_size - kPointerSize;
-  bool ok = true;
-  int limit = input_offset - (parameter_count * kPointerSize);
-  while (ok && input_offset > limit) {
-    ok = DoOsrTranslateCommand(&iterator, &input_offset);
-  }
-
-  // There are no translation commands for the caller's pc and fp, the
-  // context, and the function.  Set them up explicitly.
-  for (int i =  StandardFrameConstants::kCallerPCOffset;
-       ok && i >=  StandardFrameConstants::kMarkerOffset;
-       i -= kPointerSize) {
-    uint32_t input_value = input_->GetFrameSlot(input_offset);
-    if (FLAG_trace_osr) {
-      const char* name = "UNKNOWN";
-      switch (i) {
-        case StandardFrameConstants::kCallerPCOffset:
-          name = "caller's pc";
-          break;
-        case StandardFrameConstants::kCallerFPOffset:
-          name = "fp";
-          break;
-        case StandardFrameConstants::kContextOffset:
-          name = "context";
-          break;
-        case StandardFrameConstants::kMarkerOffset:
-          name = "function";
-          break;
-      }
-      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
-             output_offset,
-             input_value,
-             input_offset,
-             name);
-    }
-
-    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
-    input_offset -= kPointerSize;
-    output_offset -= kPointerSize;
-  }
-
-  // Translate the rest of the frame.
-  while (ok && input_offset >= 0) {
-    ok = DoOsrTranslateCommand(&iterator, &input_offset);
-  }
-
-  // If translation of any command failed, continue using the input frame.
-  if (!ok) {
-    delete output_[0];
-    output_[0] = input_;
-    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
-  } else {
-    // Set up the frame pointer and the context pointer.
-    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
-    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));
-
-    unsigned pc_offset = data->OsrPcOffset()->value();
-    uint32_t pc = reinterpret_cast<uint32_t>(
-        optimized_code_->entry() + pc_offset);
-    output_[0]->SetPc(pc);
-  }
-  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
-  output_[0]->SetContinuation(
-      reinterpret_cast<uint32_t>(continuation->entry()));
-
-  if (FLAG_trace_osr) {
-    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
-           ok ? "finished" : "aborted",
-           reinterpret_cast<intptr_t>(function));
-    function->PrintName();
-    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
-  }
+  UNIMPLEMENTED();
 }
 
 
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
-                                                 int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
-  // Arguments adaptor can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // A marker value is used in place of the context.
-  output_offset -= kPointerSize;
-  intptr_t context = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
-           top_address + output_offset, output_offset, context);
-  }
-
-  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* adaptor_trampoline =
-      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      adaptor_trampoline->instruction_start() +
-      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  UNIMPLEMENTED();
 }
 
 
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
-                                              int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = 7 * kPointerSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
-  // Construct stub can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  uint32_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // A marker value is used in place of the function.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  // Constructor function being invoked by the stub.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // The newly allocated object was passed as receiver in the artificial
-  // constructor stub environment created by HEnvironment::CopyForInlining().
-  output_offset -= kPointerSize;
-  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
-  uint32_t pc = reinterpret_cast<uint32_t>(
-      construct_stub->instruction_start() +
-      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
-// This code is very similar to ia32/arm code, but relies on register names
-// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
-                                   int frame_index) {
-  // Read the ast node id, function, and frame height for this output frame.
-  int node_id = iterator->Next();
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating ");
-    function->PrintName();
-    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
-  }
-
-  // The 'fixed' part of the frame consists of the incoming parameters and
-  // the part described by JavaScriptFrameConstants.
-  unsigned fixed_frame_size = ComputeFixedSize(function);
-  unsigned input_frame_size = input_->GetFrameSize();
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
-
-  bool is_bottommost = (0 == frame_index);
-  bool is_topmost = (output_count_ - 1 == frame_index);
-  ASSERT(frame_index >= 0 && frame_index < output_count_);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address for the bottommost output frame can be computed from
-  // the input frame pointer and the output frame's height.  For all
-  // subsequent output frames, it can be computed from the previous one's
-  // top address and the current frame's size.
-  uint32_t top_address;
-  if (is_bottommost) {
-    // 2 = context and function in the frame.
-    top_address =
-        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
-  } else {
-    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  }
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = function->shared()->formal_parameter_count() + 1;
-  unsigned output_offset = output_frame_size;
-  unsigned input_offset = input_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-  input_offset -= (parameter_count * kPointerSize);
-
-  // There are no translation commands for the caller's pc and fp, the
-  // context, and the function.  Synthesize their values and set them up
-  // explicitly.
-  //
-  // The caller's pc for the bottommost output frame is the same as in the
-  // input frame.  For all subsequent output frames, it can be read from the
-  // previous one.  This frame's pc can be computed from the non-optimized
-  // function code and AST id of the bailout.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
-  intptr_t value;
-  if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
-  } else {
-    value = output_[frame_index - 1]->GetPc();
-  }
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // The caller's frame pointer for the bottommost output frame is the same
-  // as in the input frame.  For all subsequent output frames, it can be
-  // read from the previous one.  Also compute and set this frame's frame
-  // pointer.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
-  if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
-  } else {
-    value = output_[frame_index - 1]->GetFp();
-  }
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
-  output_frame->SetFp(fp_value);
-  if (is_topmost) {
-    output_frame->SetRegister(fp.code(), fp_value);
-  }
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // For the bottommost output frame the context can be gotten from the input
-  // frame. For all subsequent output frames it can be gotten from the function
-  // so long as we don't inline functions that need local contexts.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
-  if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
-  } else {
-    value = reinterpret_cast<intptr_t>(function->context());
-  }
-  output_frame->SetFrameSlot(output_offset, value);
-  output_frame->SetContext(value);
-  if (is_topmost) output_frame->SetRegister(cp.code(), value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // The function was mentioned explicitly in the BEGIN_FRAME.
-  output_offset -= kPointerSize;
-  input_offset -= kPointerSize;
-  value = reinterpret_cast<uint32_t>(function);
-  // The function for the bottommost output frame should also agree with the
-  // input frame.
-  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Translate the rest of the frame.
-  for (unsigned i = 0; i < height; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-  ASSERT(0 == output_offset);
-
-  // Compute this frame's PC, state, and continuation.
-  Code* non_optimized_code = function->shared()->code();
-  FixedArray* raw_data = non_optimized_code->deoptimization_data();
-  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
-  Address start = non_optimized_code->instruction_start();
-  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
-  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
-  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
-  output_frame->SetPc(pc_value);
-
-  FullCodeGenerator::State state =
-      FullCodeGenerator::StateField::decode(pc_and_state);
-  output_frame->SetState(Smi::FromInt(state));
-
-
-  // Set the continuation for the topmost frame.
-  if (is_topmost && bailout_type_ != DEBUGGER) {
-    Builtins* builtins = isolate_->builtins();
-    Code* continuation = (bailout_type_ == EAGER)
-        ? builtins->builtin(Builtins::kNotifyDeoptimized)
-        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
-    output_frame->SetContinuation(
-        reinterpret_cast<uint32_t>(continuation->entry()));
-  }
-}
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
+  UNIMPLEMENTED();
 }
 
 
-#define __ masm()->
-
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
-  GeneratePrologue();
-
-  Isolate* isolate = masm()->isolate();
-
-  CpuFeatures::Scope scope(FPU);
-  // Unlike on ARM we don't save all the registers, just the useful ones.
-  // For the rest, there are gaps on the stack, so the offsets remain the same.
-  const int kNumberOfRegisters = Register::kNumRegisters;
-
-  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
-  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
-
-  const int kDoubleRegsSize =
-      kDoubleSize * FPURegister::kNumAllocatableRegisters;
-
-  // Save all FPU registers before messing with them.
-  __ Subu(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
-    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
-    __ sdc1(fpu_reg, MemOperand(sp, offset));
-  }
-
-  // Push saved_regs (needed to populate FrameDescription::registers_).
-  // Leave gaps for other registers.
-  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
-  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
-    if ((saved_regs & (1 << i)) != 0) {
-      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
-    }
-  }
-
-  const int kSavedRegistersAreaSize =
-      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
-  // Get the bailout id from the stack.
-  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
-
-  // Get the address of the location in the code object if possible (a3) (return
-  // address for lazy deoptimization) and compute the fp-to-sp delta in
-  // register t0.
-  if (type() == EAGER) {
-    __ mov(a3, zero_reg);
-    // Correct one word for bailout id.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else if (type() == OSR) {
-    __ mov(a3, ra);
-    // Correct one word for bailout id.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ mov(a3, ra);
-    // Correct two words for bailout id and return address.
-    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
-
-  __ Subu(t0, fp, t0);
-
-  // Allocate a new deoptimizer object.
-  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
-  __ PrepareCallCFunction(6, t1);
-  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a1, Operand(type()));  // bailout type,
-  // a2: bailout id already loaded.
-  // a3: code address or 0 already loaded.
-  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
-  __ li(t1, Operand(ExternalReference::isolate_address()));
-  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
-  // Call Deoptimizer::New().
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-  }
-
-  // Preserve "deoptimizer" object in register v0 and get the input
-  // frame descriptor pointer to a1 (deoptimizer->input_);
-  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
-  __ mov(a0, v0);
-  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
-
-  // Copy core registers into FrameDescription::registers_[kNumRegisters].
-  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
-  for (int i = 0; i < kNumberOfRegisters; i++) {
-    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
-    if ((saved_regs & (1 << i)) != 0) {
-      __ lw(a2, MemOperand(sp, i * kPointerSize));
-      __ sw(a2, MemOperand(a1, offset));
-    } else if (FLAG_debug_code) {
-      __ li(a2, kDebugZapValue);
-      __ sw(a2, MemOperand(a1, offset));
-    }
-  }
-
-  // Copy FPU registers to
-  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  int double_regs_offset = FrameDescription::double_registers_offset();
-  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
-    __ ldc1(f0, MemOperand(sp, src_offset));
-    __ sdc1(f0, MemOperand(a1, dst_offset));
-  }
-
-  // Remove the bailout id, eventually return address, and the saved registers
-  // from the stack.
-  if (type() == EAGER || type() == OSR) {
-    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
-  } else {
-    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
-  }
-
-  // Compute a pointer to the unwinding limit in register a2; that is
-  // the first stack slot not part of the input frame.
-  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
-  __ Addu(a2, a2, sp);
-
-  // Unwind the stack down to - but not including - the unwinding
-  // limit and copy the contents of the activation frame to the input
-  // frame description.
-  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
-  Label pop_loop;
-  __ bind(&pop_loop);
-  __ pop(t0);
-  __ sw(t0, MemOperand(a3, 0));
-  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
-  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.
-
-  // Compute the output frame in the deoptimizer.
-  __ push(a0);  // Preserve deoptimizer object across call.
-  // a0: deoptimizer object; a1: scratch.
-  __ PrepareCallCFunction(1, a1);
-  // Call Deoptimizer::ComputeOutputFrames().
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 1);
-  }
-  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
-
-  // Replace the current (input) frame with the output frames.
-  Label outer_push_loop, inner_push_loop;
-  // Outer loop state: a0 = current "FrameDescription** output_",
-  // a1 = one past the last FrameDescription**.
-  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
-  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
-  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
-  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
-  __ bind(&outer_push_loop);
-  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
-  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
-  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
-  __ bind(&inner_push_loop);
-  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
-  __ Addu(t2, a2, Operand(a3));
-  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
-  __ push(t3);
-  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
-
-  __ Addu(a0, a0, Operand(kPointerSize));
-  __ Branch(&outer_push_loop, lt, a0, Operand(a1));
-
-
-  // Push state, pc, and continuation from the last output frame.
-  if (type() != OSR) {
-    __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
-    __ push(t2);
-  }
-
-  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
-  __ push(t2);
-  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
-  __ push(t2);
-
-
-  // Technically restoring 'at' should work unless zero_reg is also restored
-  // but it's safer to check for this.
-  ASSERT(!(at.bit() & restored_regs));
-  // Restore the registers from the last output frame.
-  __ mov(at, a2);
-  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
-    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
-    if ((restored_regs & (1 << i)) != 0) {
-      __ lw(ToRegister(i), MemOperand(at, offset));
-    }
-  }
-
-  __ InitializeRootRegister();
-
-  __ pop(at);  // Get continuation, leave pc on stack.
-  __ pop(ra);
-  __ Jump(at);
-  __ stop("Unreachable.");
+  UNIMPLEMENTED();
 }
 
 
-// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
-
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-
-  // Create a sequence of deoptimization entries. Note that any
-  // registers may be still live.
-  Label table_start;
-  __ bind(&table_start);
-  for (int i = 0; i < count(); i++) {
-    Label start;
-    __ bind(&start);
-    if (type() != EAGER) {
-      // Emulate ia32 like call by pushing return address to stack.
-      __ addiu(sp, sp, -2 * kPointerSize);
-      __ sw(ra, MemOperand(sp, 1 * kPointerSize));
-    } else {
-      __ addiu(sp, sp, -1 * kPointerSize);
-    }
-    // Jump over the remaining deopt entries (including this one).
-    // This code is always reached by calling Jump, which puts the target (label
-    // start) into t9.
-    const int remaining_entries = (count() - i) * table_entry_size_;
-    __ Addu(t9, t9, remaining_entries);
-    // 'at' was clobbered so we can only load the current entry value here.
-    __ li(at, i);
-    __ jr(t9);  // Expose delay slot.
-    __ sw(at, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.
-
-    // Pad the rest of the code.
-    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
-      __ nop();
-    }
-
-    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
-  }
-
-  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
-      count() * table_entry_size_);
+  UNIMPLEMENTED();
 }
 
-#undef __
-
 
 } }  // namespace v8::internal
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 1d40c2c..fde0c58 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -515,7 +515,7 @@
               Format(instr, "cvt.w.d 'fd, 'fs");
               break;
             case CVT_L_D: {
-              if (kArchVariant == kMips32r2) {
+              if (mips32r2) {
                 Format(instr, "cvt.l.d 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -526,7 +526,7 @@
               Format(instr, "trunc.w.d 'fd, 'fs");
               break;
             case TRUNC_L_D: {
-              if (kArchVariant == kMips32r2) {
+              if (mips32r2) {
                 Format(instr, "trunc.l.d 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -592,7 +592,7 @@
         case L:
           switch (instr->FunctionFieldRaw()) {
             case CVT_D_L: {
-              if (kArchVariant == kMips32r2) {
+              if (mips32r2) {
                 Format(instr, "cvt.d.l 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -600,7 +600,7 @@
               break;
             }
             case CVT_S_L: {
-              if (kArchVariant == kMips32r2) {
+              if (mips32r2) {
                 Format(instr, "cvt.s.l 'fd, 'fs");
               } else {
                 Unknown(instr);
@@ -636,7 +636,7 @@
           if (instr->RsValue() == 0) {
             Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
-            if (kArchVariant == kMips32r2) {
+            if (mips32r2) {
               Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
@@ -653,7 +653,7 @@
           if (instr->SaValue() == 0) {
             Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
-            if (kArchVariant == kMips32r2) {
+            if (mips32r2) {
               Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
@@ -770,7 +770,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {
-          if (kArchVariant == kMips32r2) {
+          if (mips32r2) {
             Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
@@ -778,7 +778,7 @@
           break;
         }
         case EXT: {
-          if (kArchVariant == kMips32r2) {
+          if (mips32r2) {
             Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 2ed358a..2c83893 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -36,9 +36,9 @@
 // Register lists.
 // Note that the bit values must match those used in actual instruction
 // encoding.
-const int kNumRegs = 32;
+static const int kNumRegs = 32;
 
-const RegList kJSCallerSaved =
+static const RegList kJSCallerSaved =
   1 << 2  |  // v0
   1 << 3  |  // v1
   1 << 4  |  // a0
@@ -54,7 +54,7 @@
   1 << 14 |  // t6
   1 << 15;   // t7
 
-const int kNumJSCallerSaved = 14;
+static const int kNumJSCallerSaved = 14;
 
 
 // Return the code of the n-th caller-saved register available to JavaScript
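
A sketch of how a RegList bitmap such as kJSCallerSaved is consumed: membership is a single AND, and the MultiPush/MultiPop loops elsewhere in this patch walk the set bits the same way. The list below is a subset of the registers above, for brevity; RegList is assumed to be a 32-bit mask as in V8.

    #include <cstdint>
    #include <cstdio>

    typedef uint32_t RegList;

    static const RegList kJSCallerSaved =
        1 << 2 | 1 << 3 |                    // v0, v1
        1 << 4 | 1 << 5 | 1 << 6 | 1 << 7;   // a0-a3

    int main() {
      for (int code = 0; code < 32; code++) {
        if (kJSCallerSaved & (1u << code))
          std::printf("caller-saved register code %d\n", code);
      }
      return 0;
    }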
@@ -63,7 +63,7 @@
 
 
 // Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved =
+static const RegList kCalleeSaved =
   1 << 16 |  // s0
   1 << 17 |  // s1
   1 << 18 |  // s2
@@ -74,9 +74,9 @@
   1 << 23 |  // s7 (cp in Javascript code)
   1 << 30;   // fp/s8
 
-const int kNumCalleeSaved = 9;
+static const int kNumCalleeSaved = 9;
 
-const RegList kCalleeSavedFPU =
+static const RegList kCalleeSavedFPU =
   1 << 20 |  // f20
   1 << 22 |  // f22
   1 << 24 |  // f24
@@ -84,37 +84,23 @@
   1 << 28 |  // f28
   1 << 30;   // f30
 
-const int kNumCalleeSavedFPU = 6;
-
-const RegList kCallerSavedFPU =
-  1 << 0  |  // f0
-  1 << 2  |  // f2
-  1 << 4  |  // f4
-  1 << 6  |  // f6
-  1 << 8  |  // f8
-  1 << 10 |  // f10
-  1 << 12 |  // f12
-  1 << 14 |  // f14
-  1 << 16 |  // f16
-  1 << 18;   // f18
-
-
+static const int kNumCalleeSavedFPU = 6;
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
-const int kNumSafepointRegisters = 24;
+static const int kNumSafepointRegisters = 24;
 
 // Define the list of registers actually saved at safepoints.
 // Note that the number of saved registers may be smaller than the reserved
 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters =
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+static const int kNumSafepointSavedRegisters =
     kNumJSCallerSaved + kNumCalleeSaved;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
-const int kUndefIndex = -1;
+static const int kUndefIndex = -1;
 // Map with indexes on stack that corresponds to codes of saved registers.
-const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+static const int kSafepointRegisterStackIndexMap[kNumRegs] = {
   kUndefIndex,  // zero_reg
   kUndefIndex,  // at
   0,   // v0
@@ -154,13 +140,13 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset     = 0 * kPointerSize;
-  static const int kCodeOffset     = 1 * kPointerSize;
-  static const int kStateOffset    = 2 * kPointerSize;
-  static const int kContextOffset  = 3 * kPointerSize;
-  static const int kFPOffset       = 4 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kStateOffset   = 1 * kPointerSize;
+  static const int kContextOffset = 2 * kPointerSize;
+  static const int kFPOffset      = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
-  static const int kSize = kFPOffset + kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
 };
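The restored handler layout is five consecutive pointer-sized slots ending in a saved pc, replacing the later four-slot-plus-code layout. A hedged sketch of how the constants above map onto memory, using illustrative types rather than V8's:

#include <cstdint>

static const int kPointerSize = 4;  // 32-bit MIPS

struct StackHandlerSketch {  // mirrors kNextOffset .. kPCOffset
  uint32_t next;     // 0 * kPointerSize
  uint32_t state;    // 1 * kPointerSize
  uint32_t context;  // 2 * kPointerSize
  uint32_t fp;       // 3 * kPointerSize
  uint32_t pc;       // 4 * kPointerSize
};

static_assert(sizeof(StackHandlerSketch) == 5 * kPointerSize,
              "kSize == kPCOffset + kPointerSize");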
 
 
@@ -195,9 +181,6 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -233,8 +216,6 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
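Throughout this header a RegList encodes a register set as a bitmask, one bit per register code, which is why the bit values must match the instruction encoding. A small sketch of the idea; the typedef name matches V8's, but the helper and the example set are assumptions:

#include <cstdint>
#include <cstdio>

typedef uint32_t RegList;

static const RegList kCalleeSavedSketch =
    1u << 16 |  // s0
    1u << 17;   // s1

static bool Contains(RegList list, int code) {
  return ((list >> code) & 1u) != 0;  // bit n set => register n is in the set
}

int main() { std::printf("%d\n", Contains(kCalleeSavedSketch, 16) ? 1 : 0); }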
 
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 657bee8..9a210c4 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,13 +42,11 @@
 #include "compiler.h"
 #include "debug.h"
 #include "full-codegen.h"
-#include "isolate-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "stub-cache.h"
 
 #include "mips/code-stubs-mips.h"
-#include "mips/macro-assembler-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -56,14 +54,17 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
-// (raw 16 bit immediate value is used) is the delta from the pc to the first
+// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
+// bit immediate value is used) is the delta from the pc to the first
 // instruction of the patchable code.
-// The marker instruction is effectively a NOP (dest is zero_reg) and will
-// never be emitted by normal code.
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
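As the restored comment describes, the marker packs a pc delta into a single andi instruction: the register field carries delta / kImm16Mask and the raw immediate carries the remainder, matching the decode in EmitPatchInfo below. A standalone round-trip sketch, with the mask value assumed:

#include <cassert>

static const int kImm16Mask = 0xffff;

static void EncodeMarker(int delta, int* reg_code, int* imm16) {
  *reg_code = delta / kImm16Mask;  // stored in the rx register field
  *imm16 = delta % kImm16Mask;     // stored in the raw 16-bit immediate
}

static int DecodeMarker(int reg_code, int imm16) {
  return reg_code * kImm16Mask + imm16;  // recovers the pc delta
}

int main() {
  int reg = 0, imm = 0;
  EncodeMarker(70000, &reg, &imm);
  assert(DecodeMarker(reg, imm) == 70000);
}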
@@ -102,7 +103,7 @@
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
       Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
-      __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
+      __ andi(at, reg, delta_to_patch_site % kImm16Mask);
 #ifdef DEBUG
       info_emitted_ = true;
 #endif
@@ -120,20 +121,13 @@
 };
 
 
-// TODO(jkummerow): Obsolete as soon as x64 is updated. Remove.
-int FullCodeGenerator::self_optimization_header_size() {
-  UNREACHABLE();
-  return 10 * Instruction::kInstrSize;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right.  The actual
 // argument count matches the formal parameter count expected by the
 // function.
 //
 // The live registers are:
-//   o a1: the JS function object being called (i.e. ourselves)
+//   o a1: the JS function object being called (ie, ourselves)
 //   o cp: our context
 //   o fp: our caller's frame pointer
 //   o sp: stack pointer
@@ -141,12 +135,10 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-mips.h for its layout.
-void FullCodeGenerator::Generate() {
-  CompilationInfo* info = info_;
-  handler_table_ =
-      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
-  profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
-      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
@@ -161,7 +153,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). t1 is zero for method calls and non-zero for
   // function calls.
-  if (!info->is_classic_mode() || info->is_native()) {
+  if (info->is_strict_mode() || info->is_native()) {
     Label ok;
     __ Branch(&ok, eq, t1, Operand(zero_reg));
     int receiver_offset = info->scope()->num_parameters() * kPointerSize;
@@ -170,11 +162,6 @@
     __ bind(&ok);
   }
 
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(ra, fp, cp, a1);
@@ -220,12 +207,14 @@
         // Load parameter from stack.
         __ lw(a0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        MemOperand target = ContextOperand(cp, var->index());
-        __ sw(a0, target);
-
-        // Update the write barrier.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+        __ li(a1, Operand(Context::SlotOffset(var->index())));
+        __ addu(a2, cp, a1);
+        __ sw(a0, MemOperand(a2, 0));
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use two more registers to avoid
+        // clobbering cp.
+        __ mov(a2, cp);
+        __ RecordWrite(a2, a1, a3);
       }
     }
   }
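The restored sequence computes the context slot address by hand and calls the older three-register RecordWrite, which clobbers its arguments, hence the extra copies that keep cp intact. Conceptually the barrier only needs to record old-to-new pointers for the scavenger; a hedged sketch with hypothetical types, not V8's heap API:

#include <vector>

struct HeapObjectSketch { bool in_new_space; };

static std::vector<HeapObjectSketch*> remembered_set;

// Invoked after storing `value` into a slot of `host`.
static void RecordWriteSketch(HeapObjectSketch* host, HeapObjectSketch* value) {
  if (!host->in_new_space && value->in_new_space) {
    remembered_set.push_back(host);  // scavenger must revisit this object
  }
}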
@@ -253,7 +242,7 @@
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
     ArgumentsAccessStub::Type type;
-    if (!is_classic_mode()) {
+    if (is_strict_mode()) {
       type = ArgumentsAccessStub::NEW_STRICT;
     } else if (function()->has_duplicate_parameters()) {
       type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
@@ -282,11 +271,8 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -323,74 +309,18 @@
 }
 
 
-void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
-  __ li(a2, Operand(profiling_counter_));
-  __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-  __ Subu(a3, a3, Operand(Smi::FromInt(delta)));
-  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-void FullCodeGenerator::EmitProfilingCounterReset() {
-  int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
-  if (isolate()->IsDebuggerActive()) {
-    // Detect debug break requests as soon as possible.
-    reset_value = 10;
-  }
-  __ li(a2, Operand(profiling_counter_));
-  __ li(a3, Operand(Smi::FromInt(reset_value)));
-  __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
-}
-
-
-static const int kMaxBackEdgeWeight = 127;
-static const int kBackEdgeDistanceDivisor = 142;
-
-
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
-                                       Label* back_edge_target) {
-  // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
-  // to make sure it is constant. Branch may emit a skip-or-jump sequence
-  // instead of the normal Branch. It seems that the "skip" part of that
-  // sequence is about as long as this Branch would be so it is safe to ignore
-  // that.
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
-  if (FLAG_count_based_interrupts) {
-    int weight = 1;
-    if (FLAG_weighted_back_edges) {
-      ASSERT(back_edge_target->is_bound());
-      int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-      weight = Min(kMaxBackEdgeWeight,
-                   Max(1, distance / kBackEdgeDistanceDivisor));
-    }
-    EmitProfilingCounterDecrement(weight);
-    __ slt(at, a3, zero_reg);
-    __ beq(at, zero_reg, &ok);
-    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
-    InterruptStub stub;
-    __ CallStub(&stub);
-  } else {
-    __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-    __ sltu(at, sp, t0);
-    __ beq(at, zero_reg, &ok);
-    // CallStub will emit a li t9 first, so it is safe to use the delay slot.
-    StackCheckStub stub;
-    __ CallStub(&stub);
-  }
+  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(t0));
+  StackCheckStub stub;
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
-  if (FLAG_count_based_interrupts) {
-    EmitProfilingCounterReset();
-  }
 
+  __ CallStub(&stub);
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
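The revert swaps the count-based interrupt budget for the older check of sp against the stack limit root: branch past the stub while sp >= limit (the hs condition), otherwise take the slow path. A simplified sketch, with the limit value and the stub as assumed stand-ins:

#include <cstdint>
#include <cstdio>

static uintptr_t stack_limit = 0x1000;  // assumption: maintained by the runtime

static void CallStackCheckStub() {
  std::puts("slow path: handle interrupts and OSR bookkeeping");
}

static void StackCheckSketch(uintptr_t sp) {
  if (sp >= stack_limit) return;  // mirrors Branch(&ok, hs, sp, Operand(t0))
  CallStackCheckStub();
}

int main() { StackCheckSketch(0x800); }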
@@ -412,32 +342,6 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kBackEdgeDistanceDivisor));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ Branch(&ok, ge, a3, Operand(zero_reg));
-      __ push(v0);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-        __ push(a2);
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        InterruptStub stub;
-        __ CallStub(&stub);
-      }
-      __ pop(v0);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
-    }
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
@@ -489,7 +393,7 @@
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -512,7 +416,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -547,7 +451,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -606,7 +510,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -673,7 +577,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -766,17 +670,15 @@
   __ sw(src, location);
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWriteContextSlot(scratch0,
-                              location.offset(),
-                              src,
-                              scratch1,
-                              kRAHasBeenSaved,
-                              kDontSaveFPRegs);
+    __ RecordWrite(scratch0,
+                   Operand(Context::SlotOffset(var->index())),
+                   scratch1,
+                   src);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -787,7 +689,13 @@
 
   Label skip;
   if (should_normalize) __ Branch(&skip);
-  PrepareForBailout(expr, TOS_REG);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
   if (should_normalize) {
     __ LoadRoot(t0, Heap::kTrueValueRootIndex);
     Split(eq, a0, Operand(t0), if_true, if_false, NULL);
@@ -797,17 +705,16 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function,
+                                        int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      ++(*global_count);
       break;
 
     case Variable::PARAMETER:
@@ -816,7 +723,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ sw(result_register(), StackOperand(variable));
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
           __ sw(t0, StackOperand(variable));
@@ -843,16 +750,10 @@
         __ sw(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(cp,
-                                  offset,
-                                  result_register(),
-                                  a2,
-                                  kRAHasBeenSaved,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
+        __ mov(a1, cp);
+        __ RecordWrite(a1, Operand(offset), a2, result_register());
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
           Comment cmnt(masm_, "[ Declaration");
           __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
           __ sw(at, ContextOperand(cp, variable->index()));
@@ -864,13 +765,11 @@
     case Variable::LOOKUP: {
       Comment cmnt(masm_, "[ Declaration");
       __ li(a2, Operand(variable->name()));
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
-      PropertyAttributes attr = (mode == CONST || mode == CONST_HARMONY)
-        ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
       __ li(a1, Operand(Smi::FromInt(attr)));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -880,7 +779,7 @@
         __ Push(cp, a2, a1);
         // Push initial value for function declaration.
         VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
           __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
           __ Push(cp, a2, a1, a0);
       } else {
@@ -895,6 +794,9 @@
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
@@ -957,7 +859,7 @@
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
-    CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+    __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
 
     __ Branch(&next_test, ne, v0, Operand(zero_reg));
@@ -1007,8 +909,7 @@
   Register null_value = t1;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ Branch(&exit, eq, a0, Operand(null_value));
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-  __ mov(a0, v0);
+
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(a0, &convert);
@@ -1021,17 +922,48 @@
   __ bind(&done_convert);
   __ push(a0);
 
-  // Check for proxies.
-  Label call_runtime;
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  Label next, call_runtime;
+  // Preload a couple of values used in the loop.
+  Register empty_fixed_array_value = t2;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = t3;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ mov(a1, a0);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register a1 contains the
+  // current JS object we've reached through the prototype chain.
+  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ Branch(&call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in a2 for the subsequent
+  // prototype load.
+  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (a3).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(a3, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ Branch(&check_prototype, eq, a1, Operand(a0));
+  __ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ Branch(&call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+  __ Branch(&next, ne, a1, Operand(null_value));
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
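The re-inlined fast path walks the prototype chain and bails to the runtime if any object has elements, lacks an enum cache, or, for prototypes only, has a non-empty cache. The same logic as a standalone sketch with illustrative structs:

struct MapSketch;

struct ObjectSketch {
  MapSketch* map;
  bool has_elements;      // elements array is not the empty fixed array
  bool enum_cache_empty;  // cached enum keys are the empty fixed array
};

struct MapSketch {
  ObjectSketch* prototype;  // null terminates the chain
  bool has_enum_cache;
};

static bool EnumCacheUsable(ObjectSketch* receiver) {
  for (ObjectSketch* o = receiver; o != nullptr; o = o->map->prototype) {
    if (o->has_elements) return false;          // must take the runtime path
    if (!o->map->has_enum_cache) return false;  // nothing to reuse
    if (o != receiver && !o->enum_cache_empty) return false;
  }
  return true;  // iterate with the receiver's cached enum keys
}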
@@ -1059,7 +991,7 @@
   __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset));
   __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Set up the four remaining stack slots.
+  // Setup the four remaining stack slots.
   __ push(v0);  // Map.
   __ lw(a1, FieldMemOperand(a2, FixedArray::kLengthOffset));
   __ li(a0, Operand(Smi::FromInt(0)));
@@ -1068,32 +1000,14 @@
   __ jmp(&loop);
 
   // We got a fixed array in register v0. Iterate through that.
-  Label non_proxy;
   __ bind(&fixed_array);
-
-  Handle<JSGlobalPropertyCell> cell =
-      isolate()->factory()->NewJSGlobalPropertyCell(
-          Handle<Object>(
-              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
-  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
-  __ LoadHeapObject(a1, cell);
-  __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
-  __ sw(a2, FieldMemOperand(a1, JSGlobalPropertyCell::kValueOffset));
-
-  __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
-  __ lw(a2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ GetObjectType(a2, a3, a3);
-  __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
-  __ li(a1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
-  __ bind(&non_proxy);
-  __ Push(a1, v0);  // Smi and array
+  __ li(a1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(a1, v0);
   __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
   __ li(a0, Operand(Smi::FromInt(0)));
   __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
 
   // Generate code for doing the condition check.
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   // Load the current count to a0, load the length to a1.
   __ lw(a0, MemOperand(sp, 0 * kPointerSize));
@@ -1107,22 +1021,17 @@
   __ addu(t0, a2, t0);  // Array base + scaled (smi) index.
   __ lw(a3, MemOperand(t0));  // Current entry.
 
-  // Get the expected map from the stack or a smi in the
+  // Get the expected map from the stack or a zero map in the
   // permanent slow case into register a2.
   __ lw(a2, MemOperand(sp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we may have to filter the key.
+  // If not, we have to filter the key.
   Label update_each;
   __ lw(a1, MemOperand(sp, 4 * kPointerSize));
   __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
   __ Branch(&update_each, eq, t0, Operand(a2));
 
-  // For proxies, no filtering is done.
-  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  ASSERT_EQ(Smi::FromInt(0), 0);
-  __ Branch(&update_each, eq, a2, Operand(zero_reg));
-
   // Convert the entry to a string or (smi) 0 if it isn't a property
   // any more. If the property has been removed while iterating, we
   // just skip it.
@@ -1138,7 +1047,7 @@
   __ mov(result_register(), a3);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each());
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
   }
 
   // Generate code for the body of the loop.
@@ -1151,7 +1060,7 @@
   __ Addu(a0, a0, Operand(Smi::FromInt(1)));
   __ push(a0);
 
-  EmitStackCheck(stmt, &loop);
+  EmitStackCheck(stmt);
   __ Branch(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1159,7 +1068,6 @@
   __ Drop(5);
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
@@ -1178,7 +1086,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->language_mode());
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ li(a0, Operand(info));
     __ push(a0);
     __ CallStub(&stub);
@@ -1209,7 +1117,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1221,7 +1129,7 @@
     }
     // If no outer scope calls eval, we do not need to check more
     // context extensions.
-    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1250,7 +1158,7 @@
       ? RelocInfo::CODE_TARGET
       : RelocInfo::CODE_TARGET_CONTEXT;
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallIC(ic, mode);
+  __ Call(ic, mode);
 }
 
 
@@ -1263,7 +1171,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX));
         __ Branch(slow, ne, temp, Operand(zero_reg));
@@ -1293,26 +1201,17 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == DYNAMIC_GLOBAL) {
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ Branch(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == CONST ||
-        local->mode() == CONST_HARMONY ||
-        local->mode() == LET) {
+    if (local->mode() == Variable::CONST) {
       __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
       __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      if (local->mode() == CONST) {
-        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
-      } else {  // LET || CONST_HARMONY
-        __ Branch(done, ne, at, Operand(zero_reg));
-        __ li(a0, Operand(var->name()));
-        __ push(a0);
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
+      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+      __ movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
     }
     __ Branch(done);
   }
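For const the restored code compares the loaded slot against the hole by subtraction and uses the branchless movz to substitute undefined, rather than throwing for let/const bindings as the newer harmony path did. Equivalent logic as a sketch with stand-in sentinel values:

static const int kTheHole = -1;   // assumption: hole sentinel
static const int kUndefined = 0;  // assumption: undefined value

static int LoadConstSketch(int slot_value) {
  // movz v0, a0, at with at == slot_value - kTheHole:
  // when the subtraction yields zero, v0 becomes undefined.
  return slot_value == kTheHole ? kUndefined : slot_value;
}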
@@ -1334,7 +1233,7 @@
       __ lw(a0, GlobalObjectOperand());
       __ li(a2, Operand(var->name()));
       Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+      __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
       context()->Plug(v0);
       break;
     }
@@ -1345,66 +1244,26 @@
       Comment cmnt(masm_, var->IsContextSlot()
                               ? "Context variable"
                               : "Stack variable");
-      if (var->binding_needs_init()) {
-        // var->scope() may be NULL when the proxy is located in eval code and
-        // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        //     var->location() == LOOKUP.
-        // always holds.
-        ASSERT(var->scope() != NULL);
-
-        // Check if the binding really needs an initialization check. The check
-        // can be skipped in the following situation: we have a LET or CONST
-        // binding in harmony mode, both the Variable and the VariableProxy have
-        // the same declaration scope (i.e. they are both in global code, in the
-        // same function or in the same eval code) and the VariableProxy is in
-        // the source physically located after the initializer of the variable.
-        //
-        // We cannot skip any initialization checks for CONST in non-harmony
-        // mode because const variables may be declared but never initialized:
-        //   if (false) { const x; }; var y = x;
-        //
-        // The condition on the declaration scopes is a conservative check for
-        // nested functions that access a binding and are called before the
-        // binding is initialized:
-        //   function() { f(); let x = 1; function f() { x = 2; } }
-        //
-        bool skip_init_check;
-        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
-          skip_init_check = false;
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        GetVar(v0, var);
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
+        if (var->mode() == Variable::LET) {
+          Label done;
+          __ Branch(&done, ne, at, Operand(zero_reg));
+          __ li(a0, Operand(var->name()));
+          __ push(a0);
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          __ bind(&done);
         } else {
-          // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
-          skip_init_check = var->mode() != CONST &&
-              var->initializer_position() < proxy->position();
+          __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+          __ movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
         }
-
-        if (!skip_init_check) {
-          // Let and const need a read barrier.
-          GetVar(v0, var);
-          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-          __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-            // Throw a reference error when using an uninitialized let/const
-            // binding in harmony mode.
-            Label done;
-            __ Branch(&done, ne, at, Operand(zero_reg));
-            __ li(a0, Operand(var->name()));
-            __ push(a0);
-            __ CallRuntime(Runtime::kThrowReferenceError, 1);
-            __ bind(&done);
-          } else {
-            // Uninitalized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST);
-            __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
-            __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
-          }
-          context()->Plug(v0);
-          break;
-        }
+        context()->Plug(v0);
       }
-      context()->Plug(var);
       break;
     }
 
@@ -1476,23 +1335,12 @@
 }
 
 
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
-  if (expression == NULL) {
-    __ LoadRoot(a1, Heap::kNullValueRootIndex);
-    __ push(a1);
-  } else {
-    VisitForStackValue(expression);
-  }
-}
-
-
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ lw(a3, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(constant_properties));
+  __ li(a1, Operand(expr->constant_properties()));
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1501,15 +1349,10 @@
       : ObjectLiteral::kNoFlags;
   __ li(a0, Operand(Smi::FromInt(flags)));
   __ Push(a3, a2, a1, a0);
-  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    __ CallStub(&stub);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1521,7 +1364,6 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
-  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1545,10 +1387,10 @@
             __ mov(a0, result_register());
             __ li(a2, Operand(key->handle()));
             __ lw(a1, MemOperand(sp));
-            Handle<Code> ic = is_classic_mode()
-                ? isolate()->builtins()->StoreIC_Initialize()
-                : isolate()->builtins()->StoreIC_Initialize_Strict();
-            CallIC(ic, RelocInfo::CODE_TARGET, key->id());
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
+            __ Call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
             VisitForEffect(value);
@@ -1571,29 +1413,21 @@
         }
         break;
       case ObjectLiteral::Property::GETTER:
-        accessor_table.lookup(key)->second->getter = value;
-        break;
       case ObjectLiteral::Property::SETTER:
-        accessor_table.lookup(key)->second->setter = value;
+        // Duplicate receiver on stack.
+        __ lw(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        __ li(a1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(a1);
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
     }
   }
 
-  // Emit code to define accessors, using only a single call to the runtime for
-  // each pair of corresponding getters and setters.
-  for (AccessorTable::Iterator it = accessor_table.begin();
-       it != accessor_table.end();
-       ++it) {
-    __ lw(a0, MemOperand(sp));  // Duplicate receiver.
-    __ push(a0);
-    VisitForStackValue(it->first);
-    EmitAccessor(it->second->getter);
-    EmitAccessor(it->second->setter);
-    __ li(a0, Operand(Smi::FromInt(NONE)));
-    __ push(a0);
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
-  }
-
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ lw(a0, MemOperand(sp));
@@ -1614,22 +1448,13 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
-
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
-
   __ mov(a0, result_register());
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
-  __ li(a1, Operand(constant_elements));
+  __ li(a1, Operand(expr->constant_elements()));
   __ Push(a3, a2, a1);
-  if (has_fast_elements && constant_elements_values->map() ==
+  if (expr->constant_elements()->map() ==
       isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
         FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1641,13 +1466,8 @@
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           FLAG_smi_only_arrays);
-    FastCloneShallowArrayStub::Mode mode = has_fast_elements
-      ? FastCloneShallowArrayStub::CLONE_ELEMENTS
-      : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
     __ CallStub(&stub);
   }
 
@@ -1668,30 +1488,21 @@
       __ push(v0);
       result_saved = true;
     }
-
     VisitForAccumulatorValue(subexpr);
 
-    if (constant_elements_kind == FAST_ELEMENTS) {
-      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-      __ lw(t2, MemOperand(sp));  // Copy of array literal.
-      __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
-      __ sw(result_register(), FieldMemOperand(a1, offset));
-      // Update the write barrier for the array store.
-      __ RecordWriteField(a1, offset, result_register(), a2,
-                          kRAHasBeenSaved, kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
-    } else {
-      __ lw(a1, MemOperand(sp));  // Copy of array literal.
-      __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
-      __ li(a3, Operand(Smi::FromInt(i)));
-      __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
-      __ mov(a0, result_register());
-      StoreArrayLiteralElementStub stub;
-      __ CallStub(&stub);
-    }
+    // Store the subexpression value in the array's elements.
+    __ lw(a1, MemOperand(sp));  // Copy of array literal.
+    __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ sw(result_register(), FieldMemOperand(a1, offset));
+
+    // Update the write barrier for the array store with v0 as the scratch
+    // register.
+    __ RecordWrite(a1, Operand(offset), a2, result_register());
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
+
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1821,7 +1632,7 @@
   __ li(a2, Operand(key->handle()));
   // Call load IC. It has arguments receiver and property name a0 and a2.
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1830,7 +1641,7 @@
   __ mov(a0, result_register());
   // Call keyed load IC. It has arguments key and receiver in a0 and a1.
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1858,7 +1669,7 @@
 
   __ bind(&stub_call);
   BinaryOpStub stub(op, mode);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -1941,13 +1752,13 @@
   __ pop(a1);
   BinaryOpStub stub(op, mode);
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   patch_site.EmitPatchInfo();
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1979,10 +1790,10 @@
       __ mov(a1, result_register());
       __ pop(a0);  // Restore value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic);
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic);
       break;
     }
     case KEYED_PROPERTY: {
@@ -1992,13 +1803,14 @@
       __ mov(a1, result_register());
       __ pop(a2);
       __ pop(a0);  // Restore value.
-      Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->KeyedStoreIC_Initialize()
-        : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic);
+      Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+        : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic);
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(v0);
 }
 
@@ -2010,10 +1822,10 @@
     __ mov(a0, result_register());
     __ li(a2, Operand(var->name()));
     __ lw(a1, GlobalObjectOperand());
-    Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->StoreIC_Initialize()
-        : isolate()->builtins()->StoreIC_Initialize_Strict();
-    CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -2038,12 +1850,12 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == LET && op != Token::INIT_LET) {
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(language_mode())));
+      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
       __ Push(cp, a1, a0);  // Context, name, strict mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
@@ -2063,14 +1875,12 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(a3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(
-            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+        __ RecordWrite(a1, Operand(offset), a2, a3);
       }
     }
 
-  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, a1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -2083,15 +1893,13 @@
       __ sw(v0, location);
       if (var->IsContextSlot()) {
         __ mov(a3, v0);
-        int offset = Context::SlotOffset(var->index());
-        __ RecordWriteContextSlot(
-            a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+        __ RecordWrite(a1, Operand(Context::SlotOffset(var->index())), a2, a3);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(v0);  // Value.
       __ li(a1, Operand(var->name()));
-      __ li(a0, Operand(Smi::FromInt(language_mode())));
+      __ li(a0, Operand(Smi::FromInt(strict_mode_flag())));
       __ Push(cp, a1, a0);  // Context, name, strict mode.
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
@@ -2129,10 +1937,10 @@
     __ pop(a1);
   }
 
-  Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->StoreIC_Initialize()
-        : isolate()->builtins()->StoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2181,10 +1989,10 @@
     __ pop(a2);
   }
 
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
@@ -2219,14 +2027,6 @@
 }
 
 
-void FullCodeGenerator::CallIC(Handle<Code> code,
-                               RelocInfo::Mode rmode,
-                               unsigned ast_id) {
-  ic_total_count_++;
-  __ Call(code, rmode, ast_id);
-}
-
-
 void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
@@ -2244,7 +2044,7 @@
   // Call the IC initialization code.
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-  CallIC(ic, mode, expr->id());
+  __ Call(ic, mode, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2277,7 +2077,7 @@
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count);
   __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize));  // Key.
-  CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+  __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
   RecordJSReturnSite(expr);
   // Restore context register.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2297,7 +2097,6 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
-  __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2306,7 +2105,8 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
@@ -2315,20 +2115,22 @@
   }
   __ push(a1);
 
-  // Push the receiver of the enclosing function.
+  // Push the receiver of the enclosing function and do runtime call.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ lw(a1, MemOperand(fp, receiver_offset * kPointerSize));
   __ push(a1);
-  // Push the language mode.
-  __ li(a1, Operand(Smi::FromInt(language_mode())));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ li(a1, Operand(Smi::FromInt(strict_mode)));
   __ push(a1);
 
-  // Push the start position of the scope the calls resides in.
-  __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
-  __ push(a1);
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
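One behavioral detail restored here: with harmony block scoping enabled, every direct eval call is treated as a strict mode eval. As a tiny sketch, with the flag and enum as stand-ins for V8's:

enum StrictModeFlagSketch { kNonStrictModeSketch, kStrictModeSketch };

static bool FLAG_harmony_block_scoping_sketch = false;  // stand-in flag

static StrictModeFlagSketch EvalStrictness(StrictModeFlagSketch current) {
  return FLAG_harmony_block_scoping_sketch ? kStrictModeSketch : current;
}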
 
 
@@ -2362,11 +2164,28 @@
         VisitForStackValue(args->at(i));
       }
 
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly
+      // in generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
+        // Push the function and resolve eval.
+        __ push(v0);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
       // Push a copy of the function (found below the arguments) and
       // resolve eval.
       __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
       __ push(a1);
-      EmitResolvePossiblyDirectEval(arg_count);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      __ bind(&done);
 
       // The runtime call returns a pair of values in v0 (function) and
       // v1 (receiver). Touch up the stack with the right values.
@@ -2376,7 +2195,6 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
-    __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2483,29 +2301,14 @@
   __ li(a0, Operand(arg_count));
   __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
 
-  // Record call targets in unoptimized code, but not in the snapshot.
-  CallFunctionFlags flags;
-  if (!Serializer::enabled()) {
-    flags = RECORD_CALL_TARGET;
-    Handle<Object> uninitialized =
-       TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ li(a2, Operand(cell));
-  } else {
-    flags = NO_CALL_FUNCTION_FLAGS;
-  }
-
-  CallConstructStub stub(flags);
-  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2517,7 +2320,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ And(t0, v0, Operand(kSmiTagMask));
   Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2525,8 +2328,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2538,7 +2340,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ And(at, v0, Operand(kSmiTagMask | 0x80000000));
   Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
 
@@ -2546,8 +2348,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2569,7 +2370,7 @@
   __ Branch(if_false, ne, at, Operand(zero_reg));
   __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
   __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2577,8 +2378,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2592,7 +2392,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
         if_true, if_false, fall_through);
 
@@ -2600,8 +2400,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2617,7 +2416,7 @@
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
   __ And(at, a1, Operand(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2625,8 +2424,8 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+    ZoneList<Expression*>* args) {
+
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2676,7 +2475,7 @@
   Label entry, loop;
   // The use of t2 to store the valueOf symbol assumes that it is not otherwise
   // used in the loop below.
-  __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
+  __ li(t2, Operand(FACTORY->value_of_symbol()));
   __ jmp(&entry);
   __ bind(&loop);
   __ lw(a3, MemOperand(t0, 0));
@@ -2702,13 +2501,12 @@
   __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2722,7 +2520,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a2);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
   __ Branch(if_false);
 
@@ -2730,8 +2528,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2745,7 +2542,7 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, a1, Operand(JS_ARRAY_TYPE),
         if_true, if_false, fall_through);
 
@@ -2753,8 +2550,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2768,15 +2564,15 @@
 
   __ JumpIfSmi(v0, if_false);
   __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2798,7 +2594,7 @@
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
   __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
         if_true, if_false, fall_through);
 
@@ -2806,8 +2602,7 @@
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2822,15 +2617,14 @@
                          &if_true, &if_false, &fall_through);
 
   __ pop(a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in a1 and the formal
@@ -2844,8 +2638,9 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
   Label exit;
   // Get the number of formal parameters.
   __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -2865,8 +2660,7 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2877,23 +2671,18 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ GetObjectType(v0, v0, a1);  // Map is now in v0.
   __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ Branch(&function, ge, a1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
 
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
-
-  // Check if the constructor in the map is a JS function.
+  // Check if the constructor in the map is a function.
   __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
   __ GetObjectType(v0, a1, a1);
   __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
@@ -2925,7 +2714,7 @@
 }
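Both versions of EmitClassOf above lean on STATIC_ASSERTs about the instance-type enum layout so that a class check costs only one or two integer comparisons. The restored 3.6 arrangement keeps all callable types contiguous at the end of the enum, so callability collapses to a single `ge` comparison. A toy illustration of the layout trick (the enum here is invented, not V8's real ordering):

enum InstanceType {
  STRING_TYPE,
  HEAP_NUMBER_TYPE,
  JS_OBJECT_TYPE,          // last non-callable type in this toy layout
  JS_FUNCTION_PROXY_TYPE,  // first callable type
  JS_FUNCTION_TYPE,        // callables run to the end of the enum
  LAST_TYPE = JS_FUNCTION_TYPE
};

static_assert(JS_FUNCTION_PROXY_TYPE == JS_OBJECT_TYPE + 1,
              "callable types must directly follow non-callable ones");
static_assert(LAST_TYPE == JS_FUNCTION_TYPE,
              "callable types must end the enum");

// With that layout, "is this object callable?" is one comparison.
bool IsCallable(InstanceType type) {
  return type >= JS_FUNCTION_PROXY_TYPE;
}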
 
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2933,7 +2722,6 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2947,8 +2735,9 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
 
@@ -2971,10 +2760,10 @@
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   if (CpuFeatures::IsSupported(FPU)) {
     __ PrepareCallCFunction(1, a0);
-    __ lw(a0, ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+    __ li(a0, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
+
     CpuFeatures::Scope scope(FPU);
     // 0x41300000 is the top half of 1.0 x 2^20 as a double.
     __ li(a1, Operand(0x41300000));
@@ -2989,8 +2778,7 @@
   } else {
     __ PrepareCallCFunction(2, a0);
     __ mov(a0, s0);
-    __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
-    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalContextOffset));
+    __ li(a1, Operand(ExternalReference::isolate_address()));
     __ CallCFunction(
         ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
   }
@@ -2999,10 +2787,9 @@
 }
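The FPU path above materializes the comment's `(1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20)` trick directly in registers: 0x41300000 is the high word of the double 1.0 x 2^20, and the 32 random bits fill the low mantissa word. A standalone sketch of the same bit manipulation (a hypothetical helper, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Builds 2^20 + random_bits / 2^32 as a raw bit pattern, then subtracts
// 2^20, leaving a uniformly distributed double in [0, 1).
double RandomDoubleFromBits(uint32_t random_bits) {
  uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d - 1048576.0;  // 1048576.0 == 1.0 * 2^20
}

int main() {
  std::printf("%f\n", RandomDoubleFromBits(0x80000000u));  // prints 0.500000
}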
 
 
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3012,10 +2799,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3026,8 +2812,7 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3046,70 +2831,18 @@
 }
 
 
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label runtime, done;
-  Register object = v0;
-  Register result = v0;
-  Register scratch0 = t5;
-  Register scratch1 = a1;
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ GetObjectType(object, scratch1, scratch1);
-  __ Assert(eq, "Trying to get date field from non-date.",
-      scratch1, Operand(JS_DATE_TYPE));
-#endif
-
-  if (index->value() == 0) {
-    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ li(scratch1, Operand(stamp));
-      __ lw(scratch1, MemOperand(scratch1));
-      __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
-      __ Branch(&runtime, ne, scratch1, Operand(scratch0));
-      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
-                                            kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch1);
-    __ li(a1, Operand(index));
-    __ Move(a0, object);
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-
-  context()->Plug(v0);
-}
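The deleted EmitDateField fast path reads a cached date component and validates it against a global cache stamp; if the stamp has changed (for example after a timezone update), it falls back to get_date_field_function in C++. A sketch of that stamp-validated cache pattern, with invented types standing in for JSDate:

#include <cstdint>

int64_t date_cache_stamp = 0;  // bumped whenever timezone/DST data changes

struct JSDate {
  int64_t value;        // field 0: ms since the epoch, never cached
  int64_t cache_stamp;  // stamp recorded when the fields below were computed
  int64_t fields[8];    // cached year, month, day, ... (toy layout)
};

int64_t GetDateField(JSDate* date, int index,
                     int64_t (*runtime_fallback)(JSDate*, int)) {
  if (index == 0) return date->value;
  if (date->cache_stamp == date_cache_stamp) {
    return date->fields[index];          // fast path: cache still valid
  }
  return runtime_fallback(date, index);  // slow path recomputes and recaches
}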
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  if (CpuFeatures::IsSupported(FPU)) {
-    MathPowStub stub(MathPowStub::ON_STACK);
-    __ CallStub(&stub);
-  } else {
-    __ CallRuntime(Runtime::kMath_pow, 2);
-  }
+  MathPowStub stub;
+  __ CallStub(&stub);
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -3128,17 +2861,14 @@
   __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ mov(a2, v0);
-  __ RecordWriteField(
-      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(a1, Operand(JSValue::kValueOffset - kHeapObjectTag), a2, a3);
 
   __ bind(&done);
   context()->Plug(v0);
 }
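The replacement of RecordWriteField with the plain 3.6 RecordWrite here (and again in ic-mips.cc below) tracks the removal of incremental marking: the older barrier only needs to maintain the old-to-new remembered set for the scavenger. A conceptual sketch of that generational barrier, with toy types rather than V8's heap structures:

#include <unordered_set>

struct HeapObject { bool in_new_space; };

bool InNewSpace(const HeapObject* o) { return o->in_new_space; }

std::unordered_set<HeapObject**> remembered_set;  // slots with old->new edges

void WriteBarrier(HeapObject* holder, HeapObject** slot, HeapObject* value) {
  *slot = value;
  if (!InNewSpace(holder) && InNewSpace(value)) {
    // Record the slot so a scavenge can update it without scanning old space.
    remembered_set.insert(slot);
  }
}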
 
 
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -3150,8 +2880,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3169,8 +2898,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3179,6 +2907,7 @@
 
   Register object = a1;
   Register index = a0;
+  Register scratch = a2;
   Register result = v0;
 
   __ pop(object);
@@ -3188,6 +2917,7 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
+                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -3216,8 +2946,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3226,7 +2955,8 @@
 
   Register object = a1;
   Register index = a0;
-  Register scratch = a3;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
   Register result = v0;
 
   __ pop(object);
@@ -3236,7 +2966,8 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch,
+                                  scratch1,
+                                  scratch2,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -3265,9 +2996,9 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
+
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3277,8 +3008,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3290,11 +3020,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3303,11 +3032,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3316,24 +3044,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
-  // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
-  __ CallStub(&stub);
-  context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ mov(a0, result_register());  // Stub requires parameter in a0 and on tos.
@@ -3342,9 +3056,8 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3352,8 +3065,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3362,31 +3074,18 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
-  // Check for proxy.
-  Label proxy, done;
-  __ GetObjectType(v0, a1, a1);
-  __ Branch(&proxy, eq, a1, Operand(JS_FUNCTION_PROXY_TYPE));
-
   // InvokeFunction requires the function in a1. Move it in there.
   __ mov(a1, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(a1, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ jmp(&done);
-
-  __ bind(&proxy);
-  __ push(v0);
-  __ CallRuntime(Runtime::kCall, args->length());
-  __ bind(&done);
-
   context()->Plug(v0);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   RegExpConstructResultStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3396,8 +3095,7 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3456,31 +3154,16 @@
   __ sw(scratch1, MemOperand(index2, 0));
   __ sw(scratch2, MemOperand(index1, 0));
 
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   scratch1,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   ne,
-                   &no_remembered_set);
+  Label new_space;
+  __ InNewSpace(elements, scratch1, eq, &new_space);
   // Possible optimization: do a check that both values are Smis
   // (or them and test against Smi mask).
 
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index1,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index2,
-                         scratch2,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
+  __ mov(scratch1, elements);
+  __ RecordWriteHelper(elements, index1, scratch2);
+  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
 
-  __ bind(&no_remembered_set);
+  __ bind(&new_space);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
@@ -3494,8 +3177,7 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3548,8 +3230,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   Register right = v0;
@@ -3565,7 +3246,8 @@
   __ Branch(&ok, eq, left, Operand(right));
   // Fail if either is a non-HeapObject.
   __ And(tmp, left, Operand(right));
-  __ JumpIfSmi(tmp, &fail);
+  __ And(at, tmp, Operand(kSmiTagMask));
+  __ Branch(&fail, eq, at, Operand(zero_reg));
   __ lw(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
   __ lbu(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
   __ Branch(&fail, ne, tmp2, Operand(JS_REGEXP_TYPE));
@@ -3585,8 +3267,7 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false;
@@ -3599,15 +3280,14 @@
   __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset));
   __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3622,12 +3302,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
-  ZoneList<Expression*>* args = expr->arguments();
+
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
@@ -3795,7 +3475,7 @@
 
   // One-character separator case.
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its ascii character value.
   __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator.
@@ -3806,7 +3486,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ASCII char (in lower byte).
+  //   separator: Single separator ascii char (in lower byte).
 
   // Copy the separator character to the result.
   __ sb(separator, MemOperand(result_pos));
@@ -3891,7 +3571,7 @@
     RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
     Handle<Code> ic =
         isolate()->stub_cache()->ComputeCallInitialize(arg_count, mode);
-    CallIC(ic, mode, expr->id());
+    __ Call(ic, mode, expr->id());
     // Restore context register.
     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
@@ -3912,9 +3592,7 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
-            ? kNonStrictMode : kStrictMode;
-        __ li(a1, Operand(Smi::FromInt(strict_mode_flag)));
+        __ li(a1, Operand(Smi::FromInt(strict_mode_flag())));
         __ push(a1);
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(v0);
@@ -3922,7 +3600,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
         if (var->IsUnallocated()) {
           __ lw(a2, GlobalObjectOperand());
           __ li(a1, Operand(var->name()));
@@ -3965,35 +3643,18 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
-      } else if (context()->IsTest()) {
-        const TestContext* test = TestContext::cast(context());
-        // The labels are swapped for the recursive call.
-        VisitForControl(expr->expression(),
-                        test->false_label(),
-                        test->true_label(),
-                        test->fall_through());
-        context()->Plug(test->true_label(), test->false_label());
       } else {
-        // We handle value contexts explicitly rather than simply visiting
-        // for control and plugging the control flow into the context,
-        // because we need to prepare a pair of extra administrative AST ids
-        // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
-        Label materialize_true, materialize_false, done;
-        VisitForControl(expr->expression(),
-                        &materialize_false,
-                        &materialize_true,
-                        &materialize_true);
-        __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
-        __ LoadRoot(v0, Heap::kTrueValueRootIndex);
-        if (context()->IsStackValue()) __ push(v0);
-        __ jmp(&done);
-        __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
-        __ LoadRoot(v0, Heap::kFalseValueRootIndex);
-        if (context()->IsStackValue()) __ push(v0);
-        __ bind(&done);
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
       }
       break;
     }
@@ -4047,7 +3708,7 @@
   VisitForAccumulatorValue(expr->expression());
   SetSourcePosition(expr->position());
   __ mov(a0, result_register());
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
   context()->Plug(v0);
 }
 
@@ -4158,7 +3819,7 @@
   SetSourcePosition(expr->position());
 
   BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
-  CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4188,10 +3849,10 @@
       __ mov(a0, result_register());  // Value.
       __ li(a2, Operand(prop->key()->AsLiteral()->handle()));  // Name.
       __ pop(a1);  // Receiver.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4206,10 +3867,10 @@
       __ mov(a0, result_register());  // Value.
       __ pop(a1);  // Key.
       __ pop(a2);  // Receiver.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
         if (!context()->IsEffect()) {
@@ -4235,7 +3896,7 @@
     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
-    CallIC(ic);
+    __ Call(ic);
     PrepareForBailout(expr, TOS_REG);
     context()->Plug(v0);
   } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -4255,24 +3916,19 @@
     context()->Plug(v0);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInDuplicateContext(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Expression* sub_expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(sub_expr);
+    VisitForTypeofValue(expr);
   }
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(v0, if_true);
@@ -4308,11 +3964,10 @@
     Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(v0, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ GetObjectType(v0, v0, a1);
-    __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
-    Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
-          if_true, if_false, fall_through);
+    __ GetObjectType(v0, a1, v0);  // Leave map in a1.
+    Split(ge, v0, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(v0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4331,7 +3986,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  Split(eq, v0, Operand(at), if_true, if_false, fall_through);
 }
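EmitLiteralCompareTypeof handles the pattern `typeof v == "literal"` by testing the value's map and instance type directly instead of materializing the typeof string and calling the generic compare IC. The dispatch it implements looks roughly like this toy, interpreter-level version (JSType is invented for illustration):

#include <string>

enum class JSType { kSmi, kHeapNumber, kString, kBoolean, kUndefined,
                    kFunction, kObject };

bool TypeofEquals(JSType type, const std::string& literal) {
  if (literal == "number")    return type == JSType::kSmi ||
                                     type == JSType::kHeapNumber;
  if (literal == "string")    return type == JSType::kString;
  if (literal == "boolean")   return type == JSType::kBoolean;
  if (literal == "undefined") return type == JSType::kUndefined;
  if (literal == "function")  return type == JSType::kFunction;
  if (literal == "object")    return type == JSType::kObject;
  return false;  // unknown literal never matches: the final jmp(if_false)
}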
 
 
@@ -4339,12 +4005,9 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
-
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
+
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4352,13 +4015,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ LoadRoot(t0, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
       break;
@@ -4367,7 +4037,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       // The stub returns 0 for true.
       Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
       break;
@@ -4380,26 +4050,36 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = eq;
+          __ mov(a0, result_register());
+          __ pop(a1);
           break;
         case Token::LT:
           cc = lt;
+          __ mov(a0, result_register());
+          __ pop(a1);
           break;
         case Token::GT:
-          cc = gt;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = lt;
+          __ mov(a1, result_register());
+          __ pop(a0);
          break;
         case Token::LTE:
-          cc = le;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = ge;
+          __ mov(a1, result_register());
+          __ pop(a0);
           break;
         case Token::GTE:
           cc = ge;
+          __ mov(a0, result_register());
+          __ pop(a1);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      __ mov(a0, result_register());
-      __ pop(a1);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4413,9 +4093,9 @@
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
       Handle<Code> ic = CompareIC::GetUninitialized(op);
-      CallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+      __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
     }
   }
@@ -4426,9 +4106,8 @@
 }
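The Token::GT and Token::LTE cases above implement only the `lt` and `ge` machine conditions and derive the other two relations by exchanging the operand registers, which also gives the ECMA-262 conversion order the comments cite. The shape of that reduction, in toy form:

enum class Op { kLT, kGT, kLTE, kGTE };

bool Compare(Op op, double left, double right) {
  switch (op) {
    case Op::kLT:  return left < right;
    case Op::kGT:  return right < left;   // reuse "<" with swapped operands
    case Op::kLTE: return right >= left;  // reuse ">=" with swapped operands
    case Op::kGTE: return left >= right;
  }
  return false;
}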
 
 
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+  Comment cmnt(masm_, "[ CompareToNull");
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4436,23 +4115,18 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Heap::RootListIndex nil_value = nil == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ mov(a0, result_register());
-  __ LoadRoot(a1, nil_value);
-  if (expr->op() == Token::EQ_STRICT) {
+  __ LoadRoot(a1, Heap::kNullValueRootIndex);
+  if (expr->is_strict()) {
     Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
   } else {
-    Heap::RootListIndex other_nil_value = nil == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ LoadRoot(a1, other_nil_value);
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ JumpIfSmi(a0, if_false);
+    __ And(at, a0, Operand(kSmiTagMask));
+    __ Branch(if_false, eq, at, Operand(zero_reg));
     // It can be an undetectable object.
     __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
     __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 2c4da1a..a76c215 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -210,8 +210,7 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(elements, scratch2, scratch1);
 }
 
 
@@ -384,10 +383,10 @@
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                               int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState extra_state) {
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- a1    : receiver
   //  -- a2    : name
@@ -397,11 +396,11 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_state,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1, t2);
+      masm, flags, a1, a2, a3, t0, t1);
 
   // If the stub cache probing failed, the receiver might be a value.
   // For value objects, we use the map of the prototype objects for
@@ -437,7 +436,7 @@
   // Probe the stub cache for the value object.
   __ bind(&probe);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1, t2);
+      masm, flags, a1, a2, a3, t0, t1);
 
   __ bind(&miss);
 }
@@ -463,7 +462,7 @@
 }
 
 
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -486,10 +485,10 @@
 }
 
 
-void CallICBase::GenerateMiss(MacroAssembler* masm,
-                              int argc,
-                              IC::UtilityId id,
-                              Code::ExtraICState extra_state) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -505,29 +504,29 @@
   // Get the receiver of the function from the stack.
   __ lw(a3, MemOperand(sp, argc*kPointerSize));
 
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Push the receiver and the name of the function.
-    __ Push(a3, a2);
+  // Push the receiver and the name of the function.
+  __ Push(a3, a2);
 
-    // Call the entry.
-    __ PrepareCEntryArgs(2);
-    __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));
+  // Call the entry.
+  __ li(a0, Operand(2));
+  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-    CEntryStub stub(1);
-    __ CallStub(&stub);
+  CEntryStub stub(1);
+  __ CallStub(&stub);
 
-    // Move result to a1 and leave the internal frame.
-    __ mov(a1, v0);
-  }
+  // Move result to a1 and leave the internal frame.
+  __ mov(a1, v0);
+  __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
   if (id == IC::kCallIC_Miss) {
     Label invoke, global;
     __ lw(a2, MemOperand(sp, argc * kPointerSize));
-    __ JumpIfSmi(a2, &invoke);
+    __ andi(t0, a2, kSmiTagMask);
+    __ Branch(&invoke, eq, t0, Operand(zero_reg));
     __ GetObjectType(a2, a3, a3);
     __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
     __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -539,7 +538,7 @@
     __ bind(&invoke);
   }
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -551,6 +550,18 @@
 }
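This hunk reverts an RAII FrameScope back to explicit EnterInternalFrame()/LeaveInternalFrame() calls, so the 3.6 code must pair them by hand on every path through the miss handler. The scope idiom being removed looks essentially like this (real method names, but a sketched toy MacroAssembler, not V8's):

struct MacroAssembler {
  void EnterInternalFrame() { /* emit frame construction */ }
  void LeaveInternalFrame() { /* emit frame teardown */ }
};

class FrameScope {
 public:
  explicit FrameScope(MacroAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~FrameScope() { masm_->LeaveInternalFrame(); }  // runs on every exit path

 private:
  MacroAssembler* masm_;
};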
 
 
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
+
+
 void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                  int argc,
                                  Code::ExtraICState extra_ic_state) {
@@ -566,6 +577,27 @@
 }
 
 
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   //  -- a2    : name
@@ -617,13 +649,12 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(a2);  // Save the key.
-    __ Push(a1, a2);  // Pass the receiver and the key.
-    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-    __ pop(a2);  // Restore the key.
-  }
+  __ EnterInternalFrame();
+  __ push(a2);  // Save the key.
+  __ Push(a1, a2);  // Pass the receiver and the key.
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(a2);  // Restore the key.
+  __ LeaveInternalFrame();
   __ mov(a1, v0);
   __ jmp(&do_call);
 
@@ -682,7 +713,7 @@
   __ JumpIfSmi(a2, &miss);
   __ IsObjectJSStringType(a2, a0, &miss);
 
-  CallICBase::GenerateNormal(masm, argc);
+  GenerateCallNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
@@ -702,7 +733,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a0, a2, a3, t0, t1, t2);
+      masm, flags, a0, a2, a3, t0, t1);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -758,6 +789,8 @@
                                                 Register scratch3,
                                                 Label* unmapped_case,
                                                 Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
   // Check that the receiver is a JSObject. Because of the map check
   // later, we do not need to check for interceptors or whether it
   // requires access checks.
@@ -771,12 +804,10 @@
   __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
 
   // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
   __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1,
-              scratch2,
-              Heap::kNonStrictArgumentsElementsMapRootIndex,
-              slow_case,
-              DONT_DO_SMI_CHECK);
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -788,7 +819,7 @@
       FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
 
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, key, scratch3);
+  __ mul(scratch3, key, scratch3);
   __ Addu(scratch3, scratch3, Operand(kOffset));
 
   __ Addu(scratch2, scratch1, scratch3);
@@ -801,7 +832,7 @@
   // map in scratch1).
   __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
   __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, scratch2, scratch3);
+  __ mul(scratch3, scratch2, scratch3);
   __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
   __ Addu(scratch2, scratch1, scratch3);
   return MemOperand(scratch2);
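GenerateMappedArgumentsLookup computes where `arguments[key]` lives for a non-strict arguments object: each entry of the parameter map either aliases a context slot holding the formal parameter, or falls through to a plain backing store for unmapped elements. A toy model of the two-level lookup (types invented for illustration; assumes key is in range):

#include <cstddef>
#include <vector>

struct Context { std::vector<int> slots; };

struct ParameterMap {
  std::vector<int> context_indices;  // -1 marks "not mapped"
  std::vector<int> backing_store;    // unmapped elements
};

int LoadArgument(const ParameterMap& map, const Context& cx, size_t key) {
  if (key < map.context_indices.size() && map.context_indices[key] >= 0) {
    return cx.slots[map.context_indices[key]];  // aliases the parameter slot
  }
  return map.backing_store[key];  // unmapped: ordinary element load
}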
@@ -820,15 +851,13 @@
   const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
   Register backing_store = parameter_map;
   __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  __ CheckMap(backing_store,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              slow_case,
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
               DONT_DO_SMI_CHECK);
   __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
   __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
   __ li(scratch, Operand(kPointerSize >> 1));
-  __ Mul(scratch, key, scratch);
+  __ mul(scratch, key, scratch);
   __ Addu(scratch,
           scratch,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -846,8 +875,8 @@
   Label slow, notin;
   MemOperand mapped_location =
       GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
-  __ Ret(USE_DELAY_SLOT);
   __ lw(v0, mapped_location);
+  __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in a2.
   MemOperand unmapped_location =
@@ -855,8 +884,8 @@
   __ lw(a2, unmapped_location);
   __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
   __ Branch(&slow, eq, a2, Operand(a3));
-  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a2);
+  __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
 }
@@ -870,26 +899,21 @@
   //  -- lr     : return address
   // -----------------------------------
   Label slow, notin;
-  // Store address is returned in register (of MemOperand) mapped_location.
   MemOperand mapped_location =
       GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
   __ sw(a0, mapped_location);
-  __ mov(t5, a0);
+  // Verify that the mapped_location MemOperand is a register with no offset.
   ASSERT_EQ(mapped_location.offset(), 0);
-  __ RecordWrite(a3, mapped_location.rm(), t5,
-                 kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(a3, mapped_location.rm(), t5);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in a3.
-  // Store address is returned in register (of MemOperand) unmapped_location.
   MemOperand unmapped_location =
       GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
   __ sw(a0, unmapped_location);
-  __ mov(t5, a0);
   ASSERT_EQ(unmapped_location.offset(), 0);
-  __ RecordWrite(a3, unmapped_location.rm(), t5,
-                 kRAHasNotBeenSaved, kDontSaveFPRegs);
+  __ RecordWrite(a3, unmapped_location.rm(), t5);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&slow);
@@ -1035,32 +1059,19 @@
   __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset));
   __ sra(at, t0, String::kHashShift);
   __ xor_(a3, a3, at);
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(a3, a3, Operand(mask));
+  __ And(a3, a3, Operand(KeyedLookupCache::kCapacityMask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys =
       ExternalReference::keyed_lookup_cache_keys(isolate);
   __ li(t0, Operand(cache_keys));
   __ sll(at, a3, kPointerSizeLog2 + 1);
   __ addu(t0, t0, at);
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
-    __ Branch(&try_next_entry, ne, a2, Operand(t1));
-    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
-    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
-    __ bind(&try_next_entry);
-  }
-
-  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
+  __ lw(t1, MemOperand(t0));  // Move t0 to symbol.
+  __ Addu(t0, t0, Operand(kPointerSize));
   __ Branch(&slow, ne, a2, Operand(t1));
-  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
+  __ lw(t1, MemOperand(t0));
   __ Branch(&slow, ne, a0, Operand(t1));
 
   // Get field offset.
@@ -1070,24 +1081,15 @@
   // a3     : lookup cache index
   ExternalReference cache_field_offsets =
       ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    __ li(t0, Operand(cache_field_offsets));
-    __ sll(at, a3, kPointerSizeLog2);
-    __ addu(at, t0, at);
-    __ lw(t1, MemOperand(at, kPointerSize * i));
-    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
-    __ Subu(t1, t1, t2);
-    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
-    if (i != 0) {
-      __ Branch(&load_in_object_property);
-    }
-  }
+  __ li(t0, Operand(cache_field_offsets));
+  __ sll(at, a3, kPointerSizeLog2);
+  __ addu(at, t0, at);
+  __ lw(t1, MemOperand(at));
+  __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
+  __ Subu(t1, t1, t2);
+  __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
 
   // Load in-object property.
-  __ bind(&load_in_object_property);
   __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
   __ addu(t2, t2, t1);  // Index from start of object.
   __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
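The cache probed above maps a (receiver map, symbol) pair to a property field offset; the removed loop tried kEntriesPerBucket ways per hash bucket, while the restored 3.6 code is direct-mapped with a single probe. Roughly, as a sketch (structure invented, sizes arbitrary):

#include <cstdint>

struct Entry { const void* map; const void* symbol; int field_offset; };

const int kCapacity = 64;  // power of two so the mask works
Entry cache[kCapacity];

int Lookup(const void* map, const void* symbol, uint32_t hash) {
  Entry& e = cache[hash & (kCapacity - 1)];  // one direct-mapped probe
  if (e.map == map && e.symbol == symbol) {
    return e.field_offset;  // hit: load the property at this offset
  }
  return -1;  // miss: the generated code jumps to the slow path
}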
@@ -1148,12 +1150,14 @@
 
   Register receiver = a1;
   Register index = a0;
-  Register scratch = a3;
+  Register scratch1 = a2;
+  Register scratch2 = a3;
   Register result = v0;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch,
+                                          scratch1,
+                                          scratch2,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1197,193 +1201,109 @@
   //  -- a2     : receiver
   //  -- ra     : return address
   // -----------------------------------
-  Label slow, array, extra, check_if_double_array;
-  Label fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label transition_smi_elements, finish_object_store, non_double_value;
-  Label transition_double_elements;
+
+  Label slow, fast, array, extra, exit;
 
   // Register usage.
   Register value = a0;
   Register key = a1;
   Register receiver = a2;
-  Register receiver_map = a3;
-  Register elements_map = t2;
-  Register elements = t3;  // Elements array of the receiver.
-  // t0 and t1 are used as general scratch registers.
+  Register elements = a3;  // Elements array of the receiver.
+  // t0 is used as ip in the arm version.
+  // t3-t4 are used as temporaries.
 
   // Check that the key is a smi.
   __ JumpIfNotSmi(key, &slow);
   // Check that the object isn't a smi.
   __ JumpIfSmi(receiver, &slow);
+
   // Get the map of the object.
-  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ lbu(t0, FieldMemOperand(t3, Map::kBitFieldOffset));
   __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
   __ Branch(&slow, ne, t0, Operand(zero_reg));
   // Check if the object is a JS array or not.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
+  __ lbu(t3, FieldMemOperand(t3, Map::kInstanceTypeOffset));
+
+  __ Branch(&array, eq, t3, Operand(JS_ARRAY_TYPE));
   // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+  __ Branch(&slow, lt, t3, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ Branch(&slow, eq, t3, Operand(JS_PROXY_TYPE));
+  __ Branch(&slow, eq, t3, Operand(JS_FUNCTION_PROXY_TYPE));
 
   // Object case: Check key against length in the elements array.
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode and writable.
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
   // Check array bounds. Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object_with_map_check, lo, key, Operand(t0));
+  __ Branch(&fast, lo, key, Operand(t0));
+  // Fall through to slow if the untagged index >= length.
 
   // Slow case, handle jump to runtime.
   __ bind(&slow);
+
   // Entry registers are intact.
   // a0: value.
   // a1: key.
   // a2: receiver.
+
   GenerateRuntimeSetProperty(masm, strict_mode);
 
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
+
   __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
   // Only support writing to array[array.length].
   __ Branch(&slow, ne, key, Operand(t0));
   // Check for room in the elements backing store.
   // Both the key and the length of FixedArray are smis.
   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
   __ Branch(&slow, hs, key, Operand(t0));
-  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(
-      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
-
   // Calculate key + 1 as smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Addu(t0, key, Operand(Smi::FromInt(1)));
-  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&fast_object_without_map_check);
+  STATIC_ASSERT(0 == kSmiTag);
+  __ Addu(t3, key, Operand(Smi::FromInt(1)));
+  __ sw(t3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&fast);
 
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  // Add 1 to key, and go to common element store code for doubles.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Addu(t0, key, Operand(Smi::FromInt(1)));
-  __ sw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
   // is, the length is always a smi.
+
   __ bind(&array);
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(t3, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(t0, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&slow, ne, t3, Operand(t0));
 
   // Check the key against the length in the array.
   __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ Branch(&extra, hs, key, Operand(t0));
   // Fall through to fast case.
 
-  __ bind(&fast_object_with_map_check);
-  Register scratch_value = t0;
-  Register address = t1;
-  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&fast_double_with_map_check,
-            ne,
-            elements_map,
-            Heap::kFixedArrayMapRootIndex);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
-  __ sw(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, value);
+  __ bind(&fast);
+  // Fast case, store the value to the elements backing store.
+  __ Addu(t4, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(t1, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t4, t4, Operand(t1));
+  __ sw(value, MemOperand(t4));
+  // Skip write barrier if the written value is a smi.
+  __ JumpIfSmi(value, &exit);
 
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
-  __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
-  __ mov(v0, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 value,
-                 kRAHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
+  __ Subu(t3, t4, Operand(elements));
+
+  __ RecordWrite(elements, Operand(t3), t4, t5);
+  __ bind(&exit);
+
+  __ mov(v0, a0);  // Return the value written.
   __ Ret();
-
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 receiver,
-                                 elements,
-                                 a3,
-                                 t0,
-                                 t1,
-                                 t2,
-                                 &transition_double_elements);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, value);
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, t0, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         &slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         &slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         &slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
 }
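The rolled-back GenerateGeneric above is considerably simpler than the removed version: one fast path, no elements-kind transitions. A rough C++ analogue of that fast path, under the stated assumptions (fast FixedArray elements, smi key); the helpers are hypothetical, not V8 API:

#include <cstdint>
#include <vector>

inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }  // kSmiTag == 0

// Returns false where the stub branches to &slow; &extra (growing by
// one element at array.length) is not modeled here.
bool FastKeyedStore(std::vector<int32_t>& elements, int32_t key,
                    int32_t value, bool* needs_write_barrier) {
  if (key < 0 || key >= static_cast<int32_t>(elements.size())) return false;
  elements[key] = value;
  // Skip the write barrier for smi values, as JumpIfSmi(value, &exit)
  // does above; heap pointers must be recorded (RecordWrite).
  *needs_write_barrier = !IsSmi(value);
  return true;
}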
 
 
@@ -1462,47 +1382,6 @@
 }
 
 
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a2     : receiver
-  //  -- a3     : target map
-  //  -- ra     : return address
-  // -----------------------------------
-  // Must return the modified receiver in v0.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a2);
-    __ bind(&fail);
-  }
-
-  __ push(a2);
-  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
-    MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a2     : receiver
-  //  -- a3     : target map
-  //  -- ra     : return address
-  // -----------------------------------
-  // Must return the modified receiver in v0.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a2);
-    __ bind(&fail);
-  }
-
-  __ push(a2);
-  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
@@ -1516,7 +1395,7 @@
   Code::Flags flags =
       Code::ComputeFlags(Code::STORE_IC, MONOMORPHIC, strict_mode);
   Isolate::Current()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1, t2);
+      masm, flags, a1, a2, a3, t0, t1);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -1547,10 +1426,11 @@
   //  -- ra    : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSArray::SetElementsLength accepts
-  // (currently anything except for external arrays, which means anything with
-  // elements of FixedArray type).  Value must be a number, but only smis are
-  // accepted as the most common case.
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
 
   Label miss;
 
@@ -1572,13 +1452,6 @@
   __ GetObjectType(scratch, scratch, scratch);
   __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
 
-  // Check that the array has fast properties, otherwise the length
-  // property might have been redefined.
-  __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
-  __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&miss, eq, scratch, Operand(at));
-
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1648,9 +1521,11 @@
     case Token::LT:
       return lt;
     case Token::GT:
-      return gt;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return lt;
     case Token::LTE:
-      return le;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return ge;
     case Token::GTE:
       return ge;
     default:
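The two reversed cases rely on a simple identity, sketched below: with the operands swapped, greater-than becomes less-than and less-or-equal becomes greater-or-equal, which lets the stub still convert the left operand first as ECMA-262 requires.

#include <cassert>

int main() {
  for (int a = -2; a <= 2; ++a) {
    for (int b = -2; b <= 2; ++b) {
      assert((a > b) == (b < a));    // Token::GT  -> lt on swapped operands
      assert((a <= b) == (b >= a));  // Token::LTE -> ge on swapped operands
    }
  }
  return 0;
}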
@@ -1670,9 +1545,6 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
-    if (state == KNOWN_OBJECTS) {
-      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
-    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
@@ -1700,8 +1572,7 @@
   // If the instruction following the call is not an andi at, rx, #yyy, nothing
   // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
-  if (!(Assembler::IsAndImmediate(instr) &&
-        Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+  if (!Assembler::IsAndImmediate(instr)) {
     return;
   }
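For reference, the marker test reduces to a bit-field check on the MIPS I-type encoding. A hedged sketch (these helpers mirror, but are not, the Assembler API):

#include <cstdint>

// MIPS I-type encoding: opcode(6) | rs(5) | rt(5) | imm(16); ANDI is 0x0C.
inline bool IsAndImmediate(uint32_t instr) { return (instr >> 26) == 0x0C; }
inline uint32_t GetRt(uint32_t instr) { return (instr >> 16) & 0x1F; }

// The rolled-back check accepts any andi following the call; the newer
// code (removed above) additionally required rt == zero_reg (register 0)
// so that an ordinary andi cannot be mistaken for the patch marker.
inline bool IsInlinedSmiCheckMarker(uint32_t instr) {
  return IsAndImmediate(instr) && GetRt(instr) == 0;
}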
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
deleted file mode 100644
index 6628d8e..0000000
--- a/src/mips/lithium-codegen-mips.cc
+++ /dev/null
@@ -1,5073 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-codegen-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
-#include "code-stubs.h"
-#include "stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-class SafepointGenerator : public CallWrapper {
- public:
-  SafepointGenerator(LCodeGen* codegen,
-                     LPointerMap* pointers,
-                     Safepoint::DeoptMode mode)
-      : codegen_(codegen),
-        pointers_(pointers),
-        deopt_mode_(mode) { }
-  virtual ~SafepointGenerator() { }
-
-  virtual void BeforeCall(int call_size) const { }
-
-  virtual void AfterCall() const {
-    codegen_->RecordSafepoint(pointers_, deopt_mode_);
-  }
-
- private:
-  LCodeGen* codegen_;
-  LPointerMap* pointers_;
-  Safepoint::DeoptMode deopt_mode_;
-};
-
-
-#define __ masm()->
-
-bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
-  status_ = GENERATING;
-  CpuFeatures::Scope scope(FPU);
-
-  CodeStub::GenerateFPStubs();
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // NONE indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::NONE);
-
-  return GeneratePrologue() &&
-      GenerateBody() &&
-      GenerateDeferredCode() &&
-      GenerateSafepointTable();
-}
-
-
-void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
-  code->set_stack_slots(GetStackSlotCount());
-  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
-  PopulateDeoptimizationData(code);
-}
-
-
-void LCodeGen::Abort(const char* format, ...) {
-  if (FLAG_trace_bailout) {
-    SmartArrayPointer<char> name(
-        info()->shared_info()->DebugName()->ToCString());
-    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
-    va_list arguments;
-    va_start(arguments, format);
-    OS::VPrint(format, arguments);
-    va_end(arguments);
-    PrintF("\n");
-  }
-  status_ = ABORTED;
-}
-
-
-void LCodeGen::Comment(const char* format, ...) {
-  if (!FLAG_code_comments) return;
-  char buffer[4 * KB];
-  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
-  va_list arguments;
-  va_start(arguments, format);
-  builder.AddFormattedList(format, arguments);
-  va_end(arguments);
-
-  // Copy the string before recording it in the assembler to avoid
-  // issues when the stack-allocated buffer goes out of scope.
-  size_t length = builder.position();
-  Vector<char> copy = Vector<char>::New(length + 1);
-  memcpy(copy.start(), builder.Finalize(), copy.length());
-  masm()->RecordComment(copy.start());
-}
-
-
-bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
-
-#ifdef DEBUG
-  if (strlen(FLAG_stop_at) > 0 &&
-      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
-    __ stop("stop_at");
-  }
-#endif
-
-  // a1: Callee's JS function.
-  // cp: Callee's context.
-  // fp: Caller's frame pointer.
-  // ra: Caller's pc.
-
-  // Strict mode functions and builtins need to replace the receiver
-  // with undefined when called as functions (without an explicit
-  // receiver object). t1 is zero for method calls and non-zero for
-  // function calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
-    Label ok;
-    __ Branch(&ok, eq, t1, Operand(zero_reg));
-
-    int receiver_offset = scope()->num_parameters() * kPointerSize;
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ sw(a2, MemOperand(sp, receiver_offset));
-    __ bind(&ok);
-  }
-
-  __ Push(ra, fp, cp, a1);
-  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adj. FP to point to saved FP.
-
-  // Reserve space for the stack slots needed by the code.
-  int slots = GetStackSlotCount();
-  if (slots > 0) {
-    if (FLAG_debug_code) {
-      __ li(a0, Operand(slots));
-      __ li(a2, Operand(kSlotsZapValue));
-      Label loop;
-      __ bind(&loop);
-      __ push(a2);
-      __ Subu(a0, a0, 1);
-      __ Branch(&loop, ne, a0, Operand(zero_reg));
-    } else {
-      __ Subu(sp, sp, Operand(slots * kPointerSize));
-    }
-  }
-
-  // Possibly allocate a local context.
-  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  if (heap_slots > 0) {
-    Comment(";;; Allocate local context");
-    // Argument to NewContext is the function, which is in a1.
-    __ push(a1);
-    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
-      FastNewContextStub stub(heap_slots);
-      __ CallStub(&stub);
-    } else {
-      __ CallRuntime(Runtime::kNewFunctionContext, 1);
-    }
-    RecordSafepoint(Safepoint::kNoLazyDeopt);
-    // Context is returned in both v0 and cp.  It replaces the context
-    // passed to us.  It's saved in the stack and kept live in cp.
-    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    // Copy any necessary parameters into the context.
-    int num_parameters = scope()->num_parameters();
-    for (int i = 0; i < num_parameters; i++) {
-      Variable* var = scope()->parameter(i);
-      if (var->IsContextSlot()) {
-        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
-            (num_parameters - 1 - i) * kPointerSize;
-        // Load parameter from stack.
-        __ lw(a0, MemOperand(fp, parameter_offset));
-        // Store it in the context.
-        MemOperand target = ContextOperand(cp, var->index());
-        __ sw(a0, target);
-        // Update the write barrier. This clobbers a3 and a0.
-        __ RecordWriteContextSlot(
-            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
-      }
-    }
-    Comment(";;; End allocate local context");
-  }
-
-  // Trace the call.
-  if (FLAG_trace) {
-    __ CallRuntime(Runtime::kTraceEnter, 0);
-  }
-  EnsureSpaceForLazyDeopt();
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateBody() {
-  ASSERT(is_generating());
-  bool emit_instructions = true;
-  for (current_instruction_ = 0;
-       !is_aborted() && current_instruction_ < instructions_->length();
-       current_instruction_++) {
-    LInstruction* instr = instructions_->at(current_instruction_);
-    if (instr->IsLabel()) {
-      LLabel* label = LLabel::cast(instr);
-      emit_instructions = !label->HasReplacement();
-    }
-
-    if (emit_instructions) {
-      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
-      instr->CompileToNative(this);
-    }
-  }
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
-  if (deferred_.length() > 0) {
-    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
-      LDeferredCode* code = deferred_[i];
-      __ bind(code->entry());
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
-      code->Generate();
-      __ jmp(code->exit());
-    }
-  }
-  // Deferred code is the last part of the instruction sequence. Mark
-  // the generated code as done unless we bailed out.
-  if (!is_aborted()) status_ = DONE;
-  return !is_aborted();
-}
-
-
-bool LCodeGen::GenerateDeoptJumpTable() {
-  // TODO(plind): It is not clear that this will be an advantage for MIPS.
-  // Skipping it for now. Raised issue #100 for this.
-  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
-  return false;
-}
-
-
-bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
-  safepoints_.Emit(masm(), GetStackSlotCount());
-  return !is_aborted();
-}
-
-
-Register LCodeGen::ToRegister(int index) const {
-  return Register::FromAllocationIndex(index);
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
-  return DoubleRegister::FromAllocationIndex(index);
-}
-
-
-Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
-  return ToRegister(op->index());
-}
-
-
-Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
-  if (op->IsRegister()) {
-    return ToRegister(op->index());
-  } else if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    Handle<Object> literal = chunk_->LookupLiteral(const_op);
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
-      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
-    } else if (r.IsDouble()) {
-      Abort("EmitLoadRegister: Unsupported double immediate.");
-    } else {
-      ASSERT(r.IsTagged());
-      if (literal->IsSmi()) {
-        __ li(scratch, Operand(literal));
-      } else {
-       __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
-      }
-    }
-    return scratch;
-  } else if (op->IsStackSlot() || op->IsArgument()) {
-    __ lw(scratch, ToMemOperand(op));
-    return scratch;
-  }
-  UNREACHABLE();
-  return scratch;
-}
-
-
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
-  return ToDoubleRegister(op->index());
-}
-
-
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
-                                                FloatRegister flt_scratch,
-                                                DoubleRegister dbl_scratch) {
-  if (op->IsDoubleRegister()) {
-    return ToDoubleRegister(op->index());
-  } else if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    Handle<Object> literal = chunk_->LookupLiteral(const_op);
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
-      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
-      __ mtc1(at, flt_scratch);
-      __ cvt_d_w(dbl_scratch, flt_scratch);
-      return dbl_scratch;
-    } else if (r.IsDouble()) {
-      Abort("unsupported double immediate");
-    } else if (r.IsTagged()) {
-      Abort("unsupported tagged immediate");
-    }
-  } else if (op->IsStackSlot() || op->IsArgument()) {
-    MemOperand mem_op = ToMemOperand(op);
-    __ ldc1(dbl_scratch, mem_op);
-    return dbl_scratch;
-  }
-  UNREACHABLE();
-  return dbl_scratch;
-}
-
-
-Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
-  Handle<Object> literal = chunk_->LookupLiteral(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
-  return literal;
-}
-
-
-bool LCodeGen::IsInteger32(LConstantOperand* op) const {
-  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
-}
-
-
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
-  Handle<Object> value = chunk_->LookupLiteral(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
-  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
-      value->Number());
-  return static_cast<int32_t>(value->Number());
-}
-
-
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  Handle<Object> value = chunk_->LookupLiteral(op);
-  return value->Number();
-}
-
-
-Operand LCodeGen::ToOperand(LOperand* op) {
-  if (op->IsConstantOperand()) {
-    LConstantOperand* const_op = LConstantOperand::cast(op);
-    Handle<Object> literal = chunk_->LookupLiteral(const_op);
-    Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
-      return Operand(static_cast<int32_t>(literal->Number()));
-    } else if (r.IsDouble()) {
-      Abort("ToOperand Unsupported double immediate.");
-    }
-    ASSERT(r.IsTagged());
-    return Operand(literal);
-  } else if (op->IsRegister()) {
-    return Operand(ToRegister(op));
-  } else if (op->IsDoubleRegister()) {
-    Abort("ToOperand IsDoubleRegister unimplemented");
-    return Operand(0);
-  }
-  // Stack slots not implemented, use ToMemOperand instead.
-  UNREACHABLE();
-  return Operand(0);
-}
-
-
-MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  ASSERT(!op->IsRegister());
-  ASSERT(!op->IsDoubleRegister());
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
-  int index = op->index();
-  if (index >= 0) {
-    // Local or spill slot. Skip the frame pointer, function, and
-    // context in the fixed part of the frame.
-    return MemOperand(fp, -(index + 3) * kPointerSize);
-  } else {
-    // Incoming parameter. Skip the return address.
-    return MemOperand(fp, -(index - 1) * kPointerSize);
-  }
-}
-
-
-MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
-  ASSERT(op->IsDoubleStackSlot());
-  int index = op->index();
-  if (index >= 0) {
-    // Local or spill slot. Skip the frame pointer, function, context,
-    // and the first word of the double in the fixed part of the frame.
-    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
-  } else {
-    // Incoming parameter. Skip the return address and the first word of
-    // the double.
-    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
-  }
-}
-
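Both ToMemOperand and ToHighMemOperand above encode the same fixed frame layout. A sketch of the slot-to-offset arithmetic, assuming the 4-byte kPointerSize of MIPS32:

#include <cstdint>

// Non-negative indices are locals/spill slots below the fixed part of
// the frame (saved fp, function, context); negative indices address
// incoming parameters above the saved return address.
int32_t SlotOffsetFromFp(int32_t index) {
  const int32_t kPointerSize = 4;  // MIPS32
  if (index >= 0) return -(index + 3) * kPointerSize;
  return -(index - 1) * kPointerSize;
}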
-
-void LCodeGen::WriteTranslation(LEnvironment* environment,
-                                Translation* translation) {
-  if (environment == NULL) return;
-
-  // The translation includes one command per value in the environment.
-  int translation_size = environment->values()->length();
-  // The output frame height does not include the parameters.
-  int height = translation_size - environment->parameter_count();
-
-  WriteTranslation(environment->outer(), translation);
-  int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    default:
-      UNREACHABLE();
-  }
-  for (int i = 0; i < translation_size; ++i) {
-    LOperand* value = environment->values()->at(i);
-    // spilled_registers_ and spilled_double_registers_ are either
-    // both NULL or both set.
-    if (environment->spilled_registers() != NULL && value != NULL) {
-      if (value->IsRegister() &&
-          environment->spilled_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(translation,
-                         environment->spilled_registers()[value->index()],
-                         environment->HasTaggedValueAt(i));
-      } else if (
-          value->IsDoubleRegister() &&
-          environment->spilled_double_registers()[value->index()] != NULL) {
-        translation->MarkDuplicate();
-        AddToTranslation(
-            translation,
-            environment->spilled_double_registers()[value->index()],
-            false);
-      }
-    }
-
-    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
-  }
-}
-
-
-void LCodeGen::AddToTranslation(Translation* translation,
-                                LOperand* op,
-                                bool is_tagged) {
-  if (op == NULL) {
-    // TODO(twuerthinger): Introduce marker operands to indicate that this value
-    // is not present and must be reconstructed from the deoptimizer. Currently
-    // this is only used for the arguments object.
-    translation->StoreArgumentsObject();
-  } else if (op->IsStackSlot()) {
-    if (is_tagged) {
-      translation->StoreStackSlot(op->index());
-    } else {
-      translation->StoreInt32StackSlot(op->index());
-    }
-  } else if (op->IsDoubleStackSlot()) {
-    translation->StoreDoubleStackSlot(op->index());
-  } else if (op->IsArgument()) {
-    ASSERT(is_tagged);
-    int src_index = GetStackSlotCount() + op->index();
-    translation->StoreStackSlot(src_index);
-  } else if (op->IsRegister()) {
-    Register reg = ToRegister(op);
-    if (is_tagged) {
-      translation->StoreRegister(reg);
-    } else {
-      translation->StoreInt32Register(reg);
-    }
-  } else if (op->IsDoubleRegister()) {
-    DoubleRegister reg = ToDoubleRegister(op);
-    translation->StoreDoubleRegister(reg);
-  } else if (op->IsConstantOperand()) {
-    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
-    int src_index = DefineDeoptimizationLiteral(literal);
-    translation->StoreLiteral(src_index);
-  } else {
-    UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::CallCode(Handle<Code> code,
-                        RelocInfo::Mode mode,
-                        LInstruction* instr) {
-  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallCodeGeneric(Handle<Code> code,
-                               RelocInfo::Mode mode,
-                               LInstruction* instr,
-                               SafepointMode safepoint_mode) {
-  ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  __ Call(code, mode);
-  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
-}
-
-
-void LCodeGen::CallRuntime(const Runtime::Function* function,
-                           int num_arguments,
-                           LInstruction* instr) {
-  ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  ASSERT(pointers != NULL);
-  RecordPosition(pointers->position());
-
-  __ CallRuntime(function, num_arguments);
-  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-}
-
-
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
-                                       int argc,
-                                       LInstruction* instr) {
-  __ CallRuntimeSaveDoubles(id);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
-                                                    Safepoint::DeoptMode mode) {
-  if (!environment->HasBeenRegistered()) {
-    // Physical stack frame layout:
-    // -x ............. -4  0 ..................................... y
-    // [incoming arguments] [spill slots] [pushed outgoing arguments]
-
-    // Layout of the environment:
-    // 0 ..................................................... size-1
-    // [parameters] [locals] [expression stack including arguments]
-
-    // Layout of the translation:
-    // 0 ........................................................ size - 1 + 4
-    // [expression stack including arguments] [locals] [4 words] [parameters]
-    // |>------------  translation_size ------------<|
-
-    int frame_count = 0;
-    int jsframe_count = 0;
-    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
-      ++frame_count;
-      if (e->frame_type() == JS_FUNCTION) {
-        ++jsframe_count;
-      }
-    }
-    Translation translation(&translations_, frame_count, jsframe_count);
-    WriteTranslation(environment, &translation);
-    int deoptimization_index = deoptimizations_.length();
-    int pc_offset = masm()->pc_offset();
-    environment->Register(deoptimization_index,
-                          translation.index(),
-                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
-    deoptimizations_.Add(environment);
-  }
-}
-
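The counting loop above sizes the translation: every environment in the inlining chain contributes a frame, and JS_FUNCTION frames are counted separately for the deoptimizer. A minimal model, with a hypothetical Env type standing in for LEnvironment:

struct Env {
  bool is_js_function;  // e->frame_type() == JS_FUNCTION
  const Env* outer;     // inlining chain; nullptr at the outermost frame
};

void CountFrames(const Env* env, int* frame_count, int* jsframe_count) {
  *frame_count = 0;
  *jsframe_count = 0;
  for (const Env* e = env; e != nullptr; e = e->outer) {
    ++*frame_count;
    if (e->is_js_function) ++*jsframe_count;
  }
}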
-
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment,
-                            Register src1,
-                            const Operand& src2) {
-  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
-  int id = environment->deoptimization_index();
-  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
-  if (entry == NULL) {
-    Abort("bailout was not prepared");
-    return;
-  }
-
-  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
-
-  if (FLAG_deopt_every_n_times == 1 &&
-      info_->shared_info()->opt_count() == id) {
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    return;
-  }
-
-  if (FLAG_trap_on_deopt) {
-    Label skip;
-    if (cc != al) {
-      __ Branch(&skip, NegateCondition(cc), src1, src2);
-    }
-    __ stop("trap_on_deopt");
-    __ bind(&skip);
-  }
-
-  // TODO(plind): The ARM port is a little different here, due to its
-  // deopt jump table, which is not used for MIPS yet.
-  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
-}
-
-
-void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
-  int length = deoptimizations_.length();
-  if (length == 0) return;
-  Handle<DeoptimizationInputData> data =
-      factory()->NewDeoptimizationInputData(length, TENURED);
-
-  Handle<ByteArray> translations = translations_.CreateByteArray();
-  data->SetTranslationByteArray(*translations);
-  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
-
-  Handle<FixedArray> literals =
-      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
-  for (int i = 0; i < deoptimization_literals_.length(); i++) {
-    literals->set(i, *deoptimization_literals_[i]);
-  }
-  data->SetLiteralArray(*literals);
-
-  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
-  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
-
-  // Populate the deoptimization entries.
-  for (int i = 0; i < length; i++) {
-    LEnvironment* env = deoptimizations_[i];
-    data->SetAstId(i, Smi::FromInt(env->ast_id()));
-    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
-    data->SetArgumentsStackHeight(i,
-                                  Smi::FromInt(env->arguments_stack_height()));
-    data->SetPc(i, Smi::FromInt(env->pc_offset()));
-  }
-  code->set_deoptimization_data(*data);
-}
-
-
-int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
-  int result = deoptimization_literals_.length();
-  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
-    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
-  }
-  deoptimization_literals_.Add(literal);
-  return result;
-}
-
-
-void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
-
-  const ZoneList<Handle<JSFunction> >* inlined_closures =
-      chunk()->inlined_closures();
-
-  for (int i = 0, length = inlined_closures->length();
-       i < length;
-       i++) {
-    DefineDeoptimizationLiteral(inlined_closures->at(i));
-  }
-
-  inlined_function_count_ = deoptimization_literals_.length();
-}
-
-
-void LCodeGen::RecordSafepointWithLazyDeopt(
-    LInstruction* instr, SafepointMode safepoint_mode) {
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
-  } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
-  }
-}
-
-
-void LCodeGen::RecordSafepoint(
-    LPointerMap* pointers,
-    Safepoint::Kind kind,
-    int arguments,
-    Safepoint::DeoptMode deopt_mode) {
-  ASSERT(expected_safepoint_kind_ == kind);
-
-  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
-  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-      kind, arguments, deopt_mode);
-  for (int i = 0; i < operands->length(); i++) {
-    LOperand* pointer = operands->at(i);
-    if (pointer->IsStackSlot()) {
-      safepoint.DefinePointerSlot(pointer->index());
-    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
-      safepoint.DefinePointerRegister(ToRegister(pointer));
-    }
-  }
-  if (kind & Safepoint::kWithRegisters) {
-    // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp);
-  }
-}
-
-
-void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition);
-  RecordSafepoint(&empty_pointers, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
-                                            int arguments,
-                                            Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(
-      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
-    LPointerMap* pointers,
-    int arguments,
-    Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(
-      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
-void LCodeGen::RecordPosition(int position) {
-  if (position == RelocInfo::kNoPosition) return;
-  masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::DoLabel(LLabel* label) {
-  if (label->is_loop_header()) {
-    Comment(";;; B%d - LOOP entry", label->block_id());
-  } else {
-    Comment(";;; B%d", label->block_id());
-  }
-  __ bind(label->label());
-  current_block_ = label->block_id();
-  DoGap(label);
-}
-
-
-void LCodeGen::DoParallelMove(LParallelMove* move) {
-  resolver_.Resolve(move);
-}
-
-
-void LCodeGen::DoGap(LGap* gap) {
-  for (int i = LGap::FIRST_INNER_POSITION;
-       i <= LGap::LAST_INNER_POSITION;
-       i++) {
-    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
-    LParallelMove* move = gap->GetParallelMove(inner_pos);
-    if (move != NULL) DoParallelMove(move);
-  }
-}
-
-
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
-  DoGap(instr);
-}
-
-
-void LCodeGen::DoParameter(LParameter* instr) {
-  // Nothing to do.
-}
-
-
-void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-  switch (instr->hydrogen()->major_key()) {
-    case CodeStub::RegExpConstructResult: {
-      RegExpConstructResultStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::RegExpExec: {
-      RegExpExecStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::SubString: {
-      SubStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::NumberToString: {
-      NumberToStringStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::StringAdd: {
-      StringAddStub stub(NO_STRING_ADD_FLAGS);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::StringCompare: {
-      StringCompareStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    case CodeStub::TranscendentalCache: {
-      __ lw(a0, MemOperand(sp, 0));
-      TranscendentalCacheStub stub(instr->transcendental_type(),
-                                   TranscendentalCacheStub::TAGGED);
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
-  // Nothing to do.
-}
-
-
-void LCodeGen::DoModI(LModI* instr) {
-  Register scratch = scratch0();
-  const Register left = ToRegister(instr->InputAt(0));
-  const Register result = ToRegister(instr->result());
-
-  Label done;
-
-  if (instr->hydrogen()->HasPowerOf2Divisor()) {
-    Register scratch = scratch0();
-    ASSERT(!left.is(scratch));
-    __ mov(scratch, left);
-    int32_t p2constant = HConstant::cast(
-        instr->hydrogen()->right())->Integer32Value();
-    ASSERT(p2constant != 0);
-    // Result always takes the sign of the dividend (left).
-    p2constant = abs(p2constant);
-
-    Label positive_dividend;
-    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
-    __ subu(result, zero_reg, left);
-    __ And(result, result, p2constant - 1);
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
-    }
-    __ Branch(USE_DELAY_SLOT, &done);
-    __ subu(result, zero_reg, result);
-    __ bind(&positive_dividend);
-    __ And(result, scratch, p2constant - 1);
-  } else {
-    // div runs in the background while we check for special cases.
-    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
-    __ div(left, right);
-
-    // Check for x % 0.
-    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
-    }
-
-    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
-    __ mfhi(result);
-
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
-    }
-  }
-  __ bind(&done);
-}
-
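The power-of-two branch in DoModI above must preserve the dividend's sign, which a plain mask would not do for negative inputs. A minimal model (kMinInt is ignored; this sketch does not handle it):

#include <cassert>

int ModPowerOf2(int left, int p2constant) {
  int mask = (p2constant < 0 ? -p2constant : p2constant) - 1;
  return left >= 0 ? (left & mask) : -((-left) & mask);
}

int main() {
  assert(ModPowerOf2(7, 8) == 7);
  assert(ModPowerOf2(-7, 8) == -7);  // result takes the dividend's sign
  assert(ModPowerOf2(-8, 8) == 0);   // the -0 case the bailout guards
  return 0;
}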
-
-void LCodeGen::DoDivI(LDivI* instr) {
-  const Register left = ToRegister(instr->InputAt(0));
-  const Register right = ToRegister(instr->InputAt(1));
-  const Register result = ToRegister(instr->result());
-
-  // On MIPS, div is asynchronous: it runs in the background while we
-  // check for special cases.
-  __ div(left, right);
-
-  // Check for x / 0.
-  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
-  }
-
-  // Check for (0 / -x) that will produce negative zero.
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label left_not_zero;
-    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
-    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
-    __ bind(&left_not_zero);
-  }
-
-  // Check for (kMinInt / -1).
-  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    Label left_not_min_int;
-    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
-    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
-    __ bind(&left_not_min_int);
-  }
-
-  __ mfhi(result);
-  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
-  __ mflo(result);
-}
-
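Each DeoptimizeIf in DoDivI above corresponds to one guard below. The minus-zero check is conditional on a hydrogen flag in the real code; this sketch applies every guard unconditionally:

#include <cstdint>
#include <optional>

// nullopt stands for a deoptimization: division by zero, a -0 result,
// kMinInt / -1 overflow, or an inexact quotient (the optimized code
// only produces int32 results).
std::optional<int32_t> Int32Div(int32_t left, int32_t right) {
  if (right == 0) return std::nullopt;
  if (left == 0 && right < 0) return std::nullopt;  // 0 / -x is -0
  if (left == INT32_MIN && right == -1) return std::nullopt;
  if (left % right != 0) return std::nullopt;       // mfhi(result) != 0
  return left / right;
}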
-
-void LCodeGen::DoMulI(LMulI* instr) {
-  Register scratch = scratch0();
-  Register result = ToRegister(instr->result());
-  // Note that result may alias left.
-  Register left = ToRegister(instr->InputAt(0));
-  LOperand* right_op = instr->InputAt(1);
-
-  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-  bool bailout_on_minus_zero =
-    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
-
-  if (right_op->IsConstantOperand() && !can_overflow) {
-    // Use optimized code for specific constants.
-    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
-
-    if (bailout_on_minus_zero && (constant < 0)) {
-      // The case of a zero constant is handled separately.
-      // If the constant is negative and left is zero, the result should be -0.
-      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
-    }
-
-    switch (constant) {
-      case -1:
-        __ Subu(result, zero_reg, left);
-        break;
-      case 0:
-        if (bailout_on_minus_zero) {
-          // If left is strictly negative and the constant is zero, the
-          // result is -0. Deoptimize if required, otherwise return 0.
-          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
-        }
-        __ mov(result, zero_reg);
-        break;
-      case 1:
-        // Nothing to do.
-        __ Move(result, left);
-        break;
-      default:
-        // Multiplying by powers of two and powers of two plus or minus
-        // one can be done faster with shifted operands.
-        // For other constants we emit standard code.
-        int32_t mask = constant >> 31;
-        uint32_t constant_abs = (constant + mask) ^ mask;
-
-        if (IsPowerOf2(constant_abs) ||
-            IsPowerOf2(constant_abs - 1) ||
-            IsPowerOf2(constant_abs + 1)) {
-          if (IsPowerOf2(constant_abs)) {
-            int32_t shift = WhichPowerOf2(constant_abs);
-            __ sll(result, left, shift);
-          } else if (IsPowerOf2(constant_abs - 1)) {
-            int32_t shift = WhichPowerOf2(constant_abs - 1);
-            __ sll(result, left, shift);
-            __ Addu(result, result, left);
-          } else if (IsPowerOf2(constant_abs + 1)) {
-            int32_t shift = WhichPowerOf2(constant_abs + 1);
-            __ sll(result, left, shift);
-            __ Subu(result, result, left);
-          }
-
-          // Correct the sign of the result if the constant is negative.
-          if (constant < 0)  {
-            __ Subu(result, zero_reg, result);
-          }
-
-        } else {
-          // Generate standard code.
-          __ li(at, constant);
-          __ Mul(result, left, at);
-        }
-    }
-
-  } else {
-    Register right = EmitLoadRegister(right_op, scratch);
-    if (bailout_on_minus_zero) {
-      __ Or(ToRegister(instr->TempAt(0)), left, right);
-    }
-
-    if (can_overflow) {
-      // hi:lo = left * right.
-      __ mult(left, right);
-      __ mfhi(scratch);
-      __ mflo(result);
-      __ sra(at, result, 31);
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
-    } else {
-      __ Mul(result, left, right);
-    }
-
-    if (bailout_on_minus_zero) {
-      // Bail out if the result is supposed to be negative zero.
-      Label done;
-      __ Branch(&done, ne, result, Operand(zero_reg));
-      DeoptimizeIf(lt,
-                   instr->environment(),
-                   ToRegister(instr->TempAt(0)),
-                   Operand(zero_reg));
-      __ bind(&done);
-    }
-  }
-}
-
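The shifted-operand strength reduction in DoMulI above turns a multiply by 2^k, 2^k + 1 or 2^k - 1 into a shift plus at most one add or subtract; sign correction for negative constants is a final subtraction from zero, as in the code. A compact model:

#include <cassert>

// left * (2^k + delta), with delta in {-1, 0, +1}.
int MulByShiftedConstant(int left, int k, int delta) {
  int shifted = left << k;
  return shifted + delta * left;
}

int main() {
  assert(MulByShiftedConstant(7, 3, 0) == 7 * 8);    // power of two
  assert(MulByShiftedConstant(7, 3, 1) == 7 * 9);    // 2^k + 1
  assert(MulByShiftedConstant(7, 3, -1) == 7 * 7);   // 2^k - 1
  return 0;
}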
-
-void LCodeGen::DoBitI(LBitI* instr) {
-  LOperand* left_op = instr->InputAt(0);
-  LOperand* right_op = instr->InputAt(1);
-  ASSERT(left_op->IsRegister());
-  Register left = ToRegister(left_op);
-  Register result = ToRegister(instr->result());
-  Operand right(no_reg);
-
-  if (right_op->IsStackSlot() || right_op->IsArgument()) {
-    right = Operand(EmitLoadRegister(right_op, at));
-  } else {
-    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
-    right = ToOperand(right_op);
-  }
-
-  switch (instr->op()) {
-    case Token::BIT_AND:
-      __ And(result, left, right);
-      break;
-    case Token::BIT_OR:
-      __ Or(result, left, right);
-      break;
-    case Token::BIT_XOR:
-      __ Xor(result, left, right);
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void LCodeGen::DoShiftI(LShiftI* instr) {
-  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
-  // result may alias either of them.
-  LOperand* right_op = instr->InputAt(1);
-  Register left = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  if (right_op->IsRegister()) {
-    // No need to mask the right operand on MIPS; masking is built into the
-    // variable shift instructions.
-    switch (instr->op()) {
-      case Token::SAR:
-        __ srav(result, left, ToRegister(right_op));
-        break;
-      case Token::SHR:
-        __ srlv(result, left, ToRegister(right_op));
-        if (instr->can_deopt()) {
-          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
-        }
-        break;
-      case Token::SHL:
-        __ sllv(result, left, ToRegister(right_op));
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  } else {
-    // Mask the right_op operand.
-    int value = ToInteger32(LConstantOperand::cast(right_op));
-    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
-    switch (instr->op()) {
-      case Token::SAR:
-        if (shift_count != 0) {
-          __ sra(result, left, shift_count);
-        } else {
-          __ Move(result, left);
-        }
-        break;
-      case Token::SHR:
-        if (shift_count != 0) {
-          __ srl(result, left, shift_count);
-        } else {
-          if (instr->can_deopt()) {
-            __ And(at, left, Operand(0x80000000));
-            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
-          }
-          __ Move(result, left);
-        }
-        break;
-      case Token::SHL:
-        if (shift_count != 0) {
-          __ sll(result, left, shift_count);
-        } else {
-          __ Move(result, left);
-        }
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
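Why only SHR can deoptimize in DoShiftI above: the untagged result is interpreted as a signed int32, and a logical shift right by zero bits (a constant 0, or a variable count that is 0 mod 32) can leave the sign bit set, giving a value with no int32 representation. A sketch:

#include <cstdint>
#include <optional>

std::optional<int32_t> ShrToInt32(uint32_t value, uint8_t shift_count) {
  uint32_t result = shift_count ? (value >> shift_count) : value;
  if (result & 0x80000000u) return std::nullopt;  // can_deopt path
  return static_cast<int32_t>(result);
}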
-
-void LCodeGen::DoSubI(LSubI* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
-  if (!can_overflow) {
-    if (right->IsStackSlot() || right->IsArgument()) {
-      Register right_reg = EmitLoadRegister(right, at);
-      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
-    } else {
-      ASSERT(right->IsRegister() || right->IsConstantOperand());
-      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
-    }
-  } else {  // can_overflow.
-    Register overflow = scratch0();
-    Register scratch = scratch1();
-    if (right->IsStackSlot() ||
-        right->IsArgument() ||
-        right->IsConstantOperand()) {
-      Register right_reg = EmitLoadRegister(right, scratch);
-      __ SubuAndCheckForOverflow(ToRegister(result),
-                                 ToRegister(left),
-                                 right_reg,
-                                 overflow);  // Reg at also used as scratch.
-    } else {
-      ASSERT(right->IsRegister());
-      // Because the overflow check macros do not support constant operands,
-      // the IsConstantOperand case is handled by the previous if clause.
-      __ SubuAndCheckForOverflow(ToRegister(result),
-                                 ToRegister(left),
-                                 ToRegister(right),
-                                 overflow);  // Reg at also used as scratch.
-    }
-    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
-  }
-}
-
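SubuAndCheckForOverflow above leaves a scratch value whose sign bit is set exactly when the signed subtraction overflowed, which DeoptimizeIf(lt, ..., zero_reg) then tests; DoAddI below uses the same pattern. A portable model of the predicate, assuming that reading of the macro's contract:

#include <cstdint>

bool SubOverflows(int32_t left, int32_t right, int32_t* result) {
  int64_t wide = static_cast<int64_t>(left) - static_cast<int64_t>(right);
  *result = static_cast<int32_t>(wide);
  return wide != static_cast<int64_t>(*result);  // true ==> deoptimize
}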
-
-void LCodeGen::DoConstantI(LConstantI* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ li(ToRegister(instr->result()), Operand(instr->value()));
-}
-
-
-void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  double v = instr->value();
-  __ Move(result, v);
-}
-
-
-void LCodeGen::DoConstantT(LConstantT* instr) {
-  Handle<Object> value = instr->value();
-  if (value->IsSmi()) {
-    __ li(ToRegister(instr->result()), Operand(value));
-  } else {
-    __ LoadHeapObject(ToRegister(instr->result()),
-                      Handle<HeapObject>::cast(value));
-  }
-}
-
-
-void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->InputAt(0));
-  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
-}
-
-
-void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
-  Register result = ToRegister(instr->result());
-  Register array = ToRegister(instr->InputAt(0));
-  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
-}
-
-
-void LCodeGen::DoElementsKind(LElementsKind* instr) {
-  Register result = ToRegister(instr->result());
-  Register input = ToRegister(instr->InputAt(0));
-
-  // Load map into |result|.
-  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
-  // Load the map's "bit field 2" into |result|. We only need the first byte,
-  // but the following bit field extraction takes care of that anyway.
-  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
-  // Retrieve elements_kind from bit field 2.
-  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
-}
-
-
-void LCodeGen::DoValueOf(LValueOf* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register map = ToRegister(instr->TempAt(0));
-  Label done;
-
-  // If the object is a smi, return the object.
-  __ Move(result, input);
-  __ JumpIfSmi(input, &done);
-
-  // If the object is not a value type, return the object.
-  __ GetObjectType(input, map, map);
-  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
-  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDateField(LDateField* instr) {
-  Register object = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Smi* index = instr->index();
-  Label runtime, done;
-  ASSERT(object.is(a0));
-  ASSERT(result.is(v0));
-  ASSERT(!scratch.is(scratch0()));
-  ASSERT(!scratch.is(object));
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ GetObjectType(object, scratch, scratch);
-  __ Assert(eq, "Trying to get date field from non-date.",
-      scratch, Operand(JS_DATE_TYPE));
-#endif
-
-  if (index->value() == 0) {
-    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ li(scratch, Operand(stamp));
-      __ lw(scratch, MemOperand(scratch));
-      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
-      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
-      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
-                                            kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2, scratch);
-    __ li(a1, Operand(index));
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ bind(&done);
-  }
-}
-
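The stamp check in DoDateField above gates the cached fields: they are valid only while the object's stamp matches the isolate-wide date cache stamp, and index 0 (the time value) is always read directly. A hedged sketch with a hypothetical layout; uncached fields are folded into the runtime path here:

#include <cstdint>
#include <optional>

struct DateObject {
  int32_t value;             // JSDate::kValueOffset (simplified to int32)
  int32_t cache_stamp;       // JSDate::kCacheStampOffset
  int32_t cached_fields[8];  // hypothetical cached-field block
};

std::optional<int32_t> GetDateField(const DateObject& d, int index,
                                    int32_t current_stamp) {
  if (index == 0) return d.value;                   // never stale
  if (index < 1 || index > 8) return std::nullopt;  // uncached: runtime
  if (d.cache_stamp == current_stamp) return d.cached_fields[index - 1];
  return std::nullopt;  // &runtime: refill via get_date_field_function
}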
-
-void LCodeGen::DoBitNotI(LBitNotI* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  __ Nor(result, zero_reg, Operand(input));
-}
-
-
-void LCodeGen::DoThrow(LThrow* instr) {
-  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
-  __ push(input_reg);
-  CallRuntime(Runtime::kThrow, 1, instr);
-
-  if (FLAG_debug_code) {
-    __ stop("Unreachable code.");
-  }
-}
-
-
-void LCodeGen::DoAddI(LAddI* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  LOperand* result = instr->result();
-  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-
-  if (!can_overflow) {
-    if (right->IsStackSlot() || right->IsArgument()) {
-      Register right_reg = EmitLoadRegister(right, at);
-      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
-    } else {
-      ASSERT(right->IsRegister() || right->IsConstantOperand());
-      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
-    }
-  } else {  // can_overflow.
-    Register overflow = scratch0();
-    Register scratch = scratch1();
-    if (right->IsStackSlot() ||
-        right->IsArgument() ||
-        right->IsConstantOperand()) {
-      Register right_reg = EmitLoadRegister(right, scratch);
-      __ AdduAndCheckForOverflow(ToRegister(result),
-                                 ToRegister(left),
-                                 right_reg,
-                                 overflow);  // Reg at is also used as scratch.
-    } else {
-      ASSERT(right->IsRegister());
-      // The overflow-check macros do not support constant operands, so the
-      // IsConstantOperand case is handled in the previous clause.
-      __ AdduAndCheckForOverflow(ToRegister(result),
-                                 ToRegister(left),
-                                 ToRegister(right),
-                                 overflow);  // Reg at is also used as scratch.
-    }
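-    // AdduAndCheckForOverflow leaves a negative value in |overflow| iff the
-    // addition overflowed, hence the signed less-than check against zero.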
-    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
-  }
-}
-
-
-void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
-  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
-  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  switch (instr->op()) {
-    case Token::ADD:
-      __ add_d(result, left, right);
-      break;
-    case Token::SUB:
-      __ sub_d(result, left, right);
-      break;
-    case Token::MUL:
-      __ mul_d(result, left, right);
-      break;
-    case Token::DIV:
-      __ div_d(result, left, right);
-      break;
-    case Token::MOD: {
-      // Save a0-a3 on the stack.
-      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
-      __ MultiPush(saved_regs);
-
-      __ PrepareCallCFunction(0, 2, scratch0());
-      __ SetCallCDoubleArguments(left, right);
-      __ CallCFunction(
-          ExternalReference::double_fp_operation(Token::MOD, isolate()),
-          0, 2);
-      // Move the result into the double result register.
-      __ GetCFunctionDoubleResult(result);
-
-      // Restore the saved registers.
-      __ MultiPop(saved_regs);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
-  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  // Other architectures use a nop here to signal that there is no inlined
-  // patchable code. MIPS does not need the nop, since our marker instruction
-  // (andi zero_reg) never occurs in normal code.
-}
-
-
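-// Returns the index of the next block that will actually be emitted after
-// |block|, or -1 if every remaining label has been replaced.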
-int LCodeGen::GetNextEmittedBlock(int block) {
-  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
-    LLabel* label = chunk_->GetLabel(i);
-    if (!label->HasReplacement()) return i;
-  }
-  return -1;
-}
-
-
-void LCodeGen::EmitBranch(int left_block, int right_block,
-                          Condition cc, Register src1, const Operand& src2) {
-  int next_block = GetNextEmittedBlock(current_block_);
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
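-  // Fall through where possible: when one target is the next emitted block,
-  // emit a single branch to the other target; otherwise emit both branches.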
-  if (right_block == left_block) {
-    EmitGoto(left_block);
-  } else if (left_block == next_block) {
-    __ Branch(chunk_->GetAssemblyLabel(right_block),
-              NegateCondition(cc), src1, src2);
-  } else if (right_block == next_block) {
-    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
-  } else {
-    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
-    __ Branch(chunk_->GetAssemblyLabel(right_block));
-  }
-}
-
-
-void LCodeGen::EmitBranchF(int left_block, int right_block,
-                           Condition cc, FPURegister src1, FPURegister src2) {
-  int next_block = GetNextEmittedBlock(current_block_);
-  right_block = chunk_->LookupDestination(right_block);
-  left_block = chunk_->LookupDestination(left_block);
-  if (right_block == left_block) {
-    EmitGoto(left_block);
-  } else if (left_block == next_block) {
-    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
-               NegateCondition(cc), src1, src2);
-  } else if (right_block == next_block) {
-    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
-  } else {
-    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
-    __ Branch(chunk_->GetAssemblyLabel(right_block));
-  }
-}
-
-
-void LCodeGen::DoBranch(LBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsInteger32()) {
-    Register reg = ToRegister(instr->InputAt(0));
-    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
-  } else if (r.IsDouble()) {
-    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
-    // Test the double value. Zero and NaN are false.
-    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->InputAt(0));
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsBoolean()) {
-      __ LoadRoot(at, Heap::kTrueValueRootIndex);
-      EmitBranch(true_block, false_block, eq, reg, Operand(at));
-    } else if (type.IsSmi()) {
-      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
-    } else {
-      Label* true_label = chunk_->GetAssemblyLabel(true_block);
-      Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
-      // Avoid deopts in the case where we've never executed this path before.
-      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
-
-      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
-        // undefined -> false.
-        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
-      }
-      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
-        // Boolean -> its value.
-        __ LoadRoot(at, Heap::kTrueValueRootIndex);
-        __ Branch(true_label, eq, reg, Operand(at));
-        __ LoadRoot(at, Heap::kFalseValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
-      }
-      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
-        // 'null' -> false.
-        __ LoadRoot(at, Heap::kNullValueRootIndex);
-        __ Branch(false_label, eq, reg, Operand(at));
-      }
-
-      if (expected.Contains(ToBooleanStub::SMI)) {
-        // Smis: 0 -> false, all other -> true.
-        __ Branch(false_label, eq, reg, Operand(zero_reg));
-        __ JumpIfSmi(reg, true_label);
-      } else if (expected.NeedsMap()) {
-        // If we need a map later and have a Smi -> deopt.
-        __ And(at, reg, Operand(kSmiTagMask));
-        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
-      }
-
-      const Register map = scratch0();
-      if (expected.NeedsMap()) {
-        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
-        if (expected.CanBeUndetectable()) {
-          // Undetectable -> false.
-          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
-          __ And(at, at, Operand(1 << Map::kIsUndetectable));
-          __ Branch(false_label, ne, at, Operand(zero_reg));
-        }
-      }
-
-      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
-        // spec object -> true.
-        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
-      }
-
-      if (expected.Contains(ToBooleanStub::STRING)) {
-        // String value -> false iff empty.
-        Label not_string;
-        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
-        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
-        __ Branch(true_label, ne, at, Operand(zero_reg));
-        __ Branch(false_label);
-        __ bind(&not_string);
-      }
-
-      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
-        // heap number -> false iff +0, -0, or NaN.
-        DoubleRegister dbl_scratch = double_scratch0();
-        Label not_heap_number;
-        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-        __ Branch(&not_heap_number, ne, map, Operand(at));
-        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
-        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
-        // Falls through if dbl_scratch == 0.
-        __ Branch(false_label);
-        __ bind(&not_heap_number);
-      }
-
-      // We've seen something for the first time -> deopt.
-      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
-    }
-  }
-}
-
-
-void LCodeGen::EmitGoto(int block) {
-  block = chunk_->LookupDestination(block);
-  int next_block = GetNextEmittedBlock(current_block_);
-  if (block != next_block) {
-    __ jmp(chunk_->GetAssemblyLabel(block));
-  }
-}
-
-
-void LCodeGen::DoGoto(LGoto* instr) {
-  EmitGoto(instr->block_id());
-}
-
-
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
-  Condition cond = kNoCondition;
-  switch (op) {
-    case Token::EQ:
-    case Token::EQ_STRICT:
-      cond = eq;
-      break;
-    case Token::LT:
-      cond = is_unsigned ? lo : lt;
-      break;
-    case Token::GT:
-      cond = is_unsigned ? hi : gt;
-      break;
-    case Token::LTE:
-      cond = is_unsigned ? ls : le;
-      break;
-    case Token::GTE:
-      cond = is_unsigned ? hs : ge;
-      break;
-    case Token::IN:
-    case Token::INSTANCEOF:
-    default:
-      UNREACHABLE();
-  }
-  return cond;
-}
-
-
-void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
-  LOperand* left = instr->InputAt(0);
-  LOperand* right = instr->InputAt(1);
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  Condition cond = TokenToCondition(instr->op(), false);
-
-  if (left->IsConstantOperand() && right->IsConstantOperand()) {
-    // We can statically evaluate the comparison.
-    double left_val = ToDouble(LConstantOperand::cast(left));
-    double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
-    EmitGoto(next_block);
-  } else {
-    if (instr->is_double()) {
-      // Compare left and right as doubles and load the
-      // resulting flags into the normal status register.
-      FPURegister left_reg = ToDoubleRegister(left);
-      FPURegister right_reg = ToDoubleRegister(right);
-
-      // If a NaN is involved, i.e. the result is unordered,
-      // jump to false block label.
-      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
-                 left_reg, right_reg);
-
-      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
-    } else {
-      Register cmp_left;
-      Operand cmp_right = Operand(0);
-
-      if (right->IsConstantOperand()) {
-        cmp_left = ToRegister(left);
-        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
-      } else if (left->IsConstantOperand()) {
-        cmp_left = ToRegister(right);
-        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
-        // We transposed the operands. Reverse the condition.
-        cond = ReverseCondition(cond);
-      } else {
-        cmp_left = ToRegister(left);
-        cmp_right = Operand(ToRegister(right));
-      }
-
-      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
-    }
-  }
-}
-
-
-void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
-  Register left = ToRegister(instr->InputAt(0));
-  Register right = ToRegister(instr->InputAt(1));
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  EmitBranch(true_block, false_block, eq, left, Operand(right));
-}
-
-
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
-  Register left = ToRegister(instr->InputAt(0));
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  EmitBranch(true_block, false_block, eq, left,
-             Operand(instr->hydrogen()->right()));
-}
-
-
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
-  Register scratch = scratch0();
-  Register reg = ToRegister(instr->InputAt(0));
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be an undetectable object.
-  if (instr->hydrogen()->representation().IsSpecialization() ||
-      instr->hydrogen()->type().IsSmi()) {
-    EmitGoto(false_block);
-    return;
-  }
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
-  __ LoadRoot(at, nil_value);
-  if (instr->kind() == kStrictEquality) {
-    EmitBranch(true_block, false_block, eq, reg, Operand(at));
-  } else {
-    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
-    Label* true_label = chunk_->GetAssemblyLabel(true_block);
-    Label* false_label = chunk_->GetAssemblyLabel(false_block);
-    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
-    __ LoadRoot(at, other_nil_value);  // In the delay slot.
-    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
-    __ JumpIfSmi(reg, false_label);  // In the delay slot.
-    // Check for undetectable objects by looking in the bit field in
-    // the map. The object has already been smi checked.
-    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
-    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
-    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
-  }
-}
-
-
-Condition LCodeGen::EmitIsObject(Register input,
-                                 Register temp1,
-                                 Register temp2,
-                                 Label* is_not_object,
-                                 Label* is_object) {
-  __ JumpIfSmi(input, is_not_object);
-
-  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
-  __ Branch(is_object, eq, input, Operand(temp2));
-
-  // Load map.
-  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
-  // Undetectable objects behave like undefined.
-  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
-  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
-  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
-
-  // Load instance type and check that it is in object type range.
-  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
-  __ Branch(is_not_object,
-            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-
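-  // The instance type is left in |temp2|; the caller branches on the returned
-  // condition against LAST_NONCALLABLE_SPEC_OBJECT_TYPE.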
-  return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp1 = ToRegister(instr->TempAt(0));
-  Register temp2 = scratch0();
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition true_cond =
-      EmitIsObject(reg, temp1, temp2, false_label, true_label);
-
-  EmitBranch(true_block, false_block, true_cond, temp2,
-             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
-Condition LCodeGen::EmitIsString(Register input,
-                                 Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
-  __ GetObjectType(input, temp1, temp1);
-
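-  // The instance type is left in |temp1|; the caller branches on the returned
-  // condition against FIRST_NONSTRING_TYPE.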
-  return lt;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp1 = ToRegister(instr->TempAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition true_cond =
-      EmitIsString(reg, temp1, false_label);
-
-  EmitBranch(true_block, false_block, true_cond, temp1,
-             Operand(FIRST_NONSTRING_TYPE));
-}
-
-
-void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
-  __ And(at, input_reg, kSmiTagMask);
-  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
-  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
-  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
-  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
-  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
-}
-
-
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = ComputeCompareCondition(op);
-
-  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
-}
-
-
-static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
-  InstanceType from = instr->from();
-  InstanceType to = instr->to();
-  if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
-  return from;
-}
-
-
-static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
-  InstanceType from = instr->from();
-  InstanceType to = instr->to();
-  if (from == to) return eq;
-  if (to == LAST_TYPE) return hs;
-  if (from == FIRST_TYPE) return ls;
-  UNREACHABLE();
-  return eq;
-}
-
-
-void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
-  Register scratch = scratch0();
-  Register input = ToRegister(instr->InputAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  __ JumpIfSmi(input, false_label);
-
-  __ GetObjectType(input, scratch, scratch);
-  EmitBranch(true_block,
-             false_block,
-             BranchCondition(instr->hydrogen()),
-             scratch,
-             Operand(TestType(instr->hydrogen())));
-}
-
-
-void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  if (FLAG_debug_code) {
-    __ AbortIfNotString(input);
-  }
-
-  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
-  __ IndexFromHash(result, result);
-}
-
-
-void LCodeGen::DoHasCachedArrayIndexAndBranch(
-    LHasCachedArrayIndexAndBranch* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  __ lw(scratch,
-         FieldMemOperand(input, String::kHashFieldOffset));
-  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
-  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
-}
-
-
-// Branches to a label, or falls through leaving the constructor's class name
-// in the temp register for the caller to compare. Trashes the temp registers,
-// but not the input.
-void LCodeGen::EmitClassOfTest(Label* is_true,
-                               Label* is_false,
-                               Handle<String> class_name,
-                               Register input,
-                               Register temp,
-                               Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
-
-  __ JumpIfSmi(input, is_false);
-
-  if (class_name->IsEqualTo(CStrVector("Function"))) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-
-    __ GetObjectType(input, temp, temp2);
-    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
-    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
-    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
-  } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ GetObjectType(input, temp, temp2);
-    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  }
-
-  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
-  // Check if the constructor in the map is a function.
-  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
-
-  // Objects with a non-function constructor have class 'Object'.
-  __ GetObjectType(temp, temp2, temp2);
-  if (class_name->IsEqualTo(CStrVector("Object"))) {
-    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
-  } else {
-    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
-  }
-
-  // temp now contains the constructor function. Grab the
-  // instance class name from there.
-  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(temp, FieldMemOperand(temp,
-                               SharedFunctionInfo::kInstanceClassNameOffset));
-  // The class name we are testing against is a symbol because it's a literal.
-  // The name in the constructor is a symbol because of the way the context is
-  // booted.  This routine isn't expected to work for random API-created
-  // classes and it doesn't have to because you can't access it with natives
-  // syntax.  Since both sides are symbols it is sufficient to use an identity
-  // comparison.
-
-  // End with the address of this class_name instance in temp register.
-  // On MIPS, the caller must do the comparison with Handle<String> class_name.
-}
-
-
-void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register temp = scratch0();
-  Register temp2 = ToRegister(instr->TempAt(0));
-  Handle<String> class_name = instr->hydrogen()->class_name();
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
-
-  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
-}
-
-
-void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-  int true_block = instr->true_block_id();
-  int false_block = instr->false_block_id();
-
-  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
-  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
-}
-
-
-void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  Label true_label, done;
-  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
-  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
-  Register result = ToRegister(instr->result());
-  ASSERT(result.is(v0));
-
-  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
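-  // The stub leaves 0 in the result register iff the object is an instance
-  // of the function.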
-  __ Branch(&true_label, eq, result, Operand(zero_reg));
-  __ li(result, Operand(factory()->false_value()));
-  __ Branch(&done);
-  __ bind(&true_label);
-  __ li(result, Operand(factory()->true_value()));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
-   public:
-    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
-                                  LInstanceOfKnownGlobal* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() {
-      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
-    }
-    virtual LInstruction* instr() { return instr_; }
-    Label* map_check() { return &map_check_; }
-
-   private:
-    LInstanceOfKnownGlobal* instr_;
-    Label map_check_;
-  };
-
-  DeferredInstanceOfKnownGlobal* deferred;
-  deferred = new DeferredInstanceOfKnownGlobal(this, instr);
-
-  Label done, false_result;
-  Register object = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-  Register result = ToRegister(instr->result());
-
-  ASSERT(object.is(a0));
-  ASSERT(result.is(v0));
-
-  // A Smi is not instance of anything.
-  __ JumpIfSmi(object, &false_result);
-
-  // This is the inlined call site instanceof cache. The two occurrences of
-  // the
-  // hole value will be patched to the last map/result pair generated by the
-  // instanceof stub.
-  Label cache_miss;
-  Register map = temp;
-  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
-
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-  __ bind(deferred->map_check());  // Label for calculating code patching.
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch with
-  // the cached map.
-  Handle<JSGlobalPropertyCell> cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ li(at, Operand(Handle<Object>(cell)));
-  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
-  __ Branch(&cache_miss, ne, map, Operand(at));
-  // We use Factory::the_hole_value() on purpose instead of loading from the
-  // root array to force relocation to be able to later patch
-  // with true or false.
-  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
-  __ Branch(&done);
-
-  // The inlined call site cache did not match. Check null and string before
-  // calling the deferred code.
-  __ bind(&cache_miss);
-  // Null is not instance of anything.
-  __ LoadRoot(temp, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, object, Operand(temp));
-
-  // String values are not instances of anything.
-  Condition cc = __ IsObjectStringType(object, temp, temp);
-  __ Branch(&false_result, cc, temp, Operand(zero_reg));
-
-  // Go to the deferred code.
-  __ Branch(deferred->entry());
-
-  __ bind(&false_result);
-  __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
-  // Here result holds either true or false. The deferred code also produces
-  // a true or false object.
-  __ bind(deferred->exit());
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                               Label* map_check) {
-  Register result = ToRegister(instr->result());
-  ASSERT(result.is(v0));
-
-  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kArgsInRegisters);
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kCallSiteInlineCheck);
-  flags = static_cast<InstanceofStub::Flags>(
-      flags | InstanceofStub::kReturnTrueFalseObject);
-  InstanceofStub stub(flags);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
-  // Get the temp register reserved by the instruction. This needs to be t0
-  // because its slot in the pushed safepoint register area is used to
-  // communicate the offset to the location of the map check.
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(temp.is(t0));
-  __ LoadHeapObject(InstanceofStub::right(), instr->function());
-  static const int kAdditionalDelta = 7;
-  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
-  Label before_push_delta;
-  __ bind(&before_push_delta);
-  {
-    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
-    __ StoreToSafepointRegisterSlot(temp, temp);
-  }
-  CallCodeGeneric(stub.GetCode(),
-                  RelocInfo::CODE_TARGET,
-                  instr,
-                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LEnvironment* env = instr->deoptimization_environment();
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-  // Put the result value into the result register slot and
-  // restore all registers.
-  __ StoreToSafepointRegisterSlot(result, result);
-}
-
-
-void LCodeGen::DoCmpT(LCmpT* instr) {
-  Token::Value op = instr->op();
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  // On MIPS there is no need for a "no inlined smi code" marker (nop).
-
-  Condition condition = ComputeCompareCondition(op);
-  // A minor optimization that relies on LoadRoot always emitting exactly one
-  // instruction: the true-value load executes in the branch delay slot, so it
-  // always runs, and the false-value load overwrites it only when the branch
-  // is not taken.
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
-  Label done;
-  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
-  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
-  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoReturn(LReturn* instr) {
-  if (FLAG_trace) {
-    // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns its parameter in v0.
-    __ push(v0);
-    __ CallRuntime(Runtime::kTraceExit, 1);
-  }
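-  // Tear down the frame: restore ra and fp, then drop the parameters plus
-  // the receiver (hence the +1) from the caller's stack.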
-  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
-  __ mov(sp, fp);
-  __ Pop(ra, fp);
-  __ Addu(sp, sp, Operand(sp_delta));
-  __ Jump(ra);
-}
-
-
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
-  Register result = ToRegister(instr->result());
-  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
-  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
-  }
-}
-
-
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->global_object()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  __ li(a2, Operand(instr->name()));
-  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
-                                             : RelocInfo::CODE_TARGET_CONTEXT;
-  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, mode, instr);
-}
-
-
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->value());
-  Register cell = scratch0();
-
-  // Load the cell.
-  __ li(cell, Operand(instr->hydrogen()->cell()));
-
-  // If the cell we are storing to contains the hole it could have
-  // been deleted from the property dictionary. In that case, we need
-  // to update the property details in the property dictionary to mark
-  // it as no longer deleted.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // We use a temp to check the payload.
-    Register payload = ToRegister(instr->TempAt(0));
-    __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
-  }
-
-  // Store the value.
-  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
-  // Cells are always rescanned, so no write barrier here.
-}
-
-
-void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->global_object()).is(a1));
-  ASSERT(ToRegister(instr->value()).is(a0));
-
-  __ li(a2, Operand(instr->name()));
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
-}
-
-
-void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
-  Register context = ToRegister(instr->context());
-  Register result = ToRegister(instr->result());
-
-  __ lw(result, ContextOperand(context, instr->slot_index()));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
-    } else {
-      Label is_not_hole;
-      __ Branch(&is_not_hole, ne, result, Operand(at));
-      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-      __ bind(&is_not_hole);
-    }
-  }
-}
-
-
-void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
-  Register context = ToRegister(instr->context());
-  Register value = ToRegister(instr->value());
-  Register scratch = scratch0();
-  MemOperand target = ContextOperand(context, instr->slot_index());
-
-  Label skip_assignment;
-
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, target);
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
-    } else {
-      __ Branch(&skip_assignment, ne, scratch, Operand(at));
-    }
-  }
-
-  __ sw(value, target);
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    __ RecordWriteContextSlot(context,
-                              target.offset(),
-                              value,
-                              scratch0(),
-                              kRAHasBeenSaved,
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
-  }
-
-  __ bind(&skip_assignment);
-}
-
-
-void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
-  Register object = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  if (instr->hydrogen()->is_in_object()) {
-    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
-  } else {
-    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
-  }
-}
-
-
-void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
-                                               Register object,
-                                               Handle<Map> type,
-                                               Handle<String> name) {
-  LookupResult lookup(isolate());
-  type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsFound() &&
-         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
-  if (lookup.type() == FIELD) {
-    int index = lookup.GetLocalFieldIndexFromMap(*type);
-    int offset = index * kPointerSize;
-    if (index < 0) {
-      // Negative property indices are in-object properties, indexed
-      // from the end of the fixed part of the object.
-      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
-    } else {
-      // Non-negative property indices are in the properties array.
-      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
-    }
-  } else {
-    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
-  }
-}
-
-
-void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
-  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-  int map_count = instr->hydrogen()->types()->length();
-  Handle<String> name = instr->hydrogen()->name();
-  if (map_count == 0) {
-    ASSERT(instr->hydrogen()->need_generic());
-    __ li(a2, Operand(name));
-    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-    CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  } else {
-    Label done;
-    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-    for (int i = 0; i < map_count - 1; ++i) {
-      Handle<Map> map = instr->hydrogen()->types()->at(i);
-      Label next;
-      __ Branch(&next, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&next);
-    }
-    Handle<Map> map = instr->hydrogen()->types()->last();
-    if (instr->hydrogen()->need_generic()) {
-      Label generic;
-      __ Branch(&generic, ne, scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-      __ Branch(&done);
-      __ bind(&generic);
-      __ li(a2, Operand(name));
-      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-      CallCode(ic, RelocInfo::CODE_TARGET, instr);
-    } else {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
-      EmitLoadFieldOrConstantFunction(result, object, map, name);
-    }
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  // Name is always in a2.
-  __ li(a2, Operand(instr->name()));
-  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
-  Register scratch = scratch0();
-  Register function = ToRegister(instr->function());
-  Register result = ToRegister(instr->result());
-
-  // Check that the function really is a function. Load map into the
-  // result register.
-  __ GetObjectType(function, result, scratch);
-  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
-
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
-
-  // Get the prototype or initial map from the function.
-  __ lw(result,
-         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Check that the function has a prototype or an initial map.
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  __ GetObjectType(result, scratch, scratch);
-  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
-  // Get the prototype from the initial map.
-  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  __ Branch(&done);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  __ bind(&non_instance);
-  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
-
-  // All done.
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoLoadElements(LLoadElements* instr) {
-  Register result = ToRegister(instr->result());
-  Register input = ToRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-
-  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
-  if (FLAG_debug_code) {
-    Label done, fail;
-    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
-    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
-    __ Branch(&done, eq, scratch, Operand(at));
-    // |scratch| still contains |input|'s map.
-    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
-    __ Ext(scratch, scratch, Map::kElementsKindShift,
-           Map::kElementsKindBitCount);
-    __ Branch(&done, eq, scratch,
-              Operand(FAST_ELEMENTS));
-    __ Branch(&fail, lt, scratch,
-              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
-    __ Branch(&done, le, scratch,
-              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
-    __ bind(&fail);
-    __ Abort("Check for fast or external elements failed.");
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoLoadExternalArrayPointer(
-    LLoadExternalArrayPointer* instr) {
-  Register to_reg = ToRegister(instr->result());
-  Register from_reg = ToRegister(instr->InputAt(0));
-  __ lw(to_reg, FieldMemOperand(from_reg,
-                                ExternalArray::kExternalPointerOffset));
-}
-
-
-void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
-  Register arguments = ToRegister(instr->arguments());
-  Register length = ToRegister(instr->length());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
-  // Bail out if index is not a valid argument index. The unsigned comparison
-  // below catches negative indices for free.
-
-  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
-  // as is done on ARM. It will save us an instruction.
-  DeoptimizeIf(ls, instr->environment(), length, Operand(index));
-
-  // There are two words between the frame pointer and the last argument.
-  // Subtracting from length accounts for one of them; add one more.
-  __ subu(length, length, index);
-  __ Addu(length, length, Operand(1));
-  __ sll(length, length, kPointerSizeLog2);
-  __ Addu(at, arguments, Operand(length));
-  __ lw(result, MemOperand(at, 0));
-}
-
-
-void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
-  Register elements = ToRegister(instr->elements());
-  Register key = EmitLoadRegister(instr->key(), scratch0());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  // Load the result.
-  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
-  __ addu(scratch, elements, scratch);
-  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-
-  // Check for the hole value.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
-  }
-}
-
-
-void LCodeGen::DoLoadKeyedFastDoubleElement(
-    LLoadKeyedFastDoubleElement* instr) {
-  Register elements = ToRegister(instr->elements());
-  bool key_is_constant = instr->key()->IsConstantOperand();
-  Register key = no_reg;
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  Register scratch = scratch0();
-
-  int shift_size =
-      ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int constant_key = 0;
-  if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
-    if (constant_key & 0xF0000000) {
-      Abort("array index constant value too big.");
-    }
-  } else {
-    key = ToRegister(instr->key());
-  }
-
-  if (key_is_constant) {
-    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
-            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  } else {
-    __ sll(scratch, key, shift_size);
-    __ Addu(elements, elements, Operand(scratch));
-    __ Addu(elements, elements,
-            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  }
-
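-  // The hole is encoded as a NaN with a distinguished upper word; load the
-  // upper 32 bits of the double and deoptimize if they match the hole.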
-  __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-  DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
-
-  __ ldc1(result, MemOperand(elements));
-}
-
-
-void LCodeGen::DoLoadKeyedSpecializedArrayElement(
-    LLoadKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = no_reg;
-  ElementsKind elements_kind = instr->elements_kind();
-  bool key_is_constant = instr->key()->IsConstantOperand();
-  int constant_key = 0;
-  if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
-    if (constant_key & 0xF0000000) {
-      Abort("array index constant value too big.");
-    }
-  } else {
-    key = ToRegister(instr->key());
-  }
-  int shift_size = ElementsKindToShiftSize(elements_kind);
-
-  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    FPURegister result = ToDoubleRegister(instr->result());
-    if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
-    } else {
-      __ sll(scratch0(), key, shift_size);
-      __ Addu(scratch0(), scratch0(), external_pointer);
-    }
-
-    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ lwc1(result, MemOperand(scratch0()));
-      __ cvt_d_s(result, result);
-    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ ldc1(result, MemOperand(scratch0()));
-    }
-  } else {
-    Register result = ToRegister(instr->result());
-    Register scratch = scratch0();
-    MemOperand mem_operand(zero_reg);
-    if (key_is_constant) {
-      mem_operand = MemOperand(external_pointer,
-                               constant_key * (1 << shift_size));
-    } else {
-      __ sll(scratch, key, shift_size);
-      __ Addu(scratch, scratch, external_pointer);
-      mem_operand = MemOperand(scratch);
-    }
-    switch (elements_kind) {
-      case EXTERNAL_BYTE_ELEMENTS:
-        __ lb(result, mem_operand);
-        break;
-      case EXTERNAL_PIXEL_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-        __ lbu(result, mem_operand);
-        break;
-      case EXTERNAL_SHORT_ELEMENTS:
-        __ lh(result, mem_operand);
-        break;
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-        __ lhu(result, mem_operand);
-        break;
-      case EXTERNAL_INT_ELEMENTS:
-        __ lw(result, mem_operand);
-        break;
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-        __ lw(result, mem_operand);
-        // TODO(danno): we could be more clever here, perhaps having a special
-        // version of the stub that detects if the overflow case actually
-        // happens, and generate code that returns a double rather than int.
-        DeoptimizeIf(Ugreater_equal, instr->environment(),
-            result, Operand(0x80000000));
-        break;
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(a1));
-  ASSERT(ToRegister(instr->key()).is(a0));
-
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
-  Register scratch = scratch0();
-  Register temp = scratch1();
-  Register result = ToRegister(instr->result());
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label done, adapted;
-  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
-  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Result is the frame pointer for the frame if not adapted and for the real
-  // frame below the adaptor frame if adapted.
-  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
-  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
-}
-
-
-void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
-  Register elem = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-
-  Label done;
-
-  // If no arguments adaptor frame the number of arguments is fixed.
-  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
-  __ Branch(&done, eq, fp, Operand(elem));
-
-  // Arguments adaptor frame present. Get argument length from there.
-  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ lw(result,
-        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(result);
-
-  // Argument length is in result register.
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register scratch = scratch0();
-
-  // If the receiver is null or undefined, we have to pass the global
-  // object as a receiver to normal functions. Values have to be
-  // passed unchanged to builtins and strict-mode functions.
-  Label global_object, receiver_ok;
-
-  // Do not transform the receiver to object for strict mode
-  // functions.
-  __ lw(scratch,
-         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(scratch,
-         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
-
-  // Do not transform the receiver to object for builtins.
-  int32_t strict_mode_function_mask =
-                  1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
-  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
-  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
-  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
-
-  // Normal function. Replace undefined or null with global receiver.
-  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
-  __ Branch(&global_object, eq, receiver, Operand(scratch));
-  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-  __ Branch(&global_object, eq, receiver, Operand(scratch));
-
-  // Deoptimize if the receiver is not a JS object.
-  __ And(scratch, receiver, Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
-
-  __ GetObjectType(receiver, scratch, scratch);
-  DeoptimizeIf(lt, instr->environment(),
-               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
-  __ Branch(&receiver_ok);
-
-  __ bind(&global_object);
-  __ lw(receiver, GlobalObjectOperand());
-  __ lw(receiver,
-         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-  __ bind(&receiver_ok);
-}
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  Register scratch = scratch0();
-  ASSERT(receiver.is(a0));  // Used for parameter count.
-  ASSERT(function.is(a1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  // Copy the arguments to this function possibly from the
-  // adaptor frame below it.
-  const uint32_t kArgumentsLimit = 1 * KB;
-  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
-
-  // Push the receiver and use the register to keep the original
-  // number of arguments.
-  __ push(receiver);
-  __ Move(receiver, length);
-  // The arguments start one pointer size past |elements|.
-  __ Addu(elements, elements, Operand(1 * kPointerSize));
-
-  // Loop through the arguments pushing them onto the execution
-  // stack.
-  Label invoke, loop;
-  // length is a small non-negative integer, due to the test above.
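-  // The shifts below execute in the branch delay slots, so |scratch| holds
-  // length * kPointerSize on entry to each loop iteration.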
-  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
-  __ sll(scratch, length, 2);
-  __ bind(&loop);
-  __ Addu(scratch, elements, scratch);
-  __ lw(scratch, MemOperand(scratch));
-  __ push(scratch);
-  __ Subu(length, length, Operand(1));
-  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
-  __ sll(scratch, length, 2);
-
-  __ bind(&invoke);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
-  // The number of arguments is stored in receiver, which is a0, as expected
-  // by InvokeFunction.
-  ParameterCount actual(receiver);
-  __ InvokeFunction(function, actual, CALL_FUNCTION,
-                    safepoint_generator, CALL_AS_METHOD);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoPushArgument(LPushArgument* instr) {
-  LOperand* argument = instr->InputAt(0);
-  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
-    Abort("DoPushArgument not implemented for double type.");
-  } else {
-    Register argument_reg = EmitLoadRegister(argument, at);
-    __ push(argument_reg);
-  }
-}
-
-
-void LCodeGen::DoThisFunction(LThisFunction* instr) {
-  Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
-}
-
-
-void LCodeGen::DoContext(LContext* instr) {
-  Register result = ToRegister(instr->result());
-  __ mov(result, cp);
-}
-
-
-void LCodeGen::DoOuterContext(LOuterContext* instr) {
-  Register context = ToRegister(instr->context());
-  Register result = ToRegister(instr->result());
-  __ lw(result,
-        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
-}
-
-
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
-  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
-  // The context is the first argument.
-  __ Push(cp, scratch0(), scratch1());
-  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
-void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
-  Register context = ToRegister(instr->context());
-  Register result = ToRegister(instr->result());
-  __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
-}
-
-
-void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
-  Register global = ToRegister(instr->global());
-  Register result = ToRegister(instr->result());
-  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
-}
-
-
-void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
-                                 int arity,
-                                 LInstruction* instr,
-                                 CallKind call_kind) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
-
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-
-  if (can_invoke_directly) {
-    __ LoadHeapObject(a1, function);
-    // Change context if needed.
-    bool change_context =
-        (info()->closure()->context() != function->context()) ||
-        scope()->contains_with() ||
-        (scope()->num_heap_slots() > 0);
-    if (change_context) {
-      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-    }
-
-    // Set a0 to the arguments count if adaption is not needed. Assumes that
-    // a0 is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
-      __ li(a0, Operand(arity));
-    }
-
-    // Invoke function.
-    __ SetCallKind(t1, call_kind);
-    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-    __ Call(at);
-
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
-  } else {
-    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
-  }
-
-  // Restore context.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-  __ mov(a0, v0);
-  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
-}
-
-
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  // Deoptimize if not a heap number.
-  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
-
-  Label done;
-  Register exponent = scratch0();
-  scratch = no_reg;
-  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-  // Check the sign of the argument. If the argument is positive, just
-  // return it.
-  __ Move(result, input);
-  __ And(at, exponent, Operand(HeapNumber::kSignMask));
-  __ Branch(&done, eq, at, Operand(zero_reg));
-
-  // Input is negative. Reverse its sign.
-  // Preserve the value of all registers.
-  {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
-    // Registers were saved at the safepoint, so we can use
-    // many scratch registers.
-    Register tmp1 = input.is(a1) ? a0 : a1;
-    Register tmp2 = input.is(a2) ? a0 : a2;
-    Register tmp3 = input.is(a3) ? a0 : a3;
-    Register tmp4 = input.is(t0) ? a0 : t0;
-
-    // exponent: floating point exponent value.
-
-    Label allocated, slow;
-    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
-    __ Branch(&allocated);
-
-    // Slow case: Call the runtime system to do the number allocation.
-    __ bind(&slow);
-
-    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
-    // Set the pointer to the new heap number in tmp1.
-    if (!tmp1.is(v0)) __ mov(tmp1, v0);
-    // Restore input_reg after call to runtime.
-    __ LoadFromSafepointRegisterSlot(input, input);
-    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
-
-    __ bind(&allocated);
-    // exponent: floating point exponent value.
-    // tmp1: allocated heap number.
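-    // Clearing the sign bit of the exponent word computes the absolute
-    // value of the heap number.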
-    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
-    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
-    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
-    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
-
-    __ StoreToSafepointRegisterSlot(tmp1, result);
-  }
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-  Label done;
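-  // The mov below sits in the branch delay slot: a non-negative input is
-  // copied to result whether or not the branch is taken.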
-  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
-  __ mov(result, input);
-  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
-  __ subu(result, zero_reg, input);
-  // Overflow if result is still negative, i.e. 0x80000000.
-  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
-  // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
-   public:
-    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
-                                    LUnaryMathOperation* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() {
-      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
-    }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LUnaryMathOperation* instr_;
-  };
-
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsDouble()) {
-    FPURegister input = ToDoubleRegister(instr->InputAt(0));
-    FPURegister result = ToDoubleRegister(instr->result());
-    __ abs_d(result, input);
-  } else if (r.IsInteger32()) {
-    EmitIntegerMathAbs(instr);
-  } else {
-    // Representation is tagged.
-    DeferredMathAbsTaggedHeapNumber* deferred =
-        new DeferredMathAbsTaggedHeapNumber(this, instr);
-    Register input = ToRegister(instr->InputAt(0));
-    // Smi check.
-    __ JumpIfNotSmi(input, deferred->entry());
-    // If smi, handle it directly.
-    EmitIntegerMathAbs(instr);
-    __ bind(deferred->exit());
-  }
-}
-
-
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  FPURegister single_scratch = double_scratch0().low();
-  Register scratch1 = scratch0();
-  Register except_flag = ToRegister(instr->TempAt(0));
-
-  __ EmitFPUTruncate(kRoundToMinusInf,
-                     single_scratch,
-                     input,
-                     scratch1,
-                     except_flag);
-
-  // Deopt if the operation did not succeed.
-  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
-  // Load the result.
-  __ mfc1(result, single_scratch);
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // Test for -0.
-    Label done;
-    __ Branch(&done, ne, result, Operand(zero_reg));
-    __ mfc1(scratch1, input.high());
-    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
-    __ bind(&done);
-  }
-}
-
-
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-  Label done, check_sign_on_zero;
-
-  // Extract exponent bits.
-  __ mfc1(result, input.high());
-  __ Ext(scratch,
-         result,
-         HeapNumber::kExponentShift,
-         HeapNumber::kExponentBits);
-
-  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
-  Label skip1;
-  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
-  __ mov(result, zero_reg);
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ Branch(&check_sign_on_zero);
-  } else {
-    __ Branch(&done);
-  }
-  __ bind(&skip1);
-
-  // The following conversion will not work with numbers
-  // outside of ]-2^32, 2^32[.
-  DeoptimizeIf(ge, instr->environment(), scratch,
-               Operand(HeapNumber::kExponentBias + 32));
-
-  // Save the original sign for later comparison.
-  __ And(scratch, result, Operand(HeapNumber::kSignMask));
-
-  __ Move(double_scratch0(), 0.5);
-  __ add_d(double_scratch0(), input, double_scratch0());
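-  // From here on, rounding is implemented as floor(input + 0.5): the sum
-  // is truncated towards minus infinity below.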
-
-  // Check the sign of the result: if the sign changed, the input value
-  // was in [-0.5, 0[ and the result should be -0.
-  __ mfc1(result, double_scratch0().high());
-  __ Xor(result, result, Operand(scratch));
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // 'mi' on the ARM port corresponds to 'lt' here.
-    DeoptimizeIf(lt, instr->environment(), result,
-                 Operand(zero_reg));
-  } else {
-    Label skip2;
-    // 'mi' on the ARM port corresponds to 'lt' here; its negation is 'ge'.
-    __ Branch(&skip2, ge, result, Operand(zero_reg));
-    __ mov(result, zero_reg);
-    __ Branch(&done);
-    __ bind(&skip2);
-  }
-
-  Register except_flag = scratch;
-
-  __ EmitFPUTruncate(kRoundToMinusInf,
-                     double_scratch0().low(),
-                     double_scratch0(),
-                     result,
-                     except_flag);
-
-  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
-  __ mfc1(result, double_scratch0().low());
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    // Test for -0.
-    __ Branch(&done, ne, result, Operand(zero_reg));
-    __ bind(&check_sign_on_zero);
-    __ mfc1(scratch, input.high());
-    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
-  }
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  __ sqrt_d(result, input);
-}
-
-
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
-  DoubleRegister result = ToDoubleRegister(instr->result());
-  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
-
-  ASSERT(!input.is(result));
-
-  // Note that according to ECMA-262 15.8.2.13:
-  // Math.pow(-Infinity, 0.5) == Infinity
-  // Math.sqrt(-Infinity) == NaN
-  Label done;
-  __ Move(temp, -V8_INFINITY);
-  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
-  // Set up Infinity in the delay slot.
-  // result is overwritten if the branch is not taken.
-  __ neg_d(result, temp);
-
-  // Add +0 to convert -0 to +0.
-  __ add_d(result, input, kDoubleRegZero);
-  __ sqrt_d(result, result);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoPower(LPower* instr) {
-  Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
-         ToDoubleRegister(instr->InputAt(1)).is(f4));
-  ASSERT(!instr->InputAt(1)->IsRegister() ||
-         ToRegister(instr->InputAt(1)).is(a2));
-  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
-  ASSERT(ToDoubleRegister(instr->result()).is(f0));
-
-  if (exponent_type.IsTagged()) {
-    Label no_deopt;
-    __ JumpIfSmi(a2, &no_deopt);
-    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
-    __ bind(&no_deopt);
-    MathPowStub stub(MathPowStub::TAGGED);
-    __ CallStub(&stub);
-  } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(MathPowStub::INTEGER);
-    __ CallStub(&stub);
-  } else {
-    ASSERT(exponent_type.IsDouble());
-    MathPowStub stub(MathPowStub::DOUBLE);
-    __ CallStub(&stub);
-  }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom: public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LRandom* instr_;
-  };
-
-  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(f0));
-  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
-
-  static const int kSeedSize = sizeof(uint32_t);
-  STATIC_ASSERT(kPointerSize == kSeedSize);
-
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
-  static const int kRandomSeedOffset =
-      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
-  // a2: FixedArray of the global context's random seeds
-
-  // Load state[0].
-  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
-  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
-  // Load state[1].
-  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
-  // a1: state[0].
-  // a0: state[1].
-
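-  // Update both seeds with a 16-bit multiply-with-carry step, then combine
-  // them into a single 32-bit random bit pattern.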
-  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  __ And(a3, a1, Operand(0xFFFF));
-  __ li(t0, Operand(18273));
-  __ mul(a3, a3, t0);
-  __ srl(a1, a1, 16);
-  __ Addu(a1, a3, a1);
-  // Save state[0].
-  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
-
-  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ And(a3, a0, Operand(0xFFFF));
-  __ li(t0, Operand(36969));
-  __ mul(a3, a3, t0);
-  __ srl(a0, a0, 16);
-  __ Addu(a0, a3, a0);
-  // Save state[1].
-  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
-
-  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ And(a0, a0, Operand(0x3FFFF));
-  __ sll(a1, a1, 14);
-  __ Addu(v0, a0, a1);
-
-  __ bind(deferred->exit());
-
-  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
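-  // With the random bits in the low mantissa word, f12 holds
-  // 2^20 + random * 2^-32; subtracting 2^20 leaves a double in [0, 1).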
-  __ li(a2, Operand(0x41300000));
-  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
-  __ Move(f12, v0, a2);
-  // Move 0x4130000000000000 to FPU.
-  __ Move(f14, zero_reg, a2);
-  // Subtract to get the result.
-  __ sub_d(f0, f12, f14);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1, scratch0());
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-  // Return value is in v0.
-}
-
-
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  TranscendentalCacheStub stub(TranscendentalCache::LOG,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
-  switch (instr->op()) {
-    case kMathAbs:
-      DoMathAbs(instr);
-      break;
-    case kMathFloor:
-      DoMathFloor(instr);
-      break;
-    case kMathRound:
-      DoMathRound(instr);
-      break;
-    case kMathSqrt:
-      DoMathSqrt(instr);
-      break;
-    case kMathPowHalf:
-      DoMathPowHalf(instr);
-      break;
-    case kMathCos:
-      DoMathCos(instr);
-      break;
-    case kMathSin:
-      DoMathSin(instr);
-      break;
-    case kMathTan:
-      DoMathTan(instr);
-      break;
-    case kMathLog:
-      DoMathLog(instr);
-      break;
-    default:
-      Abort("Unimplemented type of LUnaryMathOperation.");
-      UNREACHABLE();
-  }
-}
-
-
-void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(a1));
-  ASSERT(instr->HasPointerMap());
-  ASSERT(instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount count(instr->arity());
-  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  Handle<Code> ic =
-      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallNamed(LCallNamed* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
-  Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
-  __ li(a2, Operand(instr->name()));
-  CallCode(ic, mode, instr);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  int arity = instr->arity();
-  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
-  Handle<Code> ic =
-      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
-  __ li(a2, Operand(instr->name()));
-  CallCode(ic, mode, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-}
-
-
-void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
-}
-
-
-void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
-
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-  __ li(a0, Operand(instr->arity()));
-  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
-}
-
-
-void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  CallRuntime(instr->function(), instr->arity(), instr);
-}
-
-
-void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
-  Register object = ToRegister(instr->object());
-  Register value = ToRegister(instr->value());
-  Register scratch = scratch0();
-  int offset = instr->offset();
-
-  ASSERT(!object.is(value));
-
-  if (!instr->transition().is_null()) {
-    __ li(scratch, Operand(instr->transition()));
-    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
-  }
-
-  // Do the store.
-  HType type = instr->hydrogen()->value()->type();
-  SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-  if (instr->is_in_object()) {
-    __ sw(value, FieldMemOperand(object, offset));
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
-      // Update the write barrier for the object for in-object properties.
-      __ RecordWriteField(object,
-                          offset,
-                          value,
-                          scratch,
-                          kRAHasBeenSaved,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
-    }
-  } else {
-    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ sw(value, FieldMemOperand(scratch, offset));
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
-      // Update the write barrier for the properties array.
-      // object is used as a scratch register.
-      __ RecordWriteField(scratch,
-                          offset,
-                          value,
-                          object,
-                          kRAHasBeenSaved,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
-    }
-  }
-}
-
-
-void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(a1));
-  ASSERT(ToRegister(instr->value()).is(a0));
-
-  // Name is always in a2.
-  __ li(a2, Operand(instr->name()));
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
-      ? isolate()->builtins()->StoreIC_Initialize_Strict()
-      : isolate()->builtins()->StoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  DeoptimizeIf(hs,
-               instr->environment(),
-               ToRegister(instr->index()),
-               Operand(ToRegister(instr->length())));
-}
-
-
-void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
-  Register value = ToRegister(instr->value());
-  Register elements = ToRegister(instr->object());
-  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
-  Register scratch = scratch0();
-
-  // Do the store.
-  if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    int offset =
-        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
-    __ sw(value, FieldMemOperand(elements, offset));
-  } else {
-    __ sll(scratch, key, kPointerSizeLog2);
-    __ addu(scratch, elements, scratch);
-    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-  }
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-    // Compute address of modified element and store it into key register.
-    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   kRAHasBeenSaved,
-                   kSaveFPRegs,
-                   EMIT_REMEMBERED_SET,
-                   check_needed);
-  }
-}
-
-
-void LCodeGen::DoStoreKeyedFastDoubleElement(
-    LStoreKeyedFastDoubleElement* instr) {
-  DoubleRegister value = ToDoubleRegister(instr->value());
-  Register elements = ToRegister(instr->elements());
-  Register key = no_reg;
-  Register scratch = scratch0();
-  bool key_is_constant = instr->key()->IsConstantOperand();
-  int constant_key = 0;
-  Label not_nan;
-
-  // Calculate the effective address of the slot in the array to store the
-  // double value.
-  if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
-    if (constant_key & 0xF0000000) {
-      Abort("array index constant value too big.");
-    }
-  } else {
-    key = ToRegister(instr->key());
-  }
-  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  if (key_is_constant) {
-    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
-            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  } else {
-    __ sll(scratch, key, shift_size);
-    __ Addu(scratch, elements, Operand(scratch));
-    __ Addu(scratch, scratch,
-            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  }
-
-  Label is_nan;
-  // Check for NaN. All NaNs must be canonicalized.
-  __ BranchF(NULL, &is_nan, eq, value, value);
-  __ Branch(&not_nan);
-
-  // Only load the canonical NaN if the comparison above signaled unordered.
-  __ bind(&is_nan);
-  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-
-  __ bind(&not_nan);
-  __ sdc1(value, MemOperand(scratch));
-}
-
-
-void LCodeGen::DoStoreKeyedSpecializedArrayElement(
-    LStoreKeyedSpecializedArrayElement* instr) {
-  Register external_pointer = ToRegister(instr->external_pointer());
-  Register key = no_reg;
-  ElementsKind elements_kind = instr->elements_kind();
-  bool key_is_constant = instr->key()->IsConstantOperand();
-  int constant_key = 0;
-  if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
-    if (constant_key & 0xF0000000) {
-      Abort("array index constant value too big.");
-    }
-  } else {
-    key = ToRegister(instr->key());
-  }
-  int shift_size = ElementsKindToShiftSize(elements_kind);
-
-  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
-      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    FPURegister value(ToDoubleRegister(instr->value()));
-    if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
-    } else {
-      __ sll(scratch0(), key, shift_size);
-      __ Addu(scratch0(), scratch0(), external_pointer);
-    }
-
-    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ cvt_s_d(double_scratch0(), value);
-      __ swc1(double_scratch0(), MemOperand(scratch0()));
-    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ sdc1(value, MemOperand(scratch0()));
-    }
-  } else {
-    Register value(ToRegister(instr->value()));
-    MemOperand mem_operand(zero_reg);
-    Register scratch = scratch0();
-    if (key_is_constant) {
-      mem_operand = MemOperand(external_pointer,
-                               constant_key * (1 << shift_size));
-    } else {
-      __ sll(scratch, key, shift_size);
-      __ Addu(scratch, scratch, external_pointer);
-      mem_operand = MemOperand(scratch);
-    }
-    switch (elements_kind) {
-      case EXTERNAL_PIXEL_ELEMENTS:
-      case EXTERNAL_BYTE_ELEMENTS:
-      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-        __ sb(value, mem_operand);
-        break;
-      case EXTERNAL_SHORT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-        __ sh(value, mem_operand);
-        break;
-      case EXTERNAL_INT_ELEMENTS:
-      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-        __ sw(value, mem_operand);
-        break;
-      case EXTERNAL_FLOAT_ELEMENTS:
-      case EXTERNAL_DOUBLE_ELEMENTS:
-      case FAST_DOUBLE_ELEMENTS:
-      case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
-      case DICTIONARY_ELEMENTS:
-      case NON_STRICT_ARGUMENTS_ELEMENTS:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
-void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->object()).is(a2));
-  ASSERT(ToRegister(instr->key()).is(a1));
-  ASSERT(ToRegister(instr->value()).is(a0));
-
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
-  Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_reg());
-  Register scratch = scratch0();
-
-  Handle<Map> from_map = instr->original_map();
-  Handle<Map> to_map = instr->transitioned_map();
-  ElementsKind from_kind = from_map->elements_kind();
-  ElementsKind to_kind = to_map->elements_kind();
-
-  __ mov(ToRegister(instr->result()), object_reg);
-
-  Label not_applicable;
-  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
-
-  __ li(new_map_reg, Operand(to_map));
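-  // A smi-only to object transition can simply install the new map, since
-  // both kinds use a tagged backing store; transitions involving double
-  // elements must rewrite the backing store, so they go through a stub.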
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
-    // Write barrier.
-    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      to_kind == FAST_DOUBLE_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(a2));
-    ASSERT(new_map_reg.is(a3));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(a2));
-    ASSERT(new_map_reg.is(a3));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
-  } else {
-    UNREACHABLE();
-  }
-  __ bind(&not_applicable);
-}
-
-
-void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  __ push(ToRegister(instr->left()));
-  __ push(ToRegister(instr->right()));
-  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt: public LDeferredCode {
-   public:
-    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LStringCharCodeAt* instr_;
-  };
-
-  DeferredStringCharCodeAt* deferred =
-      new DeferredStringCharCodeAt(this, instr);
-  StringCharLoadGenerator::Generate(masm(),
-                                    ToRegister(instr->string()),
-                                    ToRegister(instr->index()),
-                                    ToRegister(instr->result()),
-                                    deferred->entry());
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
-  Register string = ToRegister(instr->string());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, zero_reg);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ push(string);
-  // Push the index as a smi. This is safe because of the checks in
-  // DoStringCharCodeAt above.
-  if (instr->index()->IsConstantOperand()) {
-    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
-    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
-    __ push(scratch);
-  } else {
-    Register index = ToRegister(instr->index());
-    __ SmiTag(index);
-    __ push(index);
-  }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
-  if (FLAG_debug_code) {
-    __ AbortIfNotSmi(v0);
-  }
-  __ SmiUntag(v0);
-  __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode: public LDeferredCode {
-   public:
-    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LStringCharFromCode* instr_;
-  };
-
-  DeferredStringCharFromCode* deferred =
-      new DeferredStringCharFromCode(this, instr);
-
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
-  Register char_code = ToRegister(instr->char_code());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-  ASSERT(!char_code.is(result));
-
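-  // Fast path: look the character up in the single character string cache.
-  // Codes above the ASCII range and cache misses (undefined entries) fall
-  // back to the deferred code.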
-  __ Branch(deferred->entry(), hi,
-            char_code, Operand(String::kMaxAsciiCharCode));
-  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
-  __ sll(scratch, char_code, kPointerSizeLog2);
-  __ Addu(result, result, scratch);
-  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
-  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-  __ Branch(deferred->entry(), eq, result, Operand(scratch));
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
-  Register char_code = ToRegister(instr->char_code());
-  Register result = ToRegister(instr->result());
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, zero_reg);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ SmiTag(char_code);
-  __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
-  __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoStringLength(LStringLength* instr) {
-  Register string = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
-}
-
-
-void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister() || input->IsStackSlot());
-  LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
-  FPURegister single_scratch = double_scratch0().low();
-  if (input->IsStackSlot()) {
-    Register scratch = scratch0();
-    __ lw(scratch, ToMemOperand(input));
-    __ mtc1(scratch, single_scratch);
-  } else {
-    __ mtc1(ToRegister(input), single_scratch);
-  }
-  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
-}
-
-
-void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI: public LDeferredCode {
-   public:
-    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LNumberTagI* instr_;
-  };
-
-  Register src = ToRegister(instr->InputAt(0));
-  Register dst = ToRegister(instr->result());
-  Register overflow = scratch0();
-
-  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
-  __ SmiTagCheckOverflow(dst, src, overflow);
-  __ BranchOnOverflow(deferred->entry(), overflow);
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
-  Label slow;
-  Register src = ToRegister(instr->InputAt(0));
-  Register dst = ToRegister(instr->result());
-  FPURegister dbl_scratch = double_scratch0();
-
-  // Preserve the value of all registers.
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-
-  // There was overflow, so bits 30 and 31 of the original integer
-  // disagree. Try to allocate a heap number in new space and store
-  // the value in there. If that fails, call the runtime system.
-  Label done;
-  if (dst.is(src)) {
-    __ SmiUntag(src, dst);
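-    // The overflowing SmiTag shifted out the original bit 31, and untagging
-    // sign-extended from bit 30; flipping bit 31 restores the input value.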
-    __ Xor(src, src, Operand(0x80000000));
-  }
-  __ mtc1(src, dbl_scratch);
-  __ cvt_d_w(dbl_scratch, dbl_scratch);
-  if (FLAG_inline_new) {
-    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
-    __ Move(dst, t1);
-    __ Branch(&done);
-  }
-
-  // Slow case: Call the runtime system to do the number allocation.
-  __ bind(&slow);
-
-  // TODO(3095996): Put a valid pointer value in the stack slot where the result
-  // register is stored, as this register is in the pointer map, but contains an
-  // integer value.
-  __ StoreToSafepointRegisterSlot(zero_reg, dst);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
-  __ Move(dst, v0);
-
-  // Done. Put the value in dbl_scratch into the value of the allocated heap
-  // number.
-  __ bind(&done);
-  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
-  __ StoreToSafepointRegisterSlot(dst, dst);
-}
-
-
-void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD: public LDeferredCode {
-   public:
-    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LNumberTagD* instr_;
-  };
-
-  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-  Register reg = ToRegister(instr->result());
-  Register temp1 = ToRegister(instr->TempAt(0));
-  Register temp2 = ToRegister(instr->TempAt(1));
-
-  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
-  if (FLAG_inline_new) {
-    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
-  } else {
-    __ Branch(deferred->entry());
-  }
-  __ bind(deferred->exit());
-  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
-}
-
-
-void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  Register reg = ToRegister(instr->result());
-  __ mov(reg, zero_reg);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
-  __ StoreToSafepointRegisterSlot(v0, reg);
-}
-
-
-void LCodeGen::DoSmiTag(LSmiTag* instr) {
-  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
-  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
-}
-
-
-void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  Register scratch = scratch0();
-  Register input = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  if (instr->needs_check()) {
-    STATIC_ASSERT(kHeapObjectTag == 1);
-    // If the input is a HeapObject, value of scratch won't be zero.
-    __ And(scratch, input, Operand(kHeapObjectTag));
-    __ SmiUntag(result, input);
-    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
-  } else {
-    __ SmiUntag(result, input);
-  }
-}
-
-
-void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                DoubleRegister result_reg,
-                                bool deoptimize_on_undefined,
-                                bool deoptimize_on_minus_zero,
-                                LEnvironment* env) {
-  Register scratch = scratch0();
-
-  Label load_smi, heap_number, done;
-
-  // Smi check.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
-  // Heap number map check.
-  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  if (deoptimize_on_undefined) {
-    DeoptimizeIf(ne, env, scratch, Operand(at));
-  } else {
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch, Operand(at));
-
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, env, input_reg, Operand(at));
-
-    // Convert undefined to NaN.
-    __ LoadRoot(at, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
-    __ Branch(&done);
-
-    __ bind(&heap_number);
-  }
-  // Heap number to double register conversion.
-  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
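-    // Minus zero has a zero low word and only the sign bit set in the
-    // high word, so testing those two words identifies it exactly.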
-    __ mfc1(at, result_reg.low());
-    __ Branch(&done, ne, at, Operand(zero_reg));
-    __ mfc1(scratch, result_reg.high());
-    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
-  }
-  __ Branch(&done);
-
-  // Smi to double register conversion
-  __ bind(&load_smi);
-  // scratch: untagged value of input_reg
-  __ mtc1(scratch, result_reg);
-  __ cvt_d_w(result_reg, result_reg);
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  Register input_reg = ToRegister(instr->InputAt(0));
-  Register scratch1 = scratch0();
-  Register scratch2 = ToRegister(instr->TempAt(0));
-  DoubleRegister double_scratch = double_scratch0();
-  FPURegister single_scratch = double_scratch.low();
-
-  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
-  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
-
-  Label done;
-
-  // The input is a tagged HeapObject.
-  // Heap number map check.
-  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  // The map in scratch1 and the heap number map in 'at' are used by the
-  // checks in both branches of the if below.
-
-  if (instr->truncating()) {
-    Register scratch3 = ToRegister(instr->TempAt(1));
-    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
-    ASSERT(!scratch3.is(input_reg) &&
-           !scratch3.is(scratch1) &&
-           !scratch3.is(scratch2));
-    // Performs a truncating conversion of a floating point number as used by
-    // the JS bitwise operations.
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
-    ASSERT(ToRegister(instr->result()).is(input_reg));
-    __ mov(input_reg, zero_reg);
-    __ Branch(&done);
-
-    __ bind(&heap_number);
-    __ ldc1(double_scratch2,
-            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-    __ EmitECMATruncate(input_reg,
-                        double_scratch2,
-                        single_scratch,
-                        scratch1,
-                        scratch2,
-                        scratch3);
-  } else {
-    // Deoptimize if we don't have a heap number.
-    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
-
-    // Load the double value.
-    __ ldc1(double_scratch,
-            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
-
-    Register except_flag = scratch2;
-    __ EmitFPUTruncate(kRoundToZero,
-                       single_scratch,
-                       double_scratch,
-                       scratch1,
-                       except_flag,
-                       kCheckForInexactConversion);
-
-    // Deopt if the operation did not succeed.
-    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
-    // Load the result.
-    __ mfc1(input_reg, single_scratch);
-
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ Branch(&done, ne, input_reg, Operand(zero_reg));
-
-      __ mfc1(scratch1, double_scratch.high());
-      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
-    }
-  }
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI: public LDeferredCode {
-   public:
-    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LTaggedToI* instr_;
-  };
-
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
-
-  Register input_reg = ToRegister(input);
-
-  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
-
-  // Let the deferred code handle the HeapObject case.
-  __ JumpIfNotSmi(input_reg, deferred->entry());
-
-  // Smi to int32 conversion.
-  __ SmiUntag(input_reg);
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
-  LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
-
-  Register input_reg = ToRegister(input);
-  DoubleRegister result_reg = ToDoubleRegister(result);
-
-  EmitNumberUntagD(input_reg, result_reg,
-                   instr->hydrogen()->deoptimize_on_undefined(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment());
-}
-
-
-void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
-  Register result_reg = ToRegister(instr->result());
-  Register scratch1 = scratch0();
-  Register scratch2 = ToRegister(instr->TempAt(0));
-  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
-  DoubleRegister double_scratch = double_scratch0();
-  FPURegister single_scratch = double_scratch0().low();
-
-  if (instr->truncating()) {
-    Register scratch3 = ToRegister(instr->TempAt(1));
-    __ EmitECMATruncate(result_reg,
-                        double_input,
-                        single_scratch,
-                        scratch1,
-                        scratch2,
-                        scratch3);
-  } else {
-    Register except_flag = scratch2;
-
-    __ EmitFPUTruncate(kRoundToMinusInf,
-                       single_scratch,
-                       double_input,
-                       scratch1,
-                       except_flag,
-                       kCheckForInexactConversion);
-
-    // Deopt if the operation did not succeed (except_flag != 0).
-    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
-    // Load the result.
-    __ mfc1(result_reg, single_scratch);
-  }
-}
-
-
-void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
-  LOperand* input = instr->InputAt(0);
-  __ And(at, ToRegister(input), Operand(kSmiTagMask));
-  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
-  LOperand* input = instr->InputAt(0);
-  __ And(at, ToRegister(input), Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-
-  __ GetObjectType(input, scratch, scratch);
-
-  if (instr->hydrogen()->is_interval_check()) {
-    InstanceType first;
-    InstanceType last;
-    instr->hydrogen()->GetCheckInterval(&first, &last);
-
-    // If there is only one type in the interval check for equality.
-    if (first == last) {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
-    } else {
-      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
-      // Omit check for the last type.
-      if (last != LAST_TYPE) {
-        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
-      }
-    }
-  } else {
-    uint8_t mask;
-    uint8_t tag;
-    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
-
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
-      __ And(at, scratch, mask);
-      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
-          at, Operand(zero_reg));
-    } else {
-      __ And(scratch, scratch, Operand(mask));
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
-    }
-  }
-}
-
-
-void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  Register reg = ToRegister(instr->value());
-  Handle<JSFunction> target = instr->hydrogen()->target();
-  if (isolate()->heap()->InNewSpace(*target)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(target);
-    __ li(at, Operand(Handle<Object>(cell)));
-    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
-    DeoptimizeIf(ne, instr->environment(), reg,
-                 Operand(at));
-  } else {
-    DeoptimizeIf(ne, instr->environment(), reg,
-                 Operand(target));
-  }
-}
-
-
-void LCodeGen::DoCheckMapCommon(Register reg,
-                                Register scratch,
-                                Handle<Map> map,
-                                CompareMapMode mode,
-                                LEnvironment* env) {
-  Label success;
-  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
-  DeoptimizeIf(al, env);
-  __ bind(&success);
-}
-
-
-void LCodeGen::DoCheckMap(LCheckMap* instr) {
-  Register scratch = scratch0();
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
-  Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, scratch, map, instr->hydrogen()->mode(),
-                   instr->environment());
-}
-
-
-void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
-  Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
-  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
-}
-
-
-void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  Register unclamped_reg = ToRegister(instr->unclamped());
-  Register result_reg = ToRegister(instr->result());
-  __ ClampUint8(result_reg, unclamped_reg);
-}
-
-
-void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  Register scratch = scratch0();
-  Register input_reg = ToRegister(instr->unclamped());
-  Register result_reg = ToRegister(instr->result());
-  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
-  Label is_smi, done, heap_number;
-
-  // Both smi and heap number cases are handled.
-  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
-
-  // Check for heap number
-  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
-  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
-
-  // Check for undefined. Undefined is converted to zero for clamping
-  // conversions.
-  DeoptimizeIf(ne, instr->environment(), input_reg,
-               Operand(factory()->undefined_value()));
-  __ mov(result_reg, zero_reg);
-  __ jmp(&done);
-
-  // Heap number
-  __ bind(&heap_number);
-  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
-                                             HeapNumber::kValueOffset));
-  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
-  __ jmp(&done);
-
-  __ bind(&is_smi);
-  __ ClampUint8(result_reg, scratch);
-
-  __ bind(&done);
-}
-
-
-void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
-  Register temp1 = ToRegister(instr->TempAt(0));
-  Register temp2 = ToRegister(instr->TempAt(1));
-
-  Handle<JSObject> holder = instr->holder();
-  Handle<JSObject> current_prototype = instr->prototype();
-
-  // Load prototype object.
-  __ LoadHeapObject(temp1, current_prototype);
-
-  // Check prototype maps up to the holder.
-  while (!current_prototype.is_identical_to(holder)) {
-    DoCheckMapCommon(temp1, temp2,
-                     Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-    current_prototype =
-        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
-    // Load next prototype object.
-    __ LoadHeapObject(temp1, current_prototype);
-  }
-
-  // Check the holder map.
-  DoCheckMapCommon(temp1, temp2,
-                   Handle<Map>(current_prototype->map()),
-                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
-  class DeferredAllocateObject: public LDeferredCode {
-   public:
-    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LAllocateObject* instr_;
-  };
-
-  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Register scratch2 = ToRegister(instr->TempAt(1));
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
-  int instance_size = initial_map->instance_size();
-  ASSERT(initial_map->pre_allocated_property_fields() +
-         initial_map->unused_property_fields() -
-         initial_map->inobject_properties() == 0);
-
-  // Allocate memory for the object.  The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
-  __ AllocateInNewSpace(instance_size,
-                        result,
-                        scratch,
-                        scratch2,
-                        deferred->entry(),
-                        TAG_OBJECT);
-
-  // Load the initial map.
-  Register map = scratch;
-  __ LoadHeapObject(map, constructor);
-  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // Initialize map and fields of the newly allocated object.
-  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
-  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
-  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
-  if (initial_map->inobject_properties() != 0) {
-    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < initial_map->inobject_properties(); i++) {
-      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-      __ sw(scratch, FieldMemOperand(result, property_offset));
-    }
-  }
-
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
-  Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ mov(result, zero_reg);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ LoadHeapObject(a0, constructor);
-  __ push(a0);
-  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
-  __ StoreToSafepointRegisterSlot(v0, result);
-}
-
-
-void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Heap* heap = isolate()->heap();
-  ElementsKind boilerplate_elements_kind =
-      instr->hydrogen()->boilerplate_elements_kind();
-
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
-    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
-    // Load map into a2.
-    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-    // Load the map's "bit field 2".
-    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
-    DeoptimizeIf(ne,
-                 instr->environment(),
-                 a2,
-                 Operand(boilerplate_elements_kind));
-  }
-  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
-  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  // Boilerplate already exists, constant elements are never accessed.
-  // Pass an empty fixed array.
-  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
-  __ Push(a3, a2, a1);
-
-  // Pick the right runtime function or stub to call.
-  int length = instr->hydrogen()->length();
-  if (instr->hydrogen()->IsCopyOnWrite()) {
-    ASSERT(instr->hydrogen()->depth() == 1);
-    FastCloneShallowArrayStub::Mode mode =
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  } else if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
-  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
-    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
-  } else {
-    FastCloneShallowArrayStub::Mode mode =
-        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
-            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
-                            Register result,
-                            Register source,
-                            int* offset) {
-  ASSERT(!source.is(a2));
-  ASSERT(!result.is(a2));
-
-  // Only elements backing stores for non-COW arrays need to be copied.
-  Handle<FixedArrayBase> elements(object->elements());
-  bool has_elements = elements->length() > 0 &&
-      elements->map() != isolate()->heap()->fixed_cow_array_map();
-
-  // Increase the offset so that subsequent objects end up right after
-  // this object and its backing store.
-  int object_offset = *offset;
-  int object_size = object->map()->instance_size();
-  int elements_offset = *offset + object_size;
-  int elements_size = has_elements ? elements->Size() : 0;
-  *offset += object_size + elements_size;
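-  // For example, copying an object with instance size 16 and a 24-byte
-  // elements store when *offset is 0 places the object at offsets [0, 16),
-  // the elements at [16, 40), and leaves *offset at 40 for the next object.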
-
-  // Copy object header.
-  ASSERT(object->properties()->length() == 0);
-  int inobject_properties = object->map()->inobject_properties();
-  int header_size = object_size - inobject_properties * kPointerSize;
-  for (int i = 0; i < header_size; i += kPointerSize) {
-    if (has_elements && i == JSObject::kElementsOffset) {
-      __ Addu(a2, result, Operand(elements_offset));
-    } else {
-      __ lw(a2, FieldMemOperand(source, i));
-    }
-    __ sw(a2, FieldMemOperand(result, object_offset + i));
-  }
-
-  // Copy in-object properties.
-  for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
-    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
-    if (value->IsJSObject()) {
-      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-      __ Addu(a2, result, Operand(*offset));
-      __ sw(a2, FieldMemOperand(result, total_offset));
-      __ LoadHeapObject(source, value_object);
-      EmitDeepCopy(value_object, result, source, offset);
-    } else if (value->IsHeapObject()) {
-      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
-      __ sw(a2, FieldMemOperand(result, total_offset));
-    } else {
-      __ li(a2, Operand(value));
-      __ sw(a2, FieldMemOperand(result, total_offset));
-    }
-  }
-
-
-  if (has_elements) {
-    // Copy elements backing store header.
-    __ LoadHeapObject(source, elements);
-    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
-      __ lw(a2, FieldMemOperand(source, i));
-      __ sw(a2, FieldMemOperand(result, elements_offset + i));
-    }
-
-    // Copy elements backing store content.
-    int elements_length = has_elements ? elements->length() : 0;
-    if (elements->IsFixedDoubleArray()) {
-      Handle<FixedDoubleArray> double_array =
-          Handle<FixedDoubleArray>::cast(elements);
-      for (int i = 0; i < elements_length; i++) {
-        int64_t value = double_array->get_representation(i);
-        // We only support little-endian mode.
-        int32_t value_low = value & 0xFFFFFFFF;
-        int32_t value_high = value >> 32;
-        int total_offset =
-            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
-        __ li(a2, Operand(value_low));
-        __ sw(a2, FieldMemOperand(result, total_offset));
-        __ li(a2, Operand(value_high));
-        __ sw(a2, FieldMemOperand(result, total_offset + 4));
-      }
-    } else if (elements->IsFixedArray()) {
-      for (int i = 0; i < elements_length; i++) {
-        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
-        if (value->IsJSObject()) {
-          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-          __ Addu(a2, result, Operand(*offset));
-          __ sw(a2, FieldMemOperand(result, total_offset));
-          __ LoadHeapObject(source, value_object);
-          EmitDeepCopy(value_object, result, source, offset);
-        } else if (value->IsHeapObject()) {
-          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
-          __ sw(a2, FieldMemOperand(result, total_offset));
-        } else {
-          __ li(a2, Operand(value));
-          __ sw(a2, FieldMemOperand(result, total_offset));
-        }
-      }
-    } else {
-      UNREACHABLE();
-    }
-  }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
-  int size = instr->hydrogen()->total_size();
-
-  // Allocate all objects that are part of the literal in one big
-  // allocation. This avoids multiple limit checks.
-  Label allocated, runtime_allocate;
-  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ li(a0, Operand(Smi::FromInt(size)));
-  __ push(a0);
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
-  __ bind(&allocated);
-  int offset = 0;
-  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
-  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
-  ASSERT_EQ(size, offset);
-}
-
-
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
-  Handle<FixedArray> constant_properties =
-      instr->hydrogen()->constant_properties();
-
-  // Set up the parameters to the stub/runtime call.
-  __ LoadHeapObject(t0, literals);
-  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ li(a2, Operand(constant_properties));
-  int flags = instr->hydrogen()->fast_elements()
-      ? ObjectLiteral::kFastElements
-      : ObjectLiteral::kNoFlags;
-  __ li(a1, Operand(Smi::FromInt(flags)));
-  __ Push(t0, a3, a2, a1);
-
-  // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
-  if (instr->hydrogen()->depth() > 1) {
-    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
-  } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  }
-}
-
-
-void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
-  __ push(a0);
-  CallRuntime(Runtime::kToFastProperties, 1, instr);
-}
-
-
-void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  Label materialized;
-  // Registers will be used as follows:
-  // a3 = JS function.
-  // t3 = literals array.
-  // a1 = regexp literal.
-  // a0 = regexp literal clone.
-  // a2 and t0-t2 are used as temporaries.
-  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
-  int literal_offset = FixedArray::kHeaderSize +
-      instr->hydrogen()->literal_index() * kPointerSize;
-  __ lw(a1, FieldMemOperand(t3, literal_offset));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&materialized, ne, a1, Operand(at));
-
-  // Create regexp literal using runtime function
-  // Result will be in v0.
-  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ li(t1, Operand(instr->hydrogen()->pattern()));
-  __ li(t0, Operand(instr->hydrogen()->flags()));
-  __ Push(t3, t2, t1, t0);
-  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
-  __ mov(a1, v0);
-
-  __ bind(&materialized);
-  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-  Label allocated, runtime_allocate;
-
-  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ li(a0, Operand(Smi::FromInt(size)));
-  __ Push(a1, a0);
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-  __ pop(a1);
-
-  __ bind(&allocated);
-  // Copy the content into the newly allocated memory.
-  // (Unroll copy loop once for better throughput).
-  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
-    __ lw(a3, FieldMemOperand(a1, i));
-    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
-    __ sw(a3, FieldMemOperand(v0, i));
-    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
-  }
-  if ((size % (2 * kPointerSize)) != 0) {
-    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
-    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
-  }
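-  // For example, with a size of five words the loop above copies words 0-3
-  // in two iterations and the trailing check copies the remaining word.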
-}
-
-
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  // Use the fast case closure allocation code that allocates in new
-  // space for nested functions that don't need literals cloning.
-  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
-  bool pretenure = instr->hydrogen()->pretenure();
-  if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode());
-    __ li(a1, Operand(shared_info));
-    __ push(a1);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-  } else {
-    __ li(a2, Operand(shared_info));
-    __ li(a1, Operand(pretenure
-                       ? factory()->true_value()
-                       : factory()->false_value()));
-    __ Push(cp, a2, a1);
-    CallRuntime(Runtime::kNewClosure, 3, instr);
-  }
-}
-
-
-void LCodeGen::DoTypeof(LTypeof* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
-  Register input = ToRegister(instr->InputAt(0));
-  __ push(input);
-  CallRuntime(Runtime::kTypeof, 1, instr);
-}
-
-
-void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
-  Register input = ToRegister(instr->InputAt(0));
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* true_label = chunk_->GetAssemblyLabel(true_block);
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Register cmp1 = no_reg;
-  Operand cmp2 = Operand(no_reg);
-
-  Condition final_branch_condition = EmitTypeofIs(true_label,
-                                                  false_label,
-                                                  input,
-                                                  instr->type_literal(),
-                                                  cmp1,
-                                                  cmp2);
-
-  ASSERT(cmp1.is_valid());
-  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
-
-  if (final_branch_condition != kNoCondition) {
-    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
-  }
-}
-
-
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
-                                 Label* false_label,
-                                 Register input,
-                                 Handle<String> type_name,
-                                 Register& cmp1,
-                                 Operand& cmp2) {
-  // This function makes heavy use of the branch delay slot, using it to load
-  // values that are always usable regardless of the type of the input
-  // register.
-  Condition final_branch_condition = kNoCondition;
-  Register scratch = scratch0();
-  if (type_name->Equals(heap()->number_symbol())) {
-    __ JumpIfSmi(input, true_label);
-    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    cmp1 = input;
-    cmp2 = Operand(at);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(heap()->string_symbol())) {
-    __ JumpIfSmi(input, false_label);
-    __ GetObjectType(input, input, scratch);
-    __ Branch(USE_DELAY_SLOT, false_label,
-              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
-    // input is an object, so it is safe to load its bit field even if we take
-    // the other branch.
-    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
-    __ And(at, at, 1 << Map::kIsUndetectable);
-    cmp1 = at;
-    cmp2 = Operand(zero_reg);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(heap()->boolean_symbol())) {
-    __ LoadRoot(at, Heap::kTrueValueRootIndex);
-    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
-    __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    cmp1 = at;
-    cmp2 = Operand(input);
-    final_branch_condition = eq;
-
-  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
-    __ LoadRoot(at, Heap::kNullValueRootIndex);
-    cmp1 = at;
-    cmp2 = Operand(input);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(heap()->undefined_symbol())) {
-    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
-    // The first instruction of JumpIfSmi is an And - it is safe in the delay
-    // slot.
-    __ JumpIfSmi(input, false_label);
-    // Check for undetectable objects => true.
-    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
-    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
-    __ And(at, at, 1 << Map::kIsUndetectable);
-    cmp1 = at;
-    cmp2 = Operand(zero_reg);
-    final_branch_condition = ne;
-
-  } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ JumpIfSmi(input, false_label);
-    __ GetObjectType(input, scratch, input);
-    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
-    cmp1 = input;
-    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
-    final_branch_condition = eq;
-
-  } else if (type_name->Equals(heap()->object_symbol())) {
-    __ JumpIfSmi(input, false_label);
-    if (!FLAG_harmony_typeof) {
-      __ LoadRoot(at, Heap::kNullValueRootIndex);
-      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
-    }
-    // input is an object, so GetObjectType is safe in the delay slot.
-    __ GetObjectType(input, input, scratch);
-    __ Branch(USE_DELAY_SLOT, false_label,
-              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    // Still an object, so the InstanceType can be loaded.
-    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
-    __ Branch(USE_DELAY_SLOT, false_label,
-              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    // Still an object, so the BitField can be loaded.
-    // Check for undetectable objects => false.
-    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
-    __ And(at, at, 1 << Map::kIsUndetectable);
-    cmp1 = at;
-    cmp2 = Operand(zero_reg);
-    final_branch_condition = eq;
-
-  } else {
-    cmp1 = at;
-    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
-    __ Branch(false_label);
-  }
-
-  return final_branch_condition;
-}
-
-
-void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
-  Register temp1 = ToRegister(instr->TempAt(0));
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  EmitIsConstructCall(temp1, scratch0());
-
-  EmitBranch(true_block, false_block, eq, temp1,
-             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
-}
-
-
-void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
-  ASSERT(!temp1.is(temp2));
-  // Get the frame pointer for the calling frame.
-  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-
-  // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
-  __ Branch(&check_frame_marker, ne, temp2,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
-
-  // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
-}
-
-
-void LCodeGen::EnsureSpaceForLazyDeopt() {
-  // Ensure that we have enough space after the previous lazy-bailout
-  // instruction for patching the code here.
-  int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
-    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
-    while (padding_size > 0) {
-      __ nop();
-      padding_size -= Assembler::kInstrSize;
-    }
-  }
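-  // For example, if only one instruction was emitted since the last lazy
-  // deopt point and patch_size() were three instructions, two nops would be
-  // emitted here.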
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
-  ASSERT(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
-  Register object = ToRegister(instr->object());
-  Register key = ToRegister(instr->key());
-  Register strict = scratch0();
-  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
-  __ Push(object, key, strict);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator safepoint_generator(
-      this, pointers, Safepoint::kLazyDeopt);
-  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoIn(LIn* instr) {
-  Register obj = ToRegister(instr->object());
-  Register key = ToRegister(instr->key());
-  __ Push(key, obj);
-  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
-  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
-}
-
-
-void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-  RecordSafepointWithLazyDeopt(
-      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-}
-
-
-void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck: public LDeferredCode {
-   public:
-    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LStackCheck* instr_;
-  };
-
-  ASSERT(instr->HasEnvironment());
-  LEnvironment* env = instr->environment();
-  // There is no LLazyBailout instruction for stack-checks. We have to
-  // prepare for lazy deoptimization explicitly here.
-  if (instr->hydrogen()->is_function_entry()) {
-    // Perform stack overflow check.
-    Label done;
-    __ LoadRoot(at, Heap::kStackLimitRootIndex);
-    __ Branch(&done, hs, sp, Operand(at));
-    StackCheckStub stub;
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-    EnsureSpaceForLazyDeopt();
-    __ bind(&done);
-    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
-    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-  } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
-    // Perform stack overflow check if this goto needs it before jumping.
-    DeferredStackCheck* deferred_stack_check =
-        new DeferredStackCheck(this, instr);
-    __ LoadRoot(at, Heap::kStackLimitRootIndex);
-    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
-    EnsureSpaceForLazyDeopt();
-    __ bind(instr->done_label());
-    deferred_stack_check->SetExit(instr->done_label());
-    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
-    // Don't record a deoptimization index for the safepoint here.
-    // This will be done explicitly when emitting call and the safepoint in
-    // the deferred code.
-  }
-}
-
-
-void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
-  // This is a pseudo-instruction that ensures that the environment here is
-  // properly registered for deoptimization and records the assembler's PC
-  // offset.
-  LEnvironment* environment = instr->environment();
-  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
-                                   instr->SpilledDoubleRegisterArray());
-
-  // If the environment were already registered, we would have no way of
-  // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(osr_pc_offset_ == -1);
-  osr_pc_offset_ = masm()->pc_offset();
-}
-
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  Register result = ToRegister(instr->result());
-  Register object = ToRegister(instr->object());
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
-
-  Register null_value = t1;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
-
-  __ And(at, object, kSmiTagMask);
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
-
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ GetObjectType(object, a1, a1);
-  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
-
-  Label use_cache, call_runtime;
-  ASSERT(object.is(a0));
-  __ CheckEnumCache(null_value, &call_runtime);
-
-  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ Branch(&use_cache);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(object);
-  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
-  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  ASSERT(result.is(v0));
-  __ LoadRoot(at, Heap::kMetaMapRootIndex);
-  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
-  __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
-  Register map = ToRegister(instr->map());
-  Register result = ToRegister(instr->result());
-  __ LoadInstanceDescriptors(map, result);
-  __ lw(result,
-        FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
-  __ lw(result,
-        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
-  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  Register map = ToRegister(instr->map());
-  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  Register object = ToRegister(instr->object());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-  Register scratch = scratch0();
-
-  Label out_of_object, done;
-  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
-  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.
-
-  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
-  __ Addu(scratch, object, scratch);
-  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
-
-  __ Branch(&done);
-
-  __ bind(&out_of_object);
-  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-  // Index is equal to the negated out-of-object property index plus 1.
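-  // For example, an index of -1 encodes out-of-object property 0, which is
-  // read from the first element of the properties array.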
-  __ Subu(scratch, result, scratch);
-  __ lw(result, FieldMemOperand(scratch,
-                                FixedArray::kHeaderSize - kPointerSize));
-  __ bind(&done);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index b508256..2aec684 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,421 +29,35 @@
 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
 #include "mips/lithium-mips.h"
-#include "mips/lithium-gap-resolver-mips.h"
+
 #include "deoptimizer.h"
 #include "safepoint-table.h"
 #include "scopes.h"
 
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it has not been ported to MIPS.
+
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LDeferredCode;
-class SafepointGenerator;
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
-      : chunk_(chunk),
-        masm_(assembler),
-        info_(info),
-        current_block_(-1),
-        current_instruction_(-1),
-        instructions_(chunk->instructions()),
-        deoptimizations_(4),
-        deopt_jump_table_(4),
-        deoptimization_literals_(8),
-        inlined_function_count_(0),
-        scope_(info->scope()),
-        status_(UNUSED),
-        deferred_(8),
-        osr_pc_offset_(-1),
-        last_lazy_deopt_pc_(0),
-        resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple) {
-    PopulateDeoptimizationLiteralsWithInlinedFunctions();
-  }
-
-
-  // Simple accessors.
-  MacroAssembler* masm() const { return masm_; }
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info_->isolate(); }
-  Factory* factory() const { return isolate()->factory(); }
-  Heap* heap() const { return isolate()->heap(); }
-
-  // Support for converting LOperands to assembler types.
-  // LOperand must be a register.
-  Register ToRegister(LOperand* op) const;
-
-  // LOperand is loaded into scratch, unless already a register.
-  Register EmitLoadRegister(LOperand* op, Register scratch);
-
-  // LOperand must be a double register.
-  DoubleRegister ToDoubleRegister(LOperand* op) const;
-
-  // LOperand is loaded into dbl_scratch, unless already a double register.
-  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
-                                        FloatRegister flt_scratch,
-                                        DoubleRegister dbl_scratch);
-  int ToInteger32(LConstantOperand* op) const;
-  double ToDouble(LConstantOperand* op) const;
-  Operand ToOperand(LOperand* op);
-  MemOperand ToMemOperand(LOperand* op) const;
-  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
-  MemOperand ToHighMemOperand(LOperand* op) const;
-
-  bool IsInteger32(LConstantOperand* op) const;
-  Handle<Object> ToHandle(LConstantOperand* op) const;
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
 
   // Try to generate code for the entire chunk, but it may fail if the
   // chunk contains constructs we cannot handle. Returns true if the
   // code generation attempt succeeded.
-  bool GenerateCode();
+  bool GenerateCode() {
+    UNIMPLEMENTED();
+    return false;
+  }
 
   // Finish the code by setting stack height, safepoint, and bailout
   // information on it.
-  void FinishCode(Handle<Code> code);
-
-  void DoDeferredNumberTagD(LNumberTagD* instr);
-  void DoDeferredNumberTagI(LNumberTagI* instr);
-  void DoDeferredTaggedToI(LTaggedToI* instr);
-  void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LStackCheck* instr);
-  void DoDeferredRandom(LRandom* instr);
-  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
-  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocateObject(LAllocateObject* instr);
-  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                       Label* map_check);
-
-  void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
-
-  // Parallel move support.
-  void DoParallelMove(LParallelMove* move);
-  void DoGap(LGap* instr);
-
-  // Emit frame translation commands for an environment.
-  void WriteTranslation(LEnvironment* environment, Translation* translation);
-
-  // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) void Do##type(L##type* node);
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
-  enum Status {
-    UNUSED,
-    GENERATING,
-    DONE,
-    ABORTED
-  };
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_generating() const { return status_ == GENERATING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  StrictModeFlag strict_mode_flag() const {
-    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
-  }
-
-  LChunk* chunk() const { return chunk_; }
-  Scope* scope() const { return scope_; }
-  HGraph* graph() const { return chunk_->graph(); }
-
-  Register scratch0() { return kLithiumScratchReg; }
-  Register scratch1() { return kLithiumScratchReg2; }
-  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
-
-  int GetNextEmittedBlock(int block);
-  LInstruction* GetNextInstruction();
-
-  void EmitClassOfTest(Label* if_true,
-                       Label* if_false,
-                       Handle<String> class_name,
-                       Register input,
-                       Register temporary,
-                       Register temporary2);
-
-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
-
-  void Abort(const char* format, ...);
-  void Comment(const char* format, ...);
-
-  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code); }
-
-  // Code generation passes.  Returns true if code generation should
-  // continue.
-  bool GeneratePrologue();
-  bool GenerateBody();
-  bool GenerateDeferredCode();
-  bool GenerateDeoptJumpTable();
-  bool GenerateSafepointTable();
-
-  enum SafepointMode {
-    RECORD_SIMPLE_SAFEPOINT,
-    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
-  };
-
-  void CallCode(Handle<Code> code,
-                RelocInfo::Mode mode,
-                LInstruction* instr);
-
-  void CallCodeGeneric(Handle<Code> code,
-                       RelocInfo::Mode mode,
-                       LInstruction* instr,
-                       SafepointMode safepoint_mode);
-
-  void CallRuntime(const Runtime::Function* function,
-                   int num_arguments,
-                   LInstruction* instr);
-
-  void CallRuntime(Runtime::FunctionId id,
-                   int num_arguments,
-                   LInstruction* instr) {
-    const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, num_arguments, instr);
-  }
-
-  void CallRuntimeFromDeferred(Runtime::FunctionId id,
-                               int argc,
-                               LInstruction* instr);
-
-  // Generate a direct call to a known function.  Expects the function
-  // to be in a1.
-  void CallKnownFunction(Handle<JSFunction> function,
-                         int arity,
-                         LInstruction* instr,
-                         CallKind call_kind);
-
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-
-  void RecordSafepointWithLazyDeopt(LInstruction* instr,
-                                    SafepointMode safepoint_mode);
-
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
-                                            Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc,
-                    LEnvironment* environment,
-                    Register src1 = zero_reg,
-                    const Operand& src2 = Operand(zero_reg));
-
-  void AddToTranslation(Translation* translation,
-                        LOperand* op,
-                        bool is_tagged);
-  void PopulateDeoptimizationData(Handle<Code> code);
-  int DefineDeoptimizationLiteral(Handle<Object> literal);
-
-  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
-
-  Register ToRegister(int index) const;
-  DoubleRegister ToDoubleRegister(int index) const;
-
-  // Specific math operations - used from DoUnaryMathOperation.
-  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
-  void DoMathAbs(LUnaryMathOperation* instr);
-  void DoMathFloor(LUnaryMathOperation* instr);
-  void DoMathRound(LUnaryMathOperation* instr);
-  void DoMathSqrt(LUnaryMathOperation* instr);
-  void DoMathPowHalf(LUnaryMathOperation* instr);
-  void DoMathLog(LUnaryMathOperation* instr);
-  void DoMathTan(LUnaryMathOperation* instr);
-  void DoMathCos(LUnaryMathOperation* instr);
-  void DoMathSin(LUnaryMathOperation* instr);
-
-  // Support for recording safepoint and position information.
-  void RecordSafepoint(LPointerMap* pointers,
-                       Safepoint::Kind kind,
-                       int arguments,
-                       Safepoint::DeoptMode mode);
-  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
-  void RecordSafepoint(Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegisters(LPointerMap* pointers,
-                                    int arguments,
-                                    Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
-                                              int arguments,
-                                              Safepoint::DeoptMode mode);
-  void RecordPosition(int position);
-
-  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block);
-  void EmitBranch(int left_block,
-                  int right_block,
-                  Condition cc,
-                  Register src1,
-                  const Operand& src2);
-  void EmitBranchF(int left_block,
-                   int right_block,
-                   Condition cc,
-                   FPURegister src1,
-                   FPURegister src2);
-  void EmitCmpI(LOperand* left, LOperand* right);
-  void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
-                        bool deoptimize_on_undefined,
-                        bool deoptimize_on_minus_zero,
-                        LEnvironment* env);
-
-  // Emits optimized code for typeof x == "y".  Modifies input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  // Returns two registers in cmp1 and cmp2 that can be used in the
-  // Branch instruction after EmitTypeofIs.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name,
-                         Register& cmp1,
-                         Operand& cmp2);
-
-  // Emits optimized code for %_IsObject(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsObject(Register input,
-                         Register temp1,
-                         Register temp2,
-                         Label* is_not_object,
-                         Label* is_object);
-
-  // Emits optimized code for %_IsString(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsString(Register input,
-                         Register temp1,
-                         Label* is_not_string);
-
-  // Emits optimized code for %_IsConstructCall().
-  // Caller should branch on equal condition.
-  void EmitIsConstructCall(Register temp1, Register temp2);
-
-  void EmitLoadFieldOrConstantFunction(Register result,
-                                       Register object,
-                                       Handle<Map> type,
-                                       Handle<String> name);
-
-  // Emits optimized code to deep-copy the contents of statically known
-  // object graphs (e.g. object literal boilerplate).
-  void EmitDeepCopy(Handle<JSObject> object,
-                    Register result,
-                    Register source,
-                    int* offset);
-
-  struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
-        : label(),
-          address(entry) { }
-    Label label;
-    Address address;
-  };
-
-  void EnsureSpaceForLazyDeopt();
-
-  LChunk* const chunk_;
-  MacroAssembler* const masm_;
-  CompilationInfo* const info_;
-
-  int current_block_;
-  int current_instruction_;
-  const ZoneList<LInstruction*>* instructions_;
-  ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<JumpTableEntry> deopt_jump_table_;
-  ZoneList<Handle<Object> > deoptimization_literals_;
-  int inlined_function_count_;
-  Scope* const scope_;
-  Status status_;
-  TranslationBuffer translations_;
-  ZoneList<LDeferredCode*> deferred_;
-  int osr_pc_offset_;
-  int last_lazy_deopt_pc_;
-
-  // Builder that keeps track of safepoints in the code. The table
-  // itself is emitted at the end of the generated code.
-  SafepointTableBuilder safepoints_;
-
-  // Compiles a set of parallel moves into a sequential list of moves.
-  LGapResolver resolver_;
-
-  Safepoint::Kind expected_safepoint_kind_;
-
-  class PushSafepointRegistersScope BASE_EMBEDDED {
-   public:
-    PushSafepointRegistersScope(LCodeGen* codegen,
-                                Safepoint::Kind kind)
-        : codegen_(codegen) {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = kind;
-
-      switch (codegen_->expected_safepoint_kind_) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PushSafepointRegisters();
-          break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PushSafepointRegistersAndDoubles();
-          break;
-        default:
-          UNREACHABLE();
-      }
-    }
-
-    ~PushSafepointRegistersScope() {
-      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
-      ASSERT((kind & Safepoint::kWithRegisters) != 0);
-      switch (kind) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PopSafepointRegisters();
-          break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PopSafepointRegistersAndDoubles();
-          break;
-        default:
-          UNREACHABLE();
-      }
-      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
-    }
-
-   private:
-    LCodeGen* codegen_;
-  };
-
-  friend class LDeferredCode;
-  friend class LEnvironment;
-  friend class SafepointGenerator;
-  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
-};
-
-
-class LDeferredCode: public ZoneObject {
- public:
-  explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
-    codegen->AddDeferredCode(this);
-  }
-
-  virtual ~LDeferredCode() { }
-  virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
-
-  void SetExit(Label* exit) { external_exit_ = exit; }
-  Label* entry() { return &entry_; }
-  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  int instruction_index() const { return instruction_index_; }
-
- protected:
-  LCodeGen* codegen() const { return codegen_; }
-  MacroAssembler* masm() const { return codegen_->masm(); }
-
- private:
-  LCodeGen* codegen_;
-  Label entry_;
-  Label exit_;
-  Label* external_exit_;
-  int instruction_index_;
+  void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
 };
 
 } }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
deleted file mode 100644
index 41b060d..0000000
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "mips/lithium-gap-resolver-mips.h"
-#include "mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-static const Register kSavedValueRegister = kLithiumScratchReg;
-
-LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner),
-      moves_(32),
-      root_index_(0),
-      in_cycle_(false),
-      saved_destination_(NULL) {}
-
-
-void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(moves_.is_empty());
-  // Build up a worklist of moves.
-  BuildInitialMoveList(parallel_move);
-
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands move = moves_[i];
-    // Skip constants to perform them last.  They don't block other moves
-    // and skipping such moves with register destinations keeps those
-    // registers free for the whole algorithm.
-    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
-      root_index_ = i;  // Any cycle is found by reaching this move again.
-      PerformMove(i);
-      if (in_cycle_) {
-        RestoreValue();
-      }
-    }
-  }
-
-  // Perform the moves with constant sources.
-  for (int i = 0; i < moves_.length(); ++i) {
-    if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
-      EmitMove(i);
-    }
-  }
-
-  moves_.Rewind(0);
-}
-
-
-void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
-  // Perform a linear sweep of the moves to add them to the initial list of
-  // moves to perform, ignoring any move that is redundant (the source is
-  // the same as the destination, the destination is ignored and
-  // unallocated, or the move was already eliminated).
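-  // A move whose source and destination are the same operand, for example,
-  // is dropped here rather than added to the worklist.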
-  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
-  for (int i = 0; i < moves->length(); ++i) {
-    LMoveOperands move = moves->at(i);
-    if (!move.IsRedundant()) moves_.Add(move);
-  }
-  Verify();
-}
-
-
-void LGapResolver::PerformMove(int index) {
-  // Each call to this function performs a move and deletes it from the move
-  // graph.  We first recursively perform any move blocking this one.  We
-  // mark a move as "pending" on entry to PerformMove in order to detect
-  // cycles in the move graph.
-
-  // We can only find a cycle, when doing a depth-first traversal of moves,
-  // by encountering the starting move again. So by spilling the source of
-  // the starting move, we break the cycle.  All moves are then unblocked,
-  // and the starting move is completed by writing the spilled value to
-  // its destination.  All other moves from the spilled source have been
-  // completed prior to breaking the cycle.
-  // An additional complication is that moves to MemOperands with large
-  // offsets (more than 1K or 4K) require us to spill the saved value to
-  // the stack, to free up the register.
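-  // For example, resolving {a0 -> a1, a1 -> a0} finds the cycle when the
-  // traversal reaches the pending root move again: the value in a1 is saved
-  // to the scratch register, a0 -> a1 is emitted, and the saved value is
-  // finally written to a0.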
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
-
-  // Clear this move's destination to indicate a pending move.  The actual
-  // destination is saved in a stack-allocated local.  Multiple moves can
-  // be pending because this function is recursive.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
-  LOperand* destination = moves_[index].destination();
-  moves_[index].set_destination(NULL);
-
-  // Perform a depth-first traversal of the move graph to resolve
-  // dependencies.  Any unperformed, unpending move with a source the same
-  // as this one's destination blocks this one so recursively perform all
-  // such moves.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LMoveOperands other_move = moves_[i];
-    if (other_move.Blocks(destination) && !other_move.IsPending()) {
-      PerformMove(i);
-      // If there is a blocking, pending move it must be moves_[root_index_]
-      // and all other moves with the same source as moves_[root_index_] are
-      // successfully executed (because they are cycle-free) by this loop.
-    }
-  }
-
-  // We are about to resolve this move and don't need it marked as
-  // pending, so restore its destination.
-  moves_[index].set_destination(destination);
-
-  // The move may be blocked on a pending move, which must be the starting move.
-  // In this case, we have a cycle, and we save the source of this move to
-  // a scratch register to break it.
-  LMoveOperands other_move = moves_[root_index_];
-  if (other_move.Blocks(destination)) {
-    ASSERT(other_move.IsPending());
-    BreakCycle(index);
-    return;
-  }
-
-  // This move is no longer blocked.
-  EmitMove(index);
-}
-
-
-void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
-  // No operand should be the destination for more than one move.
-  for (int i = 0; i < moves_.length(); ++i) {
-    LOperand* destination = moves_[i].destination();
-    for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
-    }
-  }
-#endif
-}
-
-#define __ ACCESS_MASM(cgen_->masm())
-
-void LGapResolver::BreakCycle(int index) {
-  // We save in a register the value that should end up in the source of
-  // moves_[root_index_].  After performing all moves in the tree rooted
-  // in that move, we save the value to that source.
-  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
-  ASSERT(!in_cycle_);
-  in_cycle_ = true;
-  LOperand* source = moves_[index].source();
-  saved_destination_ = moves_[index].destination();
-  if (source->IsRegister()) {
-    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
-  } else if (source->IsStackSlot()) {
-    __ lw(kSavedValueRegister, cgen_->ToMemOperand(source));
-  } else if (source->IsDoubleRegister()) {
-    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
-  } else if (source->IsDoubleStackSlot()) {
-    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
-  } else {
-    UNREACHABLE();
-  }
-  // This move will be done by restoring the saved value to the destination.
-  moves_[index].Eliminate();
-}
-
-
-void LGapResolver::RestoreValue() {
-  ASSERT(in_cycle_);
-  ASSERT(saved_destination_ != NULL);
-
-  // Spilled value is in kSavedValueRegister or kLithiumScratchDouble.
-  if (saved_destination_->IsRegister()) {
-    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
-  } else if (saved_destination_->IsStackSlot()) {
-    __ sw(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
-  } else if (saved_destination_->IsDoubleRegister()) {
-    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
-            kLithiumScratchDouble);
-  } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ sdc1(kLithiumScratchDouble,
-            cgen_->ToMemOperand(saved_destination_));
-  } else {
-    UNREACHABLE();
-  }
-
-  in_cycle_ = false;
-  saved_destination_ = NULL;
-}
-
-
-void LGapResolver::EmitMove(int index) {
-  LOperand* source = moves_[index].source();
-  LOperand* destination = moves_[index].destination();
-
-  // Dispatch on the source and destination operand kinds.  Not all
-  // combinations are possible.
-
-  if (source->IsRegister()) {
-    Register source_register = cgen_->ToRegister(source);
-    if (destination->IsRegister()) {
-      __ mov(cgen_->ToRegister(destination), source_register);
-    } else {
-      ASSERT(destination->IsStackSlot());
-      __ sw(source_register, cgen_->ToMemOperand(destination));
-    }
-
-  } else if (source->IsStackSlot()) {
-    MemOperand source_operand = cgen_->ToMemOperand(source);
-    if (destination->IsRegister()) {
-      __ lw(cgen_->ToRegister(destination), source_operand);
-    } else {
-      ASSERT(destination->IsStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      if (in_cycle_) {
-        if (!destination_operand.OffsetIsInt16Encodable()) {
-          // 'at' is overwritten while saving the value to the destination.
-          // Therefore we can't use 'at'.  It is OK if the read from the source
-          // destroys 'at', since that happens before the value is read.
-          // This uses only a single reg of the double reg-pair.
-          __ lwc1(kLithiumScratchDouble, source_operand);
-          __ swc1(kLithiumScratchDouble, destination_operand);
-        } else {
-          __ lw(at, source_operand);
-          __ sw(at, destination_operand);
-        }
-      } else {
-        __ lw(kSavedValueRegister, source_operand);
-        __ sw(kSavedValueRegister, destination_operand);
-      }
-    }
-
-  } else if (source->IsConstantOperand()) {
-    LConstantOperand* constant_source = LConstantOperand::cast(source);
-    if (destination->IsRegister()) {
-      Register dst = cgen_->ToRegister(destination);
-      if (cgen_->IsInteger32(constant_source)) {
-        __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
-      } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
-      }
-    } else {
-      ASSERT(destination->IsStackSlot());
-      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      if (cgen_->IsInteger32(constant_source)) {
-        __ li(kSavedValueRegister,
-              Operand(cgen_->ToInteger32(constant_source)));
-      } else {
-        __ LoadObject(kSavedValueRegister,
-                      cgen_->ToHandle(constant_source));
-      }
-      __ sw(kSavedValueRegister, cgen_->ToMemOperand(destination));
-    }
-
-  } else if (source->IsDoubleRegister()) {
-    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
-    if (destination->IsDoubleRegister()) {
-      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
-    } else {
-      ASSERT(destination->IsDoubleStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      __ sdc1(source_register, destination_operand);
-    }
-
-  } else if (source->IsDoubleStackSlot()) {
-    MemOperand source_operand = cgen_->ToMemOperand(source);
-    if (destination->IsDoubleRegister()) {
-      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
-    } else {
-      ASSERT(destination->IsDoubleStackSlot());
-      MemOperand destination_operand = cgen_->ToMemOperand(destination);
-      if (in_cycle_) {
-        // kLithiumScratchDouble was used to break the cycle,
-        // but kSavedValueRegister is free.
-        MemOperand source_high_operand =
-            cgen_->ToHighMemOperand(source);
-        MemOperand destination_high_operand =
-            cgen_->ToHighMemOperand(destination);
-        __ lw(kSavedValueRegister, source_operand);
-        __ sw(kSavedValueRegister, destination_operand);
-        __ lw(kSavedValueRegister, source_high_operand);
-        __ sw(kSavedValueRegister, destination_high_operand);
-      } else {
-        __ ldc1(kLithiumScratchDouble, source_operand);
-        __ sdc1(kLithiumScratchDouble, destination_operand);
-      }
-    }
-  } else {
-    UNREACHABLE();
-  }
-
-  moves_[index].Eliminate();
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
diff --git a/src/mips/lithium-gap-resolver-mips.h b/src/mips/lithium-gap-resolver-mips.h
deleted file mode 100644
index 2506e38..0000000
--- a/src/mips/lithium-gap-resolver-mips.h
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-#define V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
-
-#include "v8.h"
-
-#include "lithium.h"
-
-namespace v8 {
-namespace internal {
-
-class LCodeGen;
-class LGapResolver;
-
-class LGapResolver BASE_EMBEDDED {
- public:
-  explicit LGapResolver(LCodeGen* owner);
-
-  // Resolve a set of parallel moves, emitting assembler instructions.
-  void Resolve(LParallelMove* parallel_move);
-
- private:
-  // Build the initial list of moves.
-  void BuildInitialMoveList(LParallelMove* parallel_move);
-
-  // Perform the move at the moves_ index in question (possibly requiring
-  // other moves to satisfy dependencies).
-  void PerformMove(int index);
-
-  // If a cycle is found in the series of moves, save the blocking value to
-  // a scratch register.  The cycle must be found by hitting the root of the
-  // depth-first search.
-  void BreakCycle(int index);
-
-  // After a cycle has been resolved, restore the value from the scratch
-  // register to its proper destination.
-  void RestoreValue();
-
-  // Emit a move and remove it from the move graph.
-  void EmitMove(int index);
-
-  // Verify the move list before performing moves.
-  void Verify();
-
-  LCodeGen* cgen_;
-
-  // List of moves not yet resolved.
-  ZoneList<LMoveOperands> moves_;
-
-  int root_index_;
-  bool in_cycle_;
-  LOperand* saved_destination_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
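
The deleted resolver handles "parallel moves": a set of moves that must behave as if performed simultaneously, so a move whose destination is another pending move's source cannot simply be emitted in order. A minimal standalone sketch of the BreakCycle/RestoreValue idea described above (illustrative C++, not V8 code):

#include <cstdio>

int main() {
  // The move graph {r0 -> r1, r1 -> r0} is a cycle: emitting either move
  // first would clobber the other move's source.
  int r0 = 10, r1 = 20;
  int scratch = r1;   // BreakCycle: park the blocking value in a scratch.
  r1 = r0;            // The move r0 -> r1 is now safe to emit.
  r0 = scratch;       // RestoreValue: scratch -> r0 completes the cycle.
  printf("r0=%d r1=%d\n", r0, r1);  // prints: r0=20 r1=10
  return 0;
}
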
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
deleted file mode 100644
index 1e0c216..0000000
--- a/src/mips/lithium-mips.cc
+++ /dev/null
@@ -1,2330 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "lithium-allocator-inl.h"
-#include "mips/lithium-mips.h"
-#include "mips/lithium-codegen-mips.h"
-
-namespace v8 {
-namespace internal {
-
-#define DEFINE_COMPILE(type)                            \
-  void L##type::CompileToNative(LCodeGen* generator) {  \
-    generator->Do##type(this);                          \
-  }
-LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
-#undef DEFINE_COMPILE
-
-LOsrEntry::LOsrEntry() {
-  for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
-    register_spills_[i] = NULL;
-  }
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
-    double_register_spills_[i] = NULL;
-  }
-}
-
-
-void LOsrEntry::MarkSpilledRegister(int allocation_index,
-                                    LOperand* spill_operand) {
-  ASSERT(spill_operand->IsStackSlot());
-  ASSERT(register_spills_[allocation_index] == NULL);
-  register_spills_[allocation_index] = spill_operand;
-}
-
-
-#ifdef DEBUG
-void LInstruction::VerifyCall() {
-  // Call instructions can use only fixed registers as temporaries and
-  // outputs because all registers are blocked by the calling convention.
-  // Input operands must use a fixed register, a use-at-start policy, or
-  // a non-register policy.
-  ASSERT(Output() == NULL ||
-         LUnallocated::cast(Output())->HasFixedPolicy() ||
-         !LUnallocated::cast(Output())->HasRegisterPolicy());
-  for (UseIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
-           operand->IsUsedAtStart());
-  }
-  for (TempIterator it(this); !it.Done(); it.Advance()) {
-    LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
-  }
-}
-#endif
-
-
-void LOsrEntry::MarkSpilledDoubleRegister(int allocation_index,
-                                          LOperand* spill_operand) {
-  ASSERT(spill_operand->IsDoubleStackSlot());
-  ASSERT(double_register_spills_[allocation_index] == NULL);
-  double_register_spills_[allocation_index] = spill_operand;
-}
-
-
-void LInstruction::PrintTo(StringStream* stream) {
-  stream->Add("%s ", this->Mnemonic());
-
-  PrintOutputOperandTo(stream);
-
-  PrintDataTo(stream);
-
-  if (HasEnvironment()) {
-    stream->Add(" ");
-    environment()->PrintTo(stream);
-  }
-
-  if (HasPointerMap()) {
-    stream->Add(" ");
-    pointer_map()->PrintTo(stream);
-  }
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  for (int i = 0; i < inputs_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    inputs_[i]->PrintTo(stream);
-  }
-}
-
-
-template<int R, int I, int T>
-void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
-  for (int i = 0; i < results_.length(); i++) {
-    if (i > 0) stream->Add(" ");
-    results_[i]->PrintTo(stream);
-  }
-}
-
-
-void LLabel::PrintDataTo(StringStream* stream) {
-  LGap::PrintDataTo(stream);
-  LLabel* rep = replacement();
-  if (rep != NULL) {
-    stream->Add(" Dead block replaced with B%d", rep->block_id());
-  }
-}
-
-
-bool LGap::IsRedundant() const {
-  for (int i = 0; i < 4; i++) {
-    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-
-void LGap::PrintDataTo(StringStream* stream) {
-  for (int i = 0; i < 4; i++) {
-    stream->Add("(");
-    if (parallel_moves_[i] != NULL) {
-      parallel_moves_[i]->PrintDataTo(stream);
-    }
-    stream->Add(") ");
-  }
-}
-
-
-const char* LArithmeticD::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-d";
-    case Token::SUB: return "sub-d";
-    case Token::MUL: return "mul-d";
-    case Token::DIV: return "div-d";
-    case Token::MOD: return "mod-d";
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
-
-const char* LArithmeticT::Mnemonic() const {
-  switch (op()) {
-    case Token::ADD: return "add-t";
-    case Token::SUB: return "sub-t";
-    case Token::MUL: return "mul-t";
-    case Token::MOD: return "mod-t";
-    case Token::DIV: return "div-t";
-    case Token::BIT_AND: return "bit-and-t";
-    case Token::BIT_OR: return "bit-or-t";
-    case Token::BIT_XOR: return "bit-xor-t";
-    case Token::SHL: return "sll-t";
-    case Token::SAR: return "sra-t";
-    case Token::SHR: return "srl-t";
-    default:
-      UNREACHABLE();
-      return NULL;
-  }
-}
-
-
-void LGoto::PrintDataTo(StringStream* stream) {
-  stream->Add("B%d", block_id());
-}
-
-
-void LBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
-  InputAt(0)->PrintTo(stream);
-}
-
-
-void LCmpIDAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if ");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" %s ", Token::String(op()));
-  InputAt(1)->PrintTo(stream);
-  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if ");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
-  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_object(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_string(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_smi(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_undetectable(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if string_compare(");
-  InputAt(0)->PrintTo(stream);
-  InputAt(1)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_instance_type(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if has_cached_array_index(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
-void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if class_of_test(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(", \"%o\") then B%d else B%d",
-              *hydrogen()->class_name(),
-              true_block_id(),
-              false_block_id());
-}
-
-
-void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if typeof ");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" == \"%s\" then B%d else B%d",
-              *hydrogen()->type_literal()->ToCString(),
-              true_block_id(), false_block_id());
-}
-
-
-void LCallConstantFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("#%d / ", arity());
-}
-
-
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
-  stream->Add("/%s ", hydrogen()->OpName());
-  InputAt(0)->PrintTo(stream);
-}
-
-
-void LLoadContextSlot::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
-  stream->Add("[%d]", slot_index());
-}
-
-
-void LStoreContextSlot::PrintDataTo(StringStream* stream) {
-  InputAt(0)->PrintTo(stream);
-  stream->Add("[%d] <- ", slot_index());
-  InputAt(1)->PrintTo(stream);
-}
-
-
-void LInvokeFunction::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" #%d / ", arity());
-}
-
-
-void LCallKeyed::PrintDataTo(StringStream* stream) {
-  stream->Add("[a2] #%d / ", arity());
-}
-
-
-void LCallNamed::PrintDataTo(StringStream* stream) {
-  SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallGlobal::PrintDataTo(StringStream* stream) {
-  SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
-}
-
-
-void LCallKnownGlobal::PrintDataTo(StringStream* stream) {
-  stream->Add("#%d / ", arity());
-}
-
-
-void LCallNew::PrintDataTo(StringStream* stream) {
-  stream->Add("= ");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(" #%d / ", arity());
-}
-
-
-void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
-  arguments()->PrintTo(stream);
-
-  stream->Add(" length ");
-  length()->PrintTo(stream);
-
-  stream->Add(" index ");
-  index()->PrintTo(stream);
-}
-
-
-void LStoreNamedField::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
-  stream->Add(" <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
-  elements()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add("[");
-  key()->PrintTo(stream);
-  stream->Add("] <- ");
-  value()->PrintTo(stream);
-}
-
-
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
-LChunk::LChunk(CompilationInfo* info, HGraph* graph)
-    : spill_slot_count_(0),
-      info_(info),
-      graph_(graph),
-      instructions_(32),
-      pointer_maps_(8),
-      inlined_closures_(1) {
-}
-
-
-int LChunk::GetNextSpillIndex(bool is_double) {
-  // Skip a slot if allocating a double-width slot.
-  if (is_double) spill_slot_count_++;
-  return spill_slot_count_++;
-}
-
-
-LOperand* LChunk::GetNextSpillSlot(bool is_double) {
-  int index = GetNextSpillIndex(is_double);
-  if (is_double) {
-    return LDoubleStackSlot::Create(index);
-  } else {
-    return LStackSlot::Create(index);
-  }
-}
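
The index arithmetic above packs a double-width spill into two consecutive slots and returns the index of the second one. A small trace (hypothetical standalone rewrite, not the V8 class):

#include <cstdio>

static int spill_slot_count = 0;

int GetNextSpillIndex(bool is_double) {
  if (is_double) spill_slot_count++;  // reserve the extra slot for the pair
  return spill_slot_count++;          // index of the slot handed out
}

int main() {
  printf("%d\n", GetNextSpillIndex(false));  // 0: single slot
  printf("%d\n", GetNextSpillIndex(true));   // 2: slots 1 and 2 form the pair
  printf("%d\n", GetNextSpillIndex(false));  // 3
  return 0;
}
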
-
-
-void LChunk::MarkEmptyBlocks() {
-  HPhase phase("L_Mark empty blocks", this);
-  for (int i = 0; i < graph()->blocks()->length(); ++i) {
-    HBasicBlock* block = graph()->blocks()->at(i);
-    int first = block->first_instruction_index();
-    int last = block->last_instruction_index();
-    LInstruction* first_instr = instructions()->at(first);
-    LInstruction* last_instr = instructions()->at(last);
-
-    LLabel* label = LLabel::cast(first_instr);
-    if (last_instr->IsGoto()) {
-      LGoto* goto_instr = LGoto::cast(last_instr);
-      if (label->IsRedundant() &&
-          !label->is_loop_header()) {
-        bool can_eliminate = true;
-        for (int i = first + 1; i < last && can_eliminate; ++i) {
-          LInstruction* cur = instructions()->at(i);
-          if (cur->IsGap()) {
-            LGap* gap = LGap::cast(cur);
-            if (!gap->IsRedundant()) {
-              can_eliminate = false;
-            }
-          } else {
-            can_eliminate = false;
-          }
-        }
-
-        if (can_eliminate) {
-          label->set_replacement(GetLabel(goto_instr->block_id()));
-        }
-      }
-    }
-  }
-}
-
-
-void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
-  int index = -1;
-  if (instr->IsControl()) {
-    instructions_.Add(gap);
-    index = instructions_.length();
-    instructions_.Add(instr);
-  } else {
-    index = instructions_.length();
-    instructions_.Add(instr);
-    instructions_.Add(gap);
-  }
-  if (instr->HasPointerMap()) {
-    pointer_maps_.Add(instr->pointer_map());
-    instr->pointer_map()->set_lithium_position(index);
-  }
-}
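
AddInstruction pairs every instruction with a gap, placing the gap before control instructions (so resolved moves execute before the branch) and after everything else. A toy trace of the resulting stream (illustrative, not the V8 types):

#include <cstdio>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stream;
  auto add = [&](const std::string& instr, bool is_control) {
    if (is_control) {
      stream.push_back("gap");   // moves must run before the branch
      stream.push_back(instr);
    } else {
      stream.push_back(instr);
      stream.push_back("gap");
    }
  };
  add("add", false);
  add("goto", true);
  for (const auto& s : stream) printf("%s ", s.c_str());
  printf("\n");  // prints: add gap gap goto
  return 0;
}
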
-
-
-LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
-  return LConstantOperand::Create(constant->id());
-}
-
-
-int LChunk::GetParameterStackSlot(int index) const {
-  // The receiver is at index 0, the first parameter at index 1, so we
-  // shift all parameter indexes down by the number of parameters, and
-  // make sure they end up negative so they are distinguishable from
-  // spill slots.
-  int result = index - info()->scope()->num_parameters() - 1;
-  ASSERT(result < 0);
-  return result;
-}
-
-// A parameter relative to the frame pointer in the arguments stub.
-int LChunk::ParameterAt(int index) {
-  ASSERT(-1 <= index);  // -1 is the receiver.
-  return (1 + info()->scope()->num_parameters() - index) *
-      kPointerSize;
-}
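
A worked instance of the two computations above, assuming a function with two declared parameters and 4-byte pointers (hypothetical standalone rewrite):

#include <cassert>

int GetParameterStackSlot(int index, int num_parameters) {
  int result = index - num_parameters - 1;  // receiver (index 0) included
  assert(result < 0);  // negative, so distinguishable from spill slots
  return result;
}

int ParameterAt(int index, int num_parameters, int pointer_size) {
  return (1 + num_parameters - index) * pointer_size;  // byte offset from fp
}

int main() {
  assert(GetParameterStackSlot(0, 2) == -3);  // receiver
  assert(GetParameterStackSlot(2, 2) == -1);  // last parameter
  assert(ParameterAt(-1, 2, 4) == 16);        // receiver, deepest in frame
  assert(ParameterAt(2, 2, 4) == 4);          // last parameter, nearest fp
  return 0;
}
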
-
-
-LGap* LChunk::GetGapAt(int index) const {
-  return LGap::cast(instructions_[index]);
-}
-
-
-bool LChunk::IsGapAt(int index) const {
-  return instructions_[index]->IsGap();
-}
-
-
-int LChunk::NearestGapPos(int index) const {
-  while (!IsGapAt(index)) index--;
-  return index;
-}
-
-
-void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
-  GetGapAt(index)->GetOrCreateParallelMove(LGap::START)->AddMove(from, to);
-}
-
-
-Handle<Object> LChunk::LookupLiteral(LConstantOperand* operand) const {
-  return HConstant::cast(graph_->LookupValue(operand->index()))->handle();
-}
-
-
-Representation LChunk::LookupLiteralRepresentation(
-    LConstantOperand* operand) const {
-  return graph_->LookupValue(operand->index())->representation();
-}
-
-
-LChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
-  chunk_ = new(zone()) LChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
-  status_ = BUILDING;
-  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
-  for (int i = 0; i < blocks->length(); i++) {
-    HBasicBlock* next = NULL;
-    if (i < blocks->length() - 1) next = blocks->at(i + 1);
-    DoBasicBlock(blocks->at(i), next);
-    if (is_aborted()) return NULL;
-  }
-  status_ = DONE;
-  return chunk_;
-}
-
-
-void LChunkBuilder::Abort(const char* format, ...) {
-  if (FLAG_trace_bailout) {
-    SmartArrayPointer<char> name(
-        info()->shared_info()->DebugName()->ToCString());
-    PrintF("Aborting LChunk building in @\"%s\": ", *name);
-    va_list arguments;
-    va_start(arguments, format);
-    OS::VPrint(format, arguments);
-    va_end(arguments);
-    PrintF("\n");
-  }
-  status_ = ABORTED;
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
-                                  Register::ToAllocationIndex(reg));
-}
-
-
-LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                                  DoubleRegister::ToAllocationIndex(reg));
-}
-
-
-LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
-  return Use(value, ToUnallocated(fixed_register));
-}
-
-
-LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
-  return Use(value, ToUnallocated(reg));
-}
-
-
-LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
-  return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
-                                      LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
-}
-
-
-LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
-                                     LUnallocated::USED_AT_START));
-}
-
-
-LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : Use(value);
-}
-
-
-LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseRegister(value);
-}
-
-
-LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : UseRegisterAtStart(value);
-}
-
-
-LOperand* LChunkBuilder::UseAny(HValue* value) {
-  return value->IsConstant()
-      ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
-}
-
-
-LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
-  if (value->EmitAtUses()) {
-    HInstruction* instr = HInstruction::cast(value);
-    VisitInstruction(instr);
-  }
-  operand->set_virtual_register(value->id());
-  return operand;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
-                                    LUnallocated* result) {
-  result->set_virtual_register(current_instruction_->id());
-  instr->set_result(result);
-  return instr;
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsRegister(
-    LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineAsSpilled(
-    LTemplateInstruction<1, I, T>* instr, int index) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineSameAsFirst(
-    LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixed(
-    LTemplateInstruction<1, I, T>* instr, Register reg) {
-  return Define(instr, ToUnallocated(reg));
-}
-
-
-template<int I, int T>
-LInstruction* LChunkBuilder::DefineFixedDouble(
-    LTemplateInstruction<1, I, T>* instr, DoubleRegister reg) {
-  return Define(instr, ToUnallocated(reg));
-}
-
-
-LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
-  HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator));
-  return instr;
-}
-
-
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
-    LInstruction* instr, int ast_id) {
-  ASSERT(instruction_pending_deoptimization_environment_ == NULL);
-  ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
-  instruction_pending_deoptimization_environment_ = instr;
-  pending_deoptimization_ast_id_ = ast_id;
-  return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
-  instruction_pending_deoptimization_environment_ = NULL;
-  pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
-LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
-                                        HInstruction* hinstr,
-                                        CanDeoptimize can_deoptimize) {
-#ifdef DEBUG
-  instr->VerifyCall();
-#endif
-  instr->MarkAsCall();
-  instr = AssignPointerMap(instr);
-
-  if (hinstr->HasObservableSideEffects()) {
-    ASSERT(hinstr->next()->IsSimulate());
-    HSimulate* sim = HSimulate::cast(hinstr->next());
-    instr = SetInstructionPendingDeoptimizationEnvironment(
-        instr, sim->ast_id());
-  }
-
-  // If the instruction does not have side effects, lazy deoptimization
-  // after the call will try to deoptimize to the point before the call.
-  // Thus we still need to attach an environment to this call even if
-  // the call sequence cannot deoptimize eagerly.
-  bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
-      !hinstr->HasObservableSideEffects();
-  if (needs_environment && !instr->HasEnvironment()) {
-    instr = AssignEnvironment(instr);
-  }
-
-  return instr;
-}
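
The needs_environment expression above is easy to misread; restated as a standalone predicate with the two cases spelled out (hypothetical helper, not part of the builder API):

#include <cassert>

bool NeedsEnvironment(bool can_deoptimize_eagerly, bool has_side_effects) {
  // Eagerly-deoptimizing calls always carry an environment. Side-effect-free
  // calls carry one too: lazy deoptimization after such a call must roll
  // back to the point before the call.
  return can_deoptimize_eagerly || !has_side_effects;
}

int main() {
  assert(NeedsEnvironment(true, true));    // eager deopt: always
  assert(NeedsEnvironment(false, false));  // pure call: may roll back
  assert(!NeedsEnvironment(false, true));  // side effects: simulate suffices
  return 0;
}
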
-
-
-LInstruction* LChunkBuilder::MarkAsSaveDoubles(LInstruction* instr) {
-  instr->MarkAsSaveDoubles();
-  return instr;
-}
-
-
-LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_));
-  return instr;
-}
-
-
-LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand =
-      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  operand->set_virtual_register(allocator_->GetVirtualRegister());
-  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
-  return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(Register reg) {
-  LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
-  return operand;
-}
-
-
-LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
-  LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
-  return operand;
-}
-
-
-LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new(zone()) LLabel(instr->block());
-}
-
-
-LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
-}
-
-
-LInstruction* LChunkBuilder::DoShift(Token::Value op,
-                                     HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsTagged()) {
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), a1);
-    LOperand* right = UseFixed(instr->right(), a0);
-    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, v0), instr);
-  }
-
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().IsInteger32());
-  ASSERT(instr->right()->representation().IsInteger32());
-  LOperand* left = UseRegisterAtStart(instr->left());
-
-  HValue* right_value = instr->right();
-  LOperand* right = NULL;
-  int constant_value = 0;
-  if (right_value->IsConstant()) {
-    HConstant* constant = HConstant::cast(right_value);
-    right = chunk_->DefineConstantOperand(constant);
-    constant_value = constant->Integer32Value() & 0x1f;
-  } else {
-    right = UseRegisterAtStart(right_value);
-  }
-
-  // Shift operations can only deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  bool may_deopt = (op == Token::SHR && constant_value == 0);
-  bool does_deopt = false;
-  if (may_deopt) {
-    for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
-      if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
-        does_deopt = true;
-        break;
-      }
-    }
-  }
-
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
-  return does_deopt ? AssignEnvironment(result) : result;
-}
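
Why only SHR by zero can deoptimize here: JavaScript's >>> is unsigned, so shifting a negative int32 by 0 yields a value that no longer fits in a signed 32-bit register unless every use truncates it back. A short demonstration (illustrative C++):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;  // JS: x >>> 0
  printf("%u fits in int32? %s\n", shifted,
         shifted <= static_cast<uint32_t>(INT32_MAX)
             ? "yes" : "no -> deoptimize unless truncated");
  return 0;
}
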
-
-
-LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
-                                           HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
-  ASSERT(op != Token::MOD);
-  LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
-                                           HArithmeticBinaryOperation* instr) {
-  ASSERT(op == Token::ADD ||
-         op == Token::DIV ||
-         op == Token::MOD ||
-         op == Token::MUL ||
-         op == Token::SUB);
-  HValue* left = instr->left();
-  HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
-  LOperand* left_operand = UseFixed(left, a1);
-  LOperand* right_operand = UseFixed(right, a0);
-  LArithmeticT* result =
-      new(zone()) LArithmeticT(op, left_operand, right_operand);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
-  current_block_ = block;
-  next_block_ = next_block;
-  if (block->IsStartBlock()) {
-    block->UpdateEnvironment(graph_->start_environment());
-    argument_count_ = 0;
-  } else if (block->predecessors()->length() == 1) {
-    // We have a single predecessor => copy environment and outgoing
-    // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
-    HBasicBlock* pred = block->predecessors()->at(0);
-    HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
-    // Only copy the environment if it is used again later.
-    if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
-    } else {
-      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
-          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
-        last_environment = last_environment->Copy();
-      }
-    }
-    block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
-    argument_count_ = pred->argument_count();
-  } else {
-    // We are at a state join => process phis.
-    HBasicBlock* pred = block->predecessors()->at(0);
-    // No need to copy the environment; it cannot be used later.
-    HEnvironment* last_environment = pred->last_environment();
-    for (int i = 0; i < block->phis()->length(); ++i) {
-      HPhi* phi = block->phis()->at(i);
-      last_environment->SetValueAt(phi->merged_index(), phi);
-    }
-    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
-      last_environment->SetValueAt(block->deleted_phis()->at(i),
-                                   graph_->GetConstantUndefined());
-    }
-    block->UpdateEnvironment(last_environment);
-    // Pick up the outgoing argument count of one of the predecessors.
-    argument_count_ = pred->argument_count();
-  }
-  HInstruction* current = block->first();
-  int start = chunk_->instructions()->length();
-  while (current != NULL && !is_aborted()) {
-    // Code for constants in registers is generated lazily.
-    if (!current->EmitAtUses()) {
-      VisitInstruction(current);
-    }
-    current = current->next();
-  }
-  int end = chunk_->instructions()->length() - 1;
-  if (end >= start) {
-    block->set_first_instruction_index(start);
-    block->set_last_instruction_index(end);
-  }
-  block->set_argument_count(argument_count_);
-  next_block_ = NULL;
-  current_block_ = NULL;
-}
-
-
-void LChunkBuilder::VisitInstruction(HInstruction* current) {
-  HInstruction* old_current = current_instruction_;
-  current_instruction_ = current;
-  if (current->has_position()) position_ = current->position();
-  LInstruction* instr = current->CompileToLithium(this);
-
-  if (instr != NULL) {
-    if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
-      instr = AssignPointerMap(instr);
-    }
-    if (FLAG_stress_environments && !instr->HasEnvironment()) {
-      instr = AssignEnvironment(instr);
-    }
-    instr->set_hydrogen_value(current);
-    chunk_->AddInstruction(instr, current_block_);
-  }
-  current_instruction_ = old_current;
-}
-
-
-LEnvironment* LChunkBuilder::CreateEnvironment(
-    HEnvironment* hydrogen_env,
-    int* argument_index_accumulator) {
-  if (hydrogen_env == NULL) return NULL;
-
-  LEnvironment* outer =
-      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
-  int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber ||
-         hydrogen_env->frame_type() != JS_FUNCTION);
-  int value_count = hydrogen_env->length();
-  LEnvironment* result = new(zone()) LEnvironment(
-      hydrogen_env->closure(),
-      hydrogen_env->frame_type(),
-      ast_id,
-      hydrogen_env->parameter_count(),
-      argument_count_,
-      value_count,
-      outer);
-  int argument_index = *argument_index_accumulator;
-  for (int i = 0; i < value_count; ++i) {
-    if (hydrogen_env->is_special_index(i)) continue;
-
-    HValue* value = hydrogen_env->values()->at(i);
-    LOperand* op = NULL;
-    if (value->IsArgumentsObject()) {
-      op = NULL;
-    } else if (value->IsPushArgument()) {
-      op = new(zone()) LArgument(argument_index++);
-    } else {
-      op = UseAny(value);
-    }
-    result->AddValue(op, value->representation());
-  }
-
-  if (hydrogen_env->frame_type() == JS_FUNCTION) {
-    *argument_index_accumulator = argument_index;
-  }
-
-  return result;
-}
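
CreateEnvironment recurses on the outer frame before building the current one, so deopt environments are linked outermost-first, and only JS_FUNCTION frames advance the argument index accumulator. The recursion shape with toy types (not V8's):

#include <cstdio>

struct HEnv { HEnv* outer; int ast_id; };
struct LEnv { LEnv* outer; int ast_id; };

LEnv* Create(HEnv* h) {
  if (h == nullptr) return nullptr;
  LEnv* outer = Create(h->outer);  // build the outer frame first
  return new LEnv{outer, h->ast_id};
}

int main() {
  HEnv top{nullptr, 0};
  HEnv inner{&top, 1};
  LEnv* result = Create(&inner);
  printf("%d -> outer %d\n", result->ast_id, result->outer->ast_id);  // 1 -> 0
  return 0;
}
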
-
-
-LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
-}
-
-
-LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  if (value->EmitAtUses()) {
-    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
-        ? instr->FirstSuccessor()
-        : instr->SecondSuccessor();
-    return new(zone()) LGoto(successor->block_id());
-  }
-
-  LBranch* result = new(zone()) LBranch(UseRegister(value));
-  // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
-  Representation rep = value->representation();
-  HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
-    return AssignEnvironment(result);
-  }
-  return result;
-}
-
-
-LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  return new(zone()) LCmpMapAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(
-      new(zone()) LArgumentsLength(UseRegister(length->value())));
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new(zone()) LArgumentsElements);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
-  LInstanceOf* result =
-      new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
-                              UseFixed(instr->right(), a1));
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
-    HInstanceOfKnownGlobal* instr) {
-  LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
-                                         FixedTemp(t0));
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
-  LOperand* receiver = UseRegisterAtStart(instr->receiver());
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
-  return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
-LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
-  LOperand* function = UseFixed(instr->function(), a1);
-  LOperand* receiver = UseFixed(instr->receiver(), a0);
-  LOperand* length = UseFixed(instr->length(), a2);
-  LOperand* elements = UseFixed(instr->elements(), a3);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
-                                                        receiver,
-                                                        length,
-                                                        elements);
-  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
-  ++argument_count_;
-  LOperand* argument = Use(instr->argument());
-  return new(zone()) LPushArgument(argument);
-}
-
-
-LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
-}
-
-
-LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
-}
-
-
-LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
-  LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalObject(context));
-}
-
-
-LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
-  LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
-}
-
-
-LInstruction* LChunkBuilder::DoCallConstantFunction(
-    HCallConstantFunction* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), a1);
-  argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
-  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
-  BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
-    LOperand* input = UseFixedDouble(instr->value(), f4);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
-    return MarkAsCall(DefineFixedDouble(result, f4), instr);
-  } else if (op == kMathPowHalf) {
-    // Input cannot be the same as the result.
-    // See lithium-codegen-mips.cc::DoMathPowHalf.
-    LOperand* input = UseFixedDouble(instr->value(), f8);
-    LOperand* temp = FixedTemp(f6);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
-    return DefineFixedDouble(result, f4);
-  } else {
-    LOperand* input = UseRegisterAtStart(instr->value());
-    LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
-    switch (op) {
-      case kMathAbs:
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-      case kMathFloor:
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-      case kMathSqrt:
-        return DefineAsRegister(result);
-      case kMathRound:
-        return AssignEnvironment(DefineAsRegister(result));
-      default:
-        UNREACHABLE();
-        return NULL;
-    }
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
-  ASSERT(instr->key()->representation().IsTagged());
-  argument_count_ -= instr->argument_count();
-  LOperand* key = UseFixed(instr->key(), a2);
-  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
-  LOperand* constructor = UseFixed(instr->constructor(), a1);
-  argument_count_ -= instr->argument_count();
-  LCallNew* result = new(zone()) LCallNew(constructor);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), a1);
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
-                    instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShr(HShr* instr) {
-  return DoShift(Token::SHR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoSar(HSar* instr) {
-  return DoShift(Token::SAR, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoShl(HShl* instr) {
-  return DoShift(Token::SHL, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineAsRegister(new(zone()) LBitI(left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), a1);
-    LOperand* right = UseFixed(instr->right(), a0);
-    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
-    return MarkAsCall(DefineFixed(result, v0), instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
-  ASSERT(instr->value()->representation().IsInteger32());
-  ASSERT(instr->representation().IsInteger32());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LBitNotI(value));
-}
-
-
-LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
-  if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::DIV, instr);
-  } else if (instr->representation().IsInteger32()) {
-    // TODO(1042) The fixed register allocation is needed because we call
-    // TypeRecordingBinaryOpStub from the generated code, which requires
-    // registers a0 and a1 to be used. We should remove that when we provide
-    // a native implementation.
-    LOperand* dividend = UseFixed(instr->left(), a0);
-    LOperand* divisor = UseFixed(instr->right(), a1);
-    return AssignEnvironment(AssignPointerMap(
-             DefineFixed(new(zone()) LDivI(dividend, divisor), v0)));
-  } else {
-    return DoArithmeticT(Token::DIV, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoMod(HMod* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LModI* mod;
-    if (instr->HasPowerOf2Divisor()) {
-      ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
-      LOperand* value = UseRegisterAtStart(instr->left());
-      mod = new(zone()) LModI(value, UseOrConstant(instr->right()));
-    } else {
-      LOperand* dividend = UseRegister(instr->left());
-      LOperand* divisor = UseRegister(instr->right());
-      mod = new(zone()) LModI(dividend,
-                              divisor,
-                              TempRegister(),
-                              FixedTemp(f20),
-                              FixedTemp(f22));
-    }
-
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
-        instr->CheckFlag(HValue::kCanBeDivByZero)) {
-      return AssignEnvironment(DefineAsRegister(mod));
-    } else {
-      return DefineAsRegister(mod);
-    }
-  } else if (instr->representation().IsTagged()) {
-    return DoArithmeticT(Token::MOD, instr);
-  } else {
-    ASSERT(instr->representation().IsDouble());
-    // We call a C function for double modulo. It can't trigger a GC.
-    // We need to use a fixed result register for the call.
-    // TODO(fschneider): Allow any registers as input registers.
-    LOperand* left = UseFixedDouble(instr->left(), f2);
-    LOperand* right = UseFixedDouble(instr->right(), f4);
-    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
-    return MarkAsCall(DefineFixedDouble(result, f2), instr);
-  }
-}
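
The HasPowerOf2Divisor fast path above avoids a division entirely: for a known divisor 2^k with division-by-zero excluded, the remainder of a non-negative value is just a mask. A sketch (illustrative; the real lowering also handles negative operands and the -0 bailout):

#include <cstdio>

int main() {
  int x = 29, divisor = 8;                      // divisor is 2^3
  int fast = x & (divisor - 1);                 // mask instead of div
  printf("%d %% %d = %d\n", x, divisor, fast);  // prints: 29 % 8 = 5
  return 0;
}
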
-
-
-LInstruction* LChunkBuilder::DoMul(HMul* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left;
-    LOperand* right = UseOrConstant(instr->MostConstantOperand());
-    LOperand* temp = NULL;
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
-        (instr->CheckFlag(HValue::kCanOverflow) ||
-        !right->IsConstantOperand())) {
-      left = UseRegister(instr->LeastConstantOperand());
-      temp = TempRegister();
-    } else {
-      left = UseRegisterAtStart(instr->LeastConstantOperand());
-    }
-    LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      AssignEnvironment(mul);
-    }
-    return DefineAsRegister(mul);
-
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::MUL, instr);
-
-  } else {
-    return DoArithmeticT(Token::MUL, instr);
-  }
-}
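
The kBailoutOnMinusZero handling above (and the extra temp) exists because integer code cannot represent JavaScript's -0: when a product is zero and an operand may be negative, the sign must be checked. The distinction is visible in IEEE 754 doubles (illustrative):

#include <cstdio>

int main() {
  double product = 0.0 * -1.0;  // -0.0: equal to 0.0, but sign bit is set
  printf("is minus zero: %d\n", product == 0.0 && 1.0 / product < 0.0);  // 1
  return 0;
}
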
-
-
-LInstruction* LChunkBuilder::DoSub(HSub* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new(zone()) LSubI(left, right);
-    LInstruction* result = DefineAsRegister(sub);
-    if (instr->CheckFlag(HValue::kCanOverflow)) {
-      result = AssignEnvironment(result);
-    }
-    return result;
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::SUB, instr);
-  } else {
-    return DoArithmeticT(Token::SUB, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new(zone()) LAddI(left, right);
-    LInstruction* result = DefineAsRegister(add);
-    if (instr->CheckFlag(HValue::kCanOverflow)) {
-      result = AssignEnvironment(result);
-    }
-    return result;
-  } else if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::ADD, instr);
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    return DoArithmeticT(Token::ADD, instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
-  // We call a C function for double power. It can't trigger a GC.
-  // We need to use a fixed result register for the call.
-  Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
-  LOperand* left = UseFixedDouble(instr->left(), f2);
-  LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), f4) :
-      UseFixed(instr->right(), a2);
-  LPower* result = new(zone()) LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, f0),
-                    instr,
-                    CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->global_object()->representation().IsTagged());
-  LOperand* global_object = UseFixed(instr->global_object(), a0);
-  LRandom* result = new(zone()) LRandom(global_object);
-  return MarkAsCall(DefineFixedDouble(result, f0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  Representation r = instr->GetInputRepresentation();
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), a1);
-  LOperand* right = UseFixed(instr->right(), a0);
-  LCmpT* result = new(zone()) LCmpT(left, right);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareIDAndBranch(
-    HCompareIDAndBranch* instr) {
-  Representation r = instr->GetInputRepresentation();
-  if (r.IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
-    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
-  } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left = UseRegisterAtStart(instr->left());
-    LOperand* right = UseRegisterAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
-    HCompareObjectEqAndBranch* instr) {
-  LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseRegisterAtStart(instr->right());
-  return new(zone()) LCmpObjectEqAndBranch(left, right);
-}
-
-
-LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
-    HCompareConstantEqAndBranch* instr) {
-  return new(zone()) LCmpConstantEqAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
-                                        temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
-                                        temp);
-}
-
-
-LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
-    HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsUndetectableAndBranch(
-      UseRegisterAtStart(instr->value()), TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
-    HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), a1);
-  LOperand* right = UseFixed(instr->right(), a0);
-  LStringCompareAndBranch* result =
-      new(zone()) LStringCompareAndBranch(left, right);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
-    HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LHasInstanceTypeAndBranch(value);
-}
-
-
-LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
-    HGetCachedArrayIndex* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
-}
-
-
-LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
-    HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LHasCachedArrayIndexAndBranch(
-      UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
-    HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
-                                           TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
-    HFixedArrayBaseLength* instr) {
-  LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
-}
-
-
-LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
-  LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LElementsKind(object));
-}
-
-
-LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
-  LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new(zone()) LValueOf(object, TempRegister());
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
-  LOperand* object = UseFixed(instr->value(), a0);
-  LDateField* result =
-      new(zone()) LDateField(object, FixedTemp(a1), instr->index());
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterAtStart(instr->index());
-  LOperand* length = UseRegister(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
-}
-
-
-LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
-  // The control instruction marking the end of a block that completed
-  // abruptly (e.g., threw an exception).  There is nothing specific to do.
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
-  LOperand* value = UseFixed(instr->value(), a0);
-  return MarkAsCall(new(zone()) LThrow(value), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
-  // All HForceRepresentation instructions should be eliminated in the
-  // representation change phase of Hydrogen.
-  UNREACHABLE();
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoChange(HChange* instr) {
-  Representation from = instr->from();
-  Representation to = instr->to();
-  if (from.IsTagged()) {
-    if (to.IsDouble()) {
-      LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
-      return AssignEnvironment(DefineAsRegister(res));
-    } else {
-      ASSERT(to.IsInteger32());
-      LOperand* value = UseRegisterAtStart(instr->value());
-      bool needs_check = !instr->value()->type().IsSmi();
-      LInstruction* res = NULL;
-      if (!needs_check) {
-        res = DefineAsRegister(new(zone()) LSmiUntag(value, needs_check));
-      } else {
-        LOperand* temp1 = TempRegister();
-        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
-                                                      : NULL;
-        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
-                                                      : NULL;
-        res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
-                                                       temp1,
-                                                       temp2,
-                                                       temp3));
-        res = AssignEnvironment(res);
-      }
-      return res;
-    }
-  } else if (from.IsDouble()) {
-    if (to.IsTagged()) {
-      LOperand* value = UseRegister(instr->value());
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = TempRegister();
-
-      // Make sure that the temp and result_temp registers are
-      // different.
-      LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
-      Define(result, result_temp);
-      return AssignPointerMap(result);
-    } else {
-      ASSERT(to.IsInteger32());
-      LOperand* value = UseRegister(instr->value());
-      LOperand* temp1 = TempRegister();
-      LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL;
-      LDoubleToI* res = new(zone()) LDoubleToI(value, temp1, temp2);
-      return AssignEnvironment(DefineAsRegister(res));
-    }
-  } else if (from.IsInteger32()) {
-    if (to.IsTagged()) {
-      HValue* val = instr->value();
-      LOperand* value = UseRegisterAtStart(val);
-      if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineAsRegister(new(zone()) LSmiTag(value));
-      } else {
-        LNumberTagI* result = new(zone()) LNumberTagI(value);
-        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-      }
-    } else {
-      ASSERT(to.IsDouble());
-      LOperand* value = Use(instr->value());
-      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
-    }
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckInstanceType(value);
-  return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
-  LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
-  return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckSmi(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckFunction(value));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LInstruction* result = new(zone()) LCheckMap(value);
-  return AssignEnvironment(result);
-}
-
-
-LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
-  HValue* value = instr->value();
-  Representation input_rep = value->representation();
-  LOperand* reg = UseRegister(value);
-  if (input_rep.IsDouble()) {
-    // Revisit this decision, here and 8 lines below.
-    return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(f22)));
-  } else if (input_rep.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
-  } else {
-    ASSERT(input_rep.IsTagged());
-    // Register allocator doesn't (yet) support allocation of double
-    // temps. Reserve f22 explicitly.
-    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, FixedTemp(f22));
-    return AssignEnvironment(DefineAsRegister(result));
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new(zone()) LReturn(UseFixed(instr->value(), v0));
-}
-
-
-LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
-  Representation r = instr->representation();
-  if (r.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LConstantI);
-  } else if (r.IsDouble()) {
-    return DefineAsRegister(new(zone()) LConstantD);
-  } else if (r.IsTagged()) {
-    return DefineAsRegister(new(zone()) LConstantT);
-  } else {
-    UNREACHABLE();
-    return NULL;
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
-      ? AssignEnvironment(DefineAsRegister(result))
-      : DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
-  LOperand* global_object = UseFixed(instr->global_object(), a0);
-  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  LOperand* value = UseRegister(instr->value());
-  // Use a temp to check the value in the cell in the case where we perform
-  // a hole check.
-  return instr->RequiresHoleCheck()
-      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
-      : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
-  LOperand* global_object = UseFixed(instr->global_object(), a1);
-  LOperand* value = UseFixed(instr->value(), a0);
-  LStoreGlobalGeneric* result =
-      new(zone()) LStoreGlobalGeneric(global_object, value);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
-  LOperand* context = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
-  LOperand* context;
-  LOperand* value;
-  if (instr->NeedsWriteBarrier()) {
-    context = UseTempRegister(instr->context());
-    value = UseTempRegister(instr->value());
-  } else {
-    context = UseRegister(instr->context());
-    value = UseRegister(instr->value());
-  }
-  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  return DefineAsRegister(
-      new(zone()) LLoadNamedField(UseRegisterAtStart(instr->object())));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
-    HLoadNamedFieldPolymorphic* instr) {
-  ASSERT(instr->representation().IsTagged());
-  if (instr->need_generic()) {
-    LOperand* obj = UseFixed(instr->object(), a0);
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
-    return MarkAsCall(DefineFixed(result, v0), instr);
-  } else {
-    LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
-    return AssignEnvironment(DefineAsRegister(result));
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
-  LOperand* object = UseFixed(instr->object(), a0);
-  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
-    HLoadFunctionPrototype* instr) {
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadElements(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
-    HLoadExternalArrayPointer* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
-    HLoadKeyedFastElement* instr) {
-  ASSERT(instr->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsInteger32());
-  LOperand* obj = UseRegisterAtStart(instr->object());
-  LOperand* key = UseRegisterAtStart(instr->key());
-  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
-  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
-  return DefineAsRegister(result);
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
-    HLoadKeyedFastDoubleElement* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->key()->representation().IsInteger32());
-  LOperand* elements = UseTempRegister(instr->elements());
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastDoubleElement* result =
-      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
-  return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
-    HLoadKeyedSpecializedArrayElement* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
-  Representation representation(instr->representation());
-  ASSERT(
-      (representation.IsInteger32() &&
-       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
-       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
-       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
-       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-  ASSERT(instr->key()->representation().IsInteger32());
-  LOperand* external_pointer = UseRegister(instr->external_pointer());
-  LOperand* key = UseRegisterOrConstant(instr->key());
-  LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
-  LInstruction* load_instr = DefineAsRegister(result);
-  // An unsigned int array load might overflow and cause a deopt; make sure
-  // it has an environment.
-  return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
-      AssignEnvironment(load_instr) : load_instr;
-}
-
-
-LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
-  LOperand* object = UseFixed(instr->object(), a1);
-  LOperand* key = UseFixed(instr->key(), a0);
-
-  LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
-    HStoreKeyedFastElement* instr) {
-  bool needs_write_barrier = instr->NeedsWriteBarrier();
-  ASSERT(instr->value()->representation().IsTagged());
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsInteger32());
-
-  LOperand* obj = UseTempRegister(instr->object());
-  LOperand* val = needs_write_barrier
-      ? UseTempRegister(instr->value())
-      : UseRegisterAtStart(instr->value());
-  LOperand* key = needs_write_barrier
-      ? UseTempRegister(instr->key())
-      : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastElement(obj, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
-    HStoreKeyedFastDoubleElement* instr) {
-  ASSERT(instr->value()->representation().IsDouble());
-  ASSERT(instr->elements()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsInteger32());
-
-  LOperand* elements = UseRegisterAtStart(instr->elements());
-  LOperand* val = UseTempRegister(instr->value());
-  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
-  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
-    HStoreKeyedSpecializedArrayElement* instr) {
-  Representation representation(instr->value()->representation());
-  ElementsKind elements_kind = instr->elements_kind();
-  ASSERT(
-      (representation.IsInteger32() &&
-       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
-       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (representation.IsDouble() &&
-       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
-       (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-  ASSERT(instr->external_pointer()->representation().IsExternal());
-  ASSERT(instr->key()->representation().IsInteger32());
-
-  LOperand* external_pointer = UseRegister(instr->external_pointer());
-  bool val_is_temp_register =
-      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
-      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
-  LOperand* val = val_is_temp_register
-      ? UseTempRegister(instr->value())
-      : UseRegister(instr->value());
-  LOperand* key = UseRegisterOrConstant(instr->key());
-
-  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                        key,
-                                                        val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
-  LOperand* obj = UseFixed(instr->object(), a2);
-  LOperand* key = UseFixed(instr->key(), a1);
-  LOperand* val = UseFixed(instr->value(), a0);
-
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
-
-  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
-    HTransitionElementsKind* instr) {
-  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
-    LOperand* object = UseRegister(instr->object());
-    LOperand* new_map_reg = TempRegister();
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
-    return DefineSameAsFirst(result);
-  } else {
-    LOperand* object = UseFixed(instr->object(), a0);
-    LOperand* fixed_object_reg = FixedTemp(a2);
-    LOperand* new_map_reg = FixedTemp(a3);
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object,
-                                            new_map_reg,
-                                            fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, v0), instr);
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
-  bool needs_write_barrier = instr->NeedsWriteBarrier();
-
-  LOperand* obj = needs_write_barrier
-      ? UseTempRegister(instr->object())
-      : UseRegisterAtStart(instr->object());
-
-  LOperand* val = needs_write_barrier
-      ? UseTempRegister(instr->value())
-      : UseRegister(instr->value());
-
-  return new(zone()) LStoreNamedField(obj, val);
-}
-
-
-LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
-  LOperand* obj = UseFixed(instr->object(), a1);
-  LOperand* val = UseFixed(instr->value(), a0);
-
-  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
-  return MarkAsCall(result, instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
-  LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseRegisterAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
-                    instr);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
-  LOperand* string = UseTempRegister(instr->string());
-  LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
-  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
-}
-
-
-LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
-  LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
-  LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  LAllocateObject* result = new(zone()) LAllocateObject(
-      TempRegister(), TempRegister());
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LOperand* object = UseFixed(instr->object(), a0);
-  LOperand* key = UseFixed(instr->key(), a1);
-  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  allocator_->MarkAsOsrEntry();
-  current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new(zone()) LOsrEntry);
-}
-
-
-LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
-  int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
-  int spill_index = chunk()->GetNextSpillIndex(false);  // Not double-width.
-  if (spill_index > LUnallocated::kMaxFixedIndex) {
-    Abort("Too many spill slots needed for OSR");
-    spill_index = 0;
-  }
-  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
-}
-
-
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
-  // There are no real uses of the arguments object.
-  // arguments.length and element access are supported directly on
-  // stack arguments, and any real arguments object use causes a bailout.
-  // So this value is never used.
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
-  LOperand* arguments = UseRegister(instr->arguments());
-  LOperand* length = UseTempRegister(instr->length());
-  LOperand* index = UseRegister(instr->index());
-  LAccessArgumentsAt* result =
-      new(zone()) LAccessArgumentsAt(arguments, length, index);
-  return AssignEnvironment(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
-  LOperand* object = UseFixed(instr->value(), a0);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
-    HIsConstructCallAndBranch* instr) {
-  return new(zone()) LIsConstructCallAndBranch(TempRegister());
-}
-
-
-LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
-  HEnvironment* env = current_block_->last_environment();
-  ASSERT(env != NULL);
-
-  env->set_ast_id(instr->ast_id());
-
-  env->Drop(instr->pop_count());
-  for (int i = 0; i < instr->values()->length(); ++i) {
-    HValue* value = instr->values()->at(i);
-    if (instr->HasAssignedIndexAt(i)) {
-      env->Bind(instr->GetAssignedIndexAt(i), value);
-    } else {
-      env->Push(value);
-    }
-  }
-
-  // If there is an instruction with a pending deoptimization environment,
-  // create a lazy bailout instruction to capture the environment.
-  if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LInstruction* result = new(zone()) LLazyBailout;
-    result = AssignEnvironment(result);
-    instruction_pending_deoptimization_environment_->
-        set_deoptimization_environment(result->environment());
-    ClearInstructionPendingDeoptimizationEnvironment();
-    return result;
-  }
-
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
-  if (instr->is_function_entry()) {
-    return MarkAsCall(new(zone()) LStackCheck, instr);
-  } else {
-    ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
-  }
-}
-
-
-LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment();
-  HConstant* undefined = graph()->GetConstantUndefined();
-  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
-                                               instr->function(),
-                                               undefined,
-                                               instr->call_kind(),
-                                               instr->is_construct());
-  current_block_->UpdateEnvironment(inner);
-  chunk_->AddInlinedClosure(instr->closure());
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->
-      DiscardInlined(false);
-  current_block_->UpdateEnvironment(outer);
-  return NULL;
-}
-
-
-LInstruction* LChunkBuilder::DoIn(HIn* instr) {
-  LOperand* key = UseRegisterAtStart(instr->key());
-  LOperand* object = UseRegisterAtStart(instr->object());
-  LIn* result = new(zone()) LIn(key, object);
-  return MarkAsCall(DefineFixed(result, v0), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
-  LOperand* object = UseFixed(instr->enumerable(), a0);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
-  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
-  LOperand* map = UseRegister(instr->map());
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* map = UseRegisterAtStart(instr->map());
-  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* index = UseRegister(instr->index());
-  return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 5a7bf4d..ebc1e43 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,311 +32,131 @@
 #include "lithium-allocator.h"
 #include "lithium.h"
 #include "safepoint-table.h"
-#include "utils.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
 
 namespace v8 {
 namespace internal {
 
 // Forward declarations.
 class LCodeGen;
-
-#define LITHIUM_ALL_INSTRUCTION_LIST(V)         \
-  V(ControlInstruction)                         \
-  V(Call)                                       \
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
-
-
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(AllocateObject)                             \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(ArrayLiteral)                               \
-  V(BitI)                                       \
-  V(BitNotI)                                    \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallConstantFunction)                       \
-  V(CallFunction)                               \
-  V(CallGlobal)                                 \
-  V(CallKeyed)                                  \
-  V(CallKnownGlobal)                            \
-  V(CallNamed)                                  \
-  V(CallNew)                                    \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckFunction)                              \
-  V(CheckInstanceType)                          \
-  V(CheckMap)                                   \
-  V(CheckNonSmi)                                \
-  V(CheckPrototypeMaps)                         \
-  V(CheckSmi)                                   \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CmpConstantEqAndBranch)                     \
-  V(CmpIDAndBranch)                             \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantI)                                  \
-  V(ConstantT)                                  \
-  V(Context)                                    \
-  V(DeclareGlobals)                             \
-  V(DeleteProperty)                             \
-  V(Deoptimize)                                 \
-  V(DivI)                                       \
-  V(DoubleToI)                                  \
-  V(ElementsKind)                               \
-  V(FastLiteral)                                \
-  V(FixedArrayBaseLength)                       \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(GlobalObject)                               \
-  V(GlobalReceiver)                             \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(In)                                         \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsNilAndBranch)                             \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(StringCompareAndBranch)                     \
-  V(JSArrayLength)                              \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadElements)                               \
-  V(LoadExternalArrayPointer)                   \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyedFastDoubleElement)                 \
-  V(LoadKeyedFastElement)                       \
-  V(LoadKeyedGeneric)                           \
-  V(LoadKeyedSpecializedArrayElement)           \
-  V(LoadNamedField)                             \
-  V(LoadNamedFieldPolymorphic)                  \
-  V(LoadNamedGeneric)                           \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberUntagD)                               \
-  V(ObjectLiteral)                              \
-  V(OsrEntry)                                   \
-  V(OuterContext)                               \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(Random)                                     \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreContextSlot)                           \
-  V(StoreGlobalCell)                            \
-  V(StoreGlobalGeneric)                         \
-  V(StoreKeyedFastDoubleElement)                \
-  V(StoreKeyedFastElement)                      \
-  V(StoreKeyedGeneric)                          \
-  V(StoreKeyedSpecializedArrayElement)          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringLength)                               \
-  V(SubI)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(Throw)                                      \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(UnaryMathOperation)                         \
-  V(UnknownOSRValue)                            \
-  V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)
-
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
-  virtual Opcode opcode() const { return LInstruction::k##type; } \
-  virtual void CompileToNative(LCodeGen* generator);              \
-  virtual const char* Mnemonic() const { return mnemonic; }       \
-  static L##type* cast(LInstruction* instr) {                     \
-    ASSERT(instr->Is##type());                                    \
-    return reinterpret_cast<L##type*>(instr);                     \
-  }
-
-
-#define DECLARE_HYDROGEN_ACCESSOR(type)     \
-  H##type* hydrogen() const {               \
-    return H##type::cast(hydrogen_value()); \
-  }
-
+class LEnvironment;
+class Translation;
 
 class LInstruction: public ZoneObject {
  public:
-  LInstruction()
-      :  environment_(NULL),
-         hydrogen_value_(NULL),
-         is_call_(false),
-         is_save_doubles_(false) { }
+  LInstruction() { }
   virtual ~LInstruction() { }
 
-  virtual void CompileToNative(LCodeGen* generator) = 0;
-  virtual const char* Mnemonic() const = 0;
-  virtual void PrintTo(StringStream* stream);
-  virtual void PrintDataTo(StringStream* stream) = 0;
-  virtual void PrintOutputOperandTo(StringStream* stream) = 0;
-
-  enum Opcode {
-    // Declare a unique enum value for each instruction.
-#define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
-    kNumberOfInstructions
-#undef DECLARE_OPCODE
-  };
-
-  virtual Opcode opcode() const = 0;
-
-  // Declare non-virtual type testers for all leaf IR classes.
-#define DECLARE_PREDICATE(type) \
-  bool Is##type() const { return opcode() == k##type; }
-  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
-#undef DECLARE_PREDICATE
-
-  // Declare virtual predicates for instructions that don't have
-  // an opcode.
-  virtual bool IsGap() const { return false; }
-
-  virtual bool IsControl() const { return false; }
-
-  void set_environment(LEnvironment* env) { environment_ = env; }
-  LEnvironment* environment() const { return environment_; }
-  bool HasEnvironment() const { return environment_ != NULL; }
-
-  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
-  LPointerMap* pointer_map() const { return pointer_map_.get(); }
-  bool HasPointerMap() const { return pointer_map_.is_set(); }
-
-  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
-  HValue* hydrogen_value() const { return hydrogen_value_; }
-
-  void set_deoptimization_environment(LEnvironment* env) {
-    deoptimization_environment_.set(env);
+  // Predicates should be generated by a macro as in lithium-ia32.h.
+  virtual bool IsLabel() const {
+    UNIMPLEMENTED();
+    return false;
   }
-  LEnvironment* deoptimization_environment() const {
-    return deoptimization_environment_.get();
-  }
-  bool HasDeoptimizationEnvironment() const {
-    return deoptimization_environment_.is_set();
+  virtual bool IsOsrEntry() const {
+    UNIMPLEMENTED();
+    return false;
   }
 
-  void MarkAsCall() { is_call_ = true; }
-  void MarkAsSaveDoubles() { is_save_doubles_ = true; }
+  LPointerMap* pointer_map() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasPointerMap() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+
+  LEnvironment* environment() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool HasEnvironment() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+
+  virtual bool IsControl() const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  void MarkAsCall() { UNIMPLEMENTED(); }
+  void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
 
   // Interface to the register allocator and iterators.
-  bool IsMarkedAsCall() const { return is_call_; }
-  bool IsMarkedAsSaveDoubles() const { return is_save_doubles_; }
+  bool IsMarkedAsCall() const {
+    UNIMPLEMENTED();
+    return false;
+  }
 
-  virtual bool HasResult() const = 0;
-  virtual LOperand* result() = 0;
+  bool IsMarkedAsSaveDoubles() const {
+    UNIMPLEMENTED();
+    return false;
+  }
 
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
-  virtual int TempCount() = 0;
-  virtual LOperand* TempAt(int i) = 0;
+  virtual bool HasResult() const {
+    UNIMPLEMENTED();
+    return false;
+  }
 
-  LOperand* FirstInput() { return InputAt(0); }
-  LOperand* Output() { return HasResult() ? result() : NULL; }
+  virtual LOperand* result() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  virtual int InputCount() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  virtual LOperand* InputAt(int i) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  virtual int TempCount() {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  virtual LOperand* TempAt(int i) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* FirstInput() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LOperand* Output() {
+    UNIMPLEMENTED();
+    return NULL;
+  }
 
 #ifdef DEBUG
-  void VerifyCall();
+  void VerifyCall() { UNIMPLEMENTED(); }
 #endif
-
- private:
-  LEnvironment* environment_;
-  SetOncePointer<LPointerMap> pointer_map_;
-  HValue* hydrogen_value_;
-  SetOncePointer<LEnvironment> deoptimization_environment_;
-  bool is_call_;
-  bool is_save_doubles_;
 };
 
 
-// R = number of result operands (0 or 1).
-// I = number of input operands.
-// T = number of temporary operands.
-template<int R, int I, int T>
-class LTemplateInstruction: public LInstruction {
+class LGap: public LInstruction {
  public:
-  // Allow 0 or 1 output operands.
-  STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const { return R != 0; }
-  void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() { return results_[0]; }
+  explicit LGap(HBasicBlock* block) { }
 
-  int InputCount() { return I; }
-  LOperand* InputAt(int i) { return inputs_[i]; }
-
-  int TempCount() { return T; }
-  LOperand* TempAt(int i) { return temps_[i]; }
-
-  virtual void PrintDataTo(StringStream* stream);
-  virtual void PrintOutputOperandTo(StringStream* stream);
-
- protected:
-  EmbeddedContainer<LOperand*, R> results_;
-  EmbeddedContainer<LOperand*, I> inputs_;
-  EmbeddedContainer<LOperand*, T> temps_;
-};
-
-
-class LGap: public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGap(HBasicBlock* block)
-      : block_(block) {
-    parallel_moves_[BEFORE] = NULL;
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-    parallel_moves_[AFTER] = NULL;
+  HBasicBlock* block() const {
+    UNIMPLEMENTED();
+    return NULL;
   }
 
-  // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const { return true; }
-  virtual void PrintDataTo(StringStream* stream);
-  static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
-    return reinterpret_cast<LGap*>(instr);
-  }
-
-  bool IsRedundant() const;
-
-  HBasicBlock* block() const { return block_; }
-
   enum InnerPosition {
     BEFORE,
     START,
@@ -346,2047 +166,141 @@
     LAST_INNER_POSITION = AFTER
   };
 
-  LParallelMove* GetOrCreateParallelMove(InnerPosition pos)  {
-    if (parallel_moves_[pos] == NULL) parallel_moves_[pos] = new LParallelMove;
-    return parallel_moves_[pos];
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+    UNIMPLEMENTED();
+    return NULL;
   }
 
   LParallelMove* GetParallelMove(InnerPosition pos)  {
-    return parallel_moves_[pos];
+    UNIMPLEMENTED();
+    return NULL;
   }
-
- private:
-  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-  HBasicBlock* block_;
-};
-
-
-class LInstructionGap: public LGap {
- public:
-  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
-};
-
-
-class LGoto: public LTemplateInstruction<0, 0, 0> {
- public:
-  explicit LGoto(int block_id) : block_id_(block_id) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream);
-  virtual bool IsControl() const { return true; }
-
-  int block_id() const { return block_id_; }
-
- private:
-  int block_id_;
-};
-
-
-class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
- public:
-  LLazyBailout() : gap_instructions_size_(0) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
-
-  void set_gap_instructions_size(int gap_instructions_size) {
-    gap_instructions_size_ = gap_instructions_size;
-  }
-  int gap_instructions_size() { return gap_instructions_size_; }
-
- private:
-  int gap_instructions_size_;
-};
-
-
-class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
 };
 
 
 class LLabel: public LGap {
  public:
-  explicit LLabel(HBasicBlock* block)
-      : LGap(block), replacement_(NULL) { }
-
-  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  int block_id() const { return block()->block_id(); }
-  bool is_loop_header() const { return block()->IsLoopHeader(); }
-  Label* label() { return &label_; }
-  LLabel* replacement() const { return replacement_; }
-  void set_replacement(LLabel* label) { replacement_ = label; }
-  bool HasReplacement() const { return replacement_ != NULL; }
-
- private:
-  Label label_;
-  LLabel* replacement_;
-};
-
-
-class LParameter: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+  explicit LLabel(HBasicBlock* block) : LGap(block) { }
 };
 
 
-class LCallStub: public LTemplateInstruction<1, 0, 0> {
+class LOsrEntry: public LInstruction {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
-  DECLARE_HYDROGEN_ACCESSOR(CallStub)
-
-  TranscendentalCache::Type transcendental_type() {
-    return hydrogen()->transcendental_type();
+  // Function could be generated by a macro as in lithium-ia32.h.
+  static LOsrEntry* cast(LInstruction* instr) {
+    UNIMPLEMENTED();
+    return NULL;
   }
-};
-
-
-class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
-};
-
-
-template<int I, int T>
-class LControlInstruction: public LTemplateInstruction<0, I, T> {
- public:
-  virtual bool IsControl() const { return true; }
-
-  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
-  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
-  int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
-  int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
 
- private:
-  HControlInstruction* hydrogen() {
-    return HControlInstruction::cast(this->hydrogen_value());
+  LOperand** SpilledRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
   }
-};
-
-
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
-  LWrapReceiver(LOperand* receiver, LOperand* function) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
+  LOperand** SpilledDoubleRegisterArray() {
+    UNIMPLEMENTED();
+    return NULL;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-};
-
-
-class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
- public:
-  LApplyArguments(LOperand* function,
-                  LOperand* receiver,
-                  LOperand* length,
-                  LOperand* elements) {
-    inputs_[0] = function;
-    inputs_[1] = receiver;
-    inputs_[2] = length;
-    inputs_[3] = elements;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
-
-  LOperand* function() { return inputs_[0]; }
-  LOperand* receiver() { return inputs_[1]; }
-  LOperand* length() { return inputs_[2]; }
-  LOperand* elements() { return inputs_[3]; }
-};
-
-
-class LAccessArgumentsAt: public LTemplateInstruction<1, 3, 0> {
- public:
-  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
-    inputs_[0] = arguments;
-    inputs_[1] = length;
-    inputs_[2] = index;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
-
-  LOperand* arguments() { return inputs_[0]; }
-  LOperand* length() { return inputs_[1]; }
-  LOperand* index() { return inputs_[2]; }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LArgumentsLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LArgumentsLength(LOperand* elements) {
-    inputs_[0] = elements;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
-};
-
-
-class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
- public:
-  LArgumentsElements() { }
-
-  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
-};
-
-
-class LModI: public LTemplateInstruction<1, 2, 3> {
- public:
-  // Used when the right hand is a constant power of 2.
-  LModI(LOperand* left,
-        LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = NULL;
-    temps_[1] = NULL;
-    temps_[2] = NULL;
-  }
-
-  // Used for the standard case.
-  LModI(LOperand* left,
-        LOperand* right,
-        LOperand* temp1,
-        LOperand* temp2,
-        LOperand* temp3) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = temp3;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mod)
-};
-
-
-class LDivI: public LTemplateInstruction<1, 2, 0> {
- public:
-  LDivI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
-  DECLARE_HYDROGEN_ACCESSOR(Div)
-};
-
-
-class LMulI: public LTemplateInstruction<1, 2, 1> {
- public:
-  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
-  DECLARE_HYDROGEN_ACCESSOR(Mul)
-};
-
-
-class LCmpIDAndBranch: public LControlInstruction<2, 0> {
- public:
-  LCmpIDAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
-
-  Token::Value op() const { return hydrogen()->token(); }
-  bool is_double() const {
-    return hydrogen()->GetInputRepresentation().IsDouble();
-  }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
- public:
-  LUnaryMathOperation(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
-  virtual void PrintDataTo(StringStream* stream);
-  BuiltinFunctionId op() const { return hydrogen()->op(); }
-};
-
-
-class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
- public:
-  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
-                               "cmp-object-eq-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
-};
-
-
-class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LCmpConstantEqAndBranch(LOperand* left) {
-    inputs_[0] = left;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
-                               "cmp-constant-eq-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
-};
-
-
-class LIsNilAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LIsNilAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
-
-  EqualityKind kind() const { return hydrogen()->kind(); }
-  NilValue nil() const { return hydrogen()->nil(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 1> {
- public:
-  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
-  LIsStringAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsSmiAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LIsSmiAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
- public:
-  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
-                               "is-undetectable-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
-  LStringCompareAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
-                               "string-compare-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
-  Token::Value op() const { return hydrogen()->token(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LHasInstanceTypeAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
-                               "has-instance-type-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGetCachedArrayIndex(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
-  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
-};
-
-
-class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
-                               "has-cached-array-index-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
- public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
-                               "class-of-test-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpT: public LTemplateInstruction<1, 2, 0> {
- public:
-  LCmpT(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
-  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
- public:
-  LInstanceOf(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
-};
-
-
-class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
- public:
-  LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
-                               "instance-of-known-global")
-  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
-  Handle<JSFunction> function() const { return hydrogen()->function(); }
-};
-
-
-class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
- public:
-  LBoundsCheck(LOperand* index, LOperand* length) {
-    inputs_[0] = index;
-    inputs_[1] = length;
-  }
-
-  LOperand* index() { return inputs_[0]; }
-  LOperand* length() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
-};
-
-
-class LBitI: public LTemplateInstruction<1, 2, 0> {
- public:
-  LBitI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  Token::Value op() const { return hydrogen()->op(); }
-
-  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
-};
-
-
-class LShiftI: public LTemplateInstruction<1, 2, 0> {
- public:
-  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
-      : op_(op), can_deopt_(can_deopt) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  Token::Value op() const { return op_; }
-
-  bool can_deopt() const { return can_deopt_; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
-
- private:
-  Token::Value op_;
-  bool can_deopt_;
-};
-
-
-class LSubI: public LTemplateInstruction<1, 2, 0> {
- public:
-  LSubI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
-  DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
-class LConstantI: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  int32_t value() const { return hydrogen()->Integer32Value(); }
-};
-
-
-class LConstantD: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  double value() const { return hydrogen()->DoubleValue(); }
-};
-
-
-class LConstantT: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
-  DECLARE_HYDROGEN_ACCESSOR(Constant)
-
-  Handle<Object> value() const { return hydrogen()->handle(); }
-};
-
-
-class LBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
-  DECLARE_HYDROGEN_ACCESSOR(Branch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LCmpMapAndBranch: public LTemplateInstruction<0, 1, 1> {
- public:
-  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
-
-  virtual bool IsControl() const { return true; }
-
-  Handle<Map> map() const { return hydrogen()->map(); }
-  int true_block_id() const {
-    return hydrogen()->FirstSuccessor()->block_id();
-  }
-  int false_block_id() const {
-    return hydrogen()->SecondSuccessor()->block_id();
-  }
-};
-
-
-class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LJSArrayLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(JSArrayLength, "js-array-length")
-  DECLARE_HYDROGEN_ACCESSOR(JSArrayLength)
-};
-
-
-class LFixedArrayBaseLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LFixedArrayBaseLength(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(FixedArrayBaseLength,
-                               "fixed-array-base-length")
-  DECLARE_HYDROGEN_ACCESSOR(FixedArrayBaseLength)
-};
-
-
-class LElementsKind: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LElementsKind(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
-};
-
-
-class LValueOf: public LTemplateInstruction<1, 1, 1> {
- public:
-  LValueOf(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
-  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-};
-
-
-class LDateField: public LTemplateInstruction<1, 1, 1> {
- public:
-  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
-    inputs_[0] = date;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(DateField)
-  Smi* index() const { return index_; }
-
- private:
-  Smi* index_;
-};
-
-
-class LThrow: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LThrow(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Throw, "throw")
-};
-
-
-class LBitNotI: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LBitNotI(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(BitNotI, "bit-not-i")
-};
-
-
-class LAddI: public LTemplateInstruction<1, 2, 0> {
- public:
-  LAddI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
-  DECLARE_HYDROGEN_ACCESSOR(Add)
-};
-
-
-class LPower: public LTemplateInstruction<1, 2, 0> {
- public:
-  LPower(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
-  DECLARE_HYDROGEN_ACCESSOR(Power)
-};
-
-
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LRandom(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
-  DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
-class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
- public:
-  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  Token::Value op() const { return op_; }
-
-  virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
-  virtual void CompileToNative(LCodeGen* generator);
-  virtual const char* Mnemonic() const;
-
- private:
-  Token::Value op_;
-};
-
-
-class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
- public:
-  LArithmeticT(Token::Value op, LOperand* left, LOperand* right)
-      : op_(op) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
-  virtual void CompileToNative(LCodeGen* generator);
-  virtual const char* Mnemonic() const;
-
-  Token::Value op() const { return op_; }
-
- private:
-  Token::Value op_;
-};
-
-
-class LReturn: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LReturn(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
-};
-
-
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadNamedField(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
-};
-
-
-class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadNamedFieldPolymorphic(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedFieldPolymorphic,
-                               "load-named-field-polymorphic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
-
-  LOperand* object() { return inputs_[0]; }
-};
-
-
-class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadNamedGeneric(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
-
-  LOperand* object() { return inputs_[0]; }
-  Handle<Object> name() const { return hydrogen()->name(); }
-};
-
-
-class LLoadFunctionPrototype: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadFunctionPrototype(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
-  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
-
-  LOperand* function() { return inputs_[0]; }
-};
-
-
-class LLoadElements: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadElements(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadElements, "load-elements")
-};
-
-
-class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadExternalArrayPointer(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
-                               "load-external-array-pointer")
-};
-
-
-class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
-    inputs_[0] = elements;
-    inputs_[1] = key;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
-
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
-    inputs_[0] = elements;
-    inputs_[1] = key;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
-                               "load-keyed-fast-double-element")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
-
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                    LOperand* key) {
-    inputs_[0] = external_pointer;
-    inputs_[1] = key;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
-                               "load-keyed-specialized-array-element")
-  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
-
-  LOperand* external_pointer() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  ElementsKind elements_kind() const {
-    return hydrogen()->elements_kind();
-  }
-};
-
-
-class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-};
-
-
-class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
-class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadGlobalGeneric(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
-
-  LOperand* global_object() { return inputs_[0]; }
-  Handle<Object> name() const { return hydrogen()->name(); }
-  bool for_typeof() const { return hydrogen()->for_typeof(); }
-};
-
-
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
- public:
-  LStoreGlobalCell(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
-  LOperand* value() { return inputs_[0]; }
-};
-
-
-class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
-  LStoreGlobalGeneric(LOperand* global_object, LOperand* value) {
-    inputs_[0] = global_object;
-    inputs_[1] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
-
-  LOperand* global_object() { return InputAt(0); }
-  Handle<Object> name() const { return hydrogen()->name(); }
-  LOperand* value() { return InputAt(1); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LLoadContextSlot(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
-  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
-
-  LOperand* context() { return InputAt(0); }
-  int slot_index() { return hydrogen()->slot_index(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
- public:
-  LStoreContextSlot(LOperand* context, LOperand* value) {
-    inputs_[0] = context;
-    inputs_[1] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
-  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
-
-  LOperand* context() { return InputAt(0); }
-  LOperand* value() { return InputAt(1); }
-  int slot_index() { return hydrogen()->slot_index(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LPushArgument: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LPushArgument(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
-};
-
-
-class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
-};
-
-
-class LContext: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
-};
-
-
-class LOuterContext: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LOuterContext(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
-
-  LOperand* context() { return InputAt(0); }
-};
-
-
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
-class LGlobalObject: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGlobalObject(LOperand* context) {
-    inputs_[0] = context;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
-
-  LOperand* context() { return InputAt(0); }
-};
-
-
-class LGlobalReceiver: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LGlobalReceiver(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver, "global-receiver")
-
-  LOperand* global() { return InputAt(0); }
-};
-
-
-class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction, "call-constant-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallConstantFunction)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Handle<JSFunction> function() { return hydrogen()->function(); }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LInvokeFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
-  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
-
-  LOperand* function() { return inputs_[0]; }
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallKeyed(LOperand* key) {
-    inputs_[0] = key;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
-  DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNamed: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named")
-  DECLARE_HYDROGEN_ACCESSOR(CallNamed)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Handle<String> name() const { return hydrogen()->name(); }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
-  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
-
-  LOperand* function() { return inputs_[0]; }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global")
-  DECLARE_HYDROGEN_ACCESSOR(CallGlobal)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Handle<String> name() const { return hydrogen()->name(); }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal, "call-known-global")
-  DECLARE_HYDROGEN_ACCESSOR(CallKnownGlobal)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Handle<JSFunction> target() const { return hydrogen()->target(); }
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallNew: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LCallNew(LOperand* constructor) {
-    inputs_[0] = constructor;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
-  DECLARE_HYDROGEN_ACCESSOR(CallNew)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  int arity() const { return hydrogen()->argument_count() - 1; }
-};
-
-
-class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
-  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
-
-  const Runtime::Function* function() const { return hydrogen()->function(); }
-  int arity() const { return hydrogen()->argument_count(); }
-};
-
-
-class LInteger32ToDouble: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LInteger32ToDouble(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
-};
-
-
-class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LNumberTagI(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
-};
-
-
-class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
- public:
-  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
-};
-
-
-// Sometimes truncating conversion from a double to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
- public:
-  LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
-  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-// Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
- public:
-  LTaggedToI(LOperand* value,
-             LOperand* temp1,
-             LOperand* temp2,
-             LOperand* temp3) {
-    inputs_[0] = value;
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-    temps_[2] = temp3;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
-  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
-
-  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
-};
-
-
-class LSmiTag: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LSmiTag(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
-};
-
-
-class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LNumberUntagD(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
-  DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
-class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
- public:
-  LSmiUntag(LOperand* value, bool needs_check)
-      : needs_check_(needs_check) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
-
-  bool needs_check() const { return needs_check_; }
-
- private:
-  bool needs_check_;
-};
-
-
-class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
- public:
-  LStoreNamedField(LOperand* obj, LOperand* val) {
-    inputs_[0] = obj;
-    inputs_[1] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-
-  Handle<Object> name() const { return hydrogen()->name(); }
-  bool is_in_object() { return hydrogen()->is_in_object(); }
-  int offset() { return hydrogen()->offset(); }
-  Handle<Map> transition() const { return hydrogen()->transition(); }
-};
-
-
-class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
- public:
-  LStoreNamedGeneric(LOperand* obj, LOperand* val) {
-    inputs_[0] = obj;
-    inputs_[1] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* value() { return inputs_[1]; }
-  Handle<Object> name() const { return hydrogen()->name(); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
-    inputs_[2] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
-                               "store-keyed-fast-element")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreKeyedFastDoubleElement(LOperand* elements,
-                               LOperand* key,
-                               LOperand* val) {
-    inputs_[0] = elements;
-    inputs_[1] = key;
-    inputs_[2] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
-                               "store-keyed-fast-double-element")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* elements() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-};
-
-
-class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
-    inputs_[2] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
- public:
-  LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
-                                     LOperand* key,
-                                     LOperand* val) {
-    inputs_[0] = external_pointer;
-    inputs_[1] = key;
-    inputs_[2] = val;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
-                               "store-keyed-specialized-array-element")
-  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
-
-  LOperand* external_pointer() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-  LOperand* value() { return inputs_[2]; }
-  ElementsKind elements_kind() const {
-    return hydrogen()->elements_kind();
-  }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* new_map_temp,
-                          LOperand* temp_reg) {
-    inputs_[0] = object;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp_reg;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_reg() { return temps_[0]; }
-  LOperand* temp_reg() { return temps_[1]; }
-  Handle<Map> original_map() { return hydrogen()->original_map(); }
-  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
-};
-
-
-class LStringAdd: public LTemplateInstruction<1, 2, 0> {
- public:
-  LStringAdd(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
-  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-};
-
-
-class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
- public:
-  LStringCharCodeAt(LOperand* string, LOperand* index) {
-    inputs_[0] = string;
-    inputs_[1] = index;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
-  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
-
-  LOperand* string() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-};
-
-
-class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LStringCharFromCode(LOperand* char_code) {
-    inputs_[0] = char_code;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
-  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
-
-  LOperand* char_code() { return inputs_[0]; }
-};
-
-
-class LStringLength: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LStringLength(LOperand* string) {
-    inputs_[0] = string;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringLength, "string-length")
-  DECLARE_HYDROGEN_ACCESSOR(StringLength)
-
-  LOperand* string() { return inputs_[0]; }
-};
-
-
-class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckFunction(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return InputAt(0); }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
-  DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
-};
-
-
-class LCheckInstanceType: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckInstanceType(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
-  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
-};
-
-
-class LCheckMap: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckMap(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMap, "check-map")
-  DECLARE_HYDROGEN_ACCESSOR(CheckMap)
-};
-
-
-class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
- public:
-  LCheckPrototypeMaps(LOperand* temp1, LOperand* temp2) {
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
-  DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
-
-  Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
-  Handle<JSObject> holder() const { return hydrogen()->holder(); }
-};
-
-
-class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
-};
-
-
-class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
- public:
-  explicit LCheckNonSmi(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
-};
-
-
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
-  LClampDToUint8(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
-};
-
-
-class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LClampIToUint8(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
-};
-
-
-class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
- public:
-  LClampTToUint8(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  LOperand* unclamped() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
-};
-
-
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
- public:
-  LAllocateObject(LOperand* temp1, LOperand* temp2) {
-    temps_[0] = temp1;
-    temps_[1] = temp2;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
-class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ArrayLiteral)
-};
-
-
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
-  DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
-};
-
-
-class LRegExpLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
-  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
-};
-
-
-class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-
-  Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); }
-};
-
-
-class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LToFastProperties(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
-  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
-};
-
-
-class LTypeof: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LTypeof(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
-};
-
-
-class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
- public:
-  explicit LTypeofIsAndBranch(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
-
-  Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
- public:
-  explicit LIsConstructCallAndBranch(LOperand* temp) {
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
-                               "is-construct-call-and-branch")
-};
-
-
-class LDeleteProperty: public LTemplateInstruction<1, 2, 0> {
- public:
-  LDeleteProperty(LOperand* obj, LOperand* key) {
-    inputs_[0] = obj;
-    inputs_[1] = key;
+  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+    UNIMPLEMENTED();
   }
-
-  DECLARE_CONCRETE_INSTRUCTION(DeleteProperty, "delete-property")
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* key() { return inputs_[1]; }
-};
-
-
-class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
- public:
-  LOsrEntry();
-
-  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
-
-  LOperand** SpilledRegisterArray() { return register_spills_; }
-  LOperand** SpilledDoubleRegisterArray() { return double_register_spills_; }
-
-  void MarkSpilledRegister(int allocation_index, LOperand* spill_operand);
   void MarkSpilledDoubleRegister(int allocation_index,
-                                 LOperand* spill_operand);
-
- private:
-  // Arrays of spill slot operands for registers with an assigned spill
-  // slot, i.e., that must also be restored to the spill slot on OSR entry.
-  // NULL if the register has no assigned spill slot.  Indexed by allocation
-  // index.
-  LOperand* register_spills_[Register::kNumAllocatableRegisters];
-  LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
-};
-
-
-class LStackCheck: public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
-  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
-
-  Label* done_label() { return &done_label_; }
-
- private:
-  Label done_label_;
-};
-
-
-class LIn: public LTemplateInstruction<1, 2, 0> {
- public:
-  LIn(LOperand* key, LOperand* object) {
-    inputs_[0] = key;
-    inputs_[1] = object;
-  }
-
-  LOperand* key() { return inputs_[0]; }
-  LOperand* object() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(In, "in")
-};
-
-
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInPrepareMap(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+                                 LOperand* spill_operand) {
+    UNIMPLEMENTED();
   }
 };
 
 
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadFieldByIndex(LOperand* object, LOperand* index) {
-    inputs_[0] = object;
-    inputs_[1] = index;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
-class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
-  explicit LChunk(CompilationInfo* info, HGraph* graph);
+  explicit LChunk(HGraph* graph) { }
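+  // The MIPS port has no Crankshaft implementation at this revision; the
+  // methods below are stubs that abort with UNIMPLEMENTED() if reached.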
 
-  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
-  LConstantOperand* DefineConstantOperand(HConstant* constant);
-  Handle<Object> LookupLiteral(LConstantOperand* operand) const;
-  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+  HGraph* graph() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
 
-  int GetNextSpillIndex(bool is_double);
-  LOperand* GetNextSpillSlot(bool is_double);
+  const ZoneList<LPointerMap*>* pointer_maps() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
 
-  int ParameterAt(int index);
-  int GetParameterStackSlot(int index) const;
-  int spill_slot_count() const { return spill_slot_count_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
-  void AddGapMove(int index, LOperand* from, LOperand* to);
-  LGap* GetGapAt(int index) const;
-  bool IsGapAt(int index) const;
-  int NearestGapPos(int index) const;
-  void MarkEmptyBlocks();
-  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LOperand* GetNextSpillSlot(bool double_slot) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  LConstantOperand* DefineConstantOperand(HConstant* constant) {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
   LLabel* GetLabel(int block_id) const {
-    HBasicBlock* block = graph_->blocks()->at(block_id);
-    int first_instruction = block->first_instruction_index();
-    return LLabel::cast(instructions_[first_instruction]);
-  }
-  int LookupDestination(int block_id) const {
-    LLabel* cur = GetLabel(block_id);
-    while (cur->replacement() != NULL) {
-      cur = cur->replacement();
-    }
-    return cur->block_id();
-  }
-  Label* GetAssemblyLabel(int block_id) const {
-    LLabel* label = GetLabel(block_id);
-    ASSERT(!label->HasReplacement());
-    return label->label();
+    UNIMPLEMENTED();
+    return NULL;
   }
 
-  const ZoneList<Handle<JSFunction> >* inlined_closures() const {
-    return &inlined_closures_;
+  const ZoneList<LInstruction*>* instructions() const {
+    UNIMPLEMENTED();
+    return NULL;
   }
 
-  void AddInlinedClosure(Handle<JSFunction> closure) {
-    inlined_closures_.Add(closure);
+  int GetParameterStackSlot(int index) const {
+    UNIMPLEMENTED();
+    return 0;
   }
 
- private:
-  int spill_slot_count_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  ZoneList<LInstruction*> instructions_;
-  ZoneList<LPointerMap*> pointer_maps_;
-  ZoneList<Handle<JSFunction> > inlined_closures_;
+  void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+  LGap* GetGapAt(int index) const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+  bool IsGapAt(int index) const {
+    UNIMPLEMENTED();
+    return false;
+  }
+
+  int NearestGapPos(int index) const {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+  CompilationInfo* info() const {
+    UNIMPLEMENTED();
+    return NULL;
+  }
+
+#ifdef DEBUG
+  void Verify() { UNIMPLEMENTED(); }
+#endif
 };
 
 
 class LChunkBuilder BASE_EMBEDDED {
  public:
-  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        zone_(graph->isolate()->zone()),
-        status_(UNUSED),
-        current_instruction_(NULL),
-        current_block_(NULL),
-        next_block_(NULL),
-        argument_count_(0),
-        allocator_(allocator),
-        position_(RelocInfo::kNoPosition),
-        instruction_pending_deoptimization_environment_(NULL),
-        pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
+  LChunkBuilder(CompilationInfo*&, HGraph* graph, LAllocator* allocator) { }
 
   // Build the sequence for the graph.
-  LChunk* Build();
-
-  // Declare methods that deal with the individual node types.
-#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
-  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
-#undef DECLARE_DO
-
- private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
+  LChunk* Build() {
+    UNIMPLEMENTED();
+    return NULL;
   };
 
-  LChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(const char* format, ...);
-
-  // Methods for getting operands for Use / Define / Temp.
-  LUnallocated* ToUnallocated(Register reg);
-  LUnallocated* ToUnallocated(DoubleRegister reg);
-
-  // Methods for setting up define-use relationships.
-  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
-  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
-  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
-                                           DoubleRegister fixed_register);
-
-  // A value that is guaranteed to be allocated to a register.
-  // An operand created by UseRegister is guaranteed to be live until the end
-  // of the instruction, so the register allocator will not reuse its register
-  // for any other operand inside the instruction.
-  // An operand created by UseRegisterAtStart is guaranteed to be live only at
-  // the instruction start; the register allocator is free to assign the same
-  // register to some other operand used inside the instruction (i.e. a
-  // temporary or the output).
-  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
-  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
-
-  // An input operand in a register that may be trashed.
-  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
-
-  // An input operand in a register or stack slot.
-  MUST_USE_RESULT LOperand* Use(HValue* value);
-  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
-
-  // An input operand in a register, stack slot or a constant operand.
-  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
-  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
-
-  // An input operand in a register or a constant operand.
-  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
-  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
-
-  // An input operand in register, stack slot or a constant operand.
-  // Will not be moved to a register even if one is freely available.
-  MUST_USE_RESULT LOperand* UseAny(HValue* value);
-
-  // Temporary operand that must be in a register.
-  MUST_USE_RESULT LUnallocated* TempRegister();
-  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
-  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
-
-  // Methods for setting up define-use relationships.
-  // Return the same instruction that they are passed.
-  template<int I, int T>
-      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
-                           LUnallocated* result);
-  template<int I, int T>
-      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
-  template<int I, int T>
-      LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
-                                    int index);
-  template<int I, int T>
-      LInstruction* DefineSameAsFirst(LTemplateInstruction<1, I, T>* instr);
-  template<int I, int T>
-      LInstruction* DefineFixed(LTemplateInstruction<1, I, T>* instr,
-                                Register reg);
-  template<int I, int T>
-      LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
-                                      DoubleRegister reg);
-  LInstruction* AssignEnvironment(LInstruction* instr);
-  LInstruction* AssignPointerMap(LInstruction* instr);
-
-  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
-
-  // By default we assume that instruction sequences generated for calls
-  // cannot deoptimize eagerly and we do not attach environment to this
-  // instruction.
-  LInstruction* MarkAsCall(
-      LInstruction* instr,
-      HInstruction* hinstr,
-      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
-  LInstruction* MarkAsSaveDoubles(LInstruction* instr);
-
-  LInstruction* SetInstructionPendingDeoptimizationEnvironment(
-      LInstruction* instr, int ast_id);
-  void ClearInstructionPendingDeoptimizationEnvironment();
-
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator);
-
-  void VisitInstruction(HInstruction* current);
-
-  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
-  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
-  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
-  LInstruction* DoArithmeticD(Token::Value op,
-                              HArithmeticBinaryOperation* instr);
-  LInstruction* DoArithmeticT(Token::Value op,
-                              HArithmeticBinaryOperation* instr);
-
-  LChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Zone* zone_;
-  Status status_;
-  HInstruction* current_instruction_;
-  HBasicBlock* current_block_;
-  HBasicBlock* next_block_;
-  int argument_count_;
-  LAllocator* allocator_;
-  int position_;
-  LInstruction* instruction_pending_deoptimization_environment_;
-  int pending_deoptimization_ast_id_;
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+    UNIMPLEMENTED(); \
+    return NULL; \
+  }
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
 
   DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
 };
 
-#undef DECLARE_HYDROGEN_ACCESSOR
-#undef DECLARE_CONCRETE_INSTRUCTION
 
 } }  // namespace v8::internal
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e93a417..1c0af5d 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,8 +42,7 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true),
-      has_frame_(false) {
+      allow_stub_calls_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -81,16 +80,36 @@
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    li(result, Operand(cell));
-    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
-  } else {
-    li(result, Operand(object));
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register address,
+                                       Register scratch) {
+  if (emit_debug_code()) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, ne, &not_in_new_space);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
   }
+
+  // Calculate page address: Clear bits from 0 to kPageSizeBits.
+  if (mips32r2) {
+    Ins(object, zero_reg, 0, kPageSizeBits);
+  } else {
+    // The Ins macro is slow on r1, so use shifts instead.
+    srl(object, object, kPageSizeBits);
+    sll(object, object, kPageSizeBits);
+  }
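+  // 'object' now points to the start of its page (bits 0..kPageSizeBits-1
+  // cleared).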
+
+  // Calculate region number.
+  Ext(address, address, Page::kRegionSizeLog2,
+      kPageSizeBits - Page::kRegionSizeLog2);
+
+  // Mark region dirty.
+  lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+  li(at, Operand(1));
+  sllv(at, at, address);
+  or_(scratch, scratch, at);
+  sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
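+  // In effect: dirty_marks |= (1 << region_number).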
 }
 
 
@@ -100,9 +119,7 @@
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   ASSERT(num_unsaved >= 0);
-  if (num_unsaved > 0) {
-    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
-  }
+  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
   MultiPush(kSafepointSavedRegisters);
 }
 
@@ -110,9 +127,7 @@
 void MacroAssembler::PopSafepointRegisters() {
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   MultiPop(kSafepointSavedRegisters);
-  if (num_unsaved > 0) {
-    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
-  }
+  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
 }
 
 
@@ -165,7 +180,6 @@
 
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
-  UNIMPLEMENTED_MIPS();
   // General purpose registers are pushed last on the stack.
   int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -173,6 +187,8 @@
 }
 
 
+
+
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
@@ -184,53 +200,38 @@
 }
 
 
-void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    RAStatus ra_status,
-    SaveFPRegsMode save_fp,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check) {
-  ASSERT(!AreAliased(value, dst, t8, object));
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Operand offset,
+                                 Register scratch0,
+                                 Register scratch1) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
   Label done;
 
-  // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    JumpIfSmi(value, &done);
-  }
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch0, eq, &done);
 
-  // Although the object register is tagged, the offset is relative to the
-  // start of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  // Add offset into the object.
+  Addu(scratch0, object, offset);
 
-  Addu(dst, object, Operand(offset - kHeapObjectTag));
-  if (emit_debug_code()) {
-    Label ok;
-    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
-    Branch(&ok, eq, t8, Operand(zero_reg));
-    stop("Unaligned cell in write barrier");
-    bind(&ok);
-  }
-
-  RecordWrite(object,
-              dst,
-              value,
-              ra_status,
-              save_fp,
-              remembered_set_action,
-              OMIT_SMI_CHECK);
+  // Record the actual write.
+  RecordWriteHelper(object, scratch0, scratch1);
 
   bind(&done);
 
-  // Clobber clobbered input registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
   }
 }
 
@@ -240,102 +241,29 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value,
-                                 RAStatus ra_status,
-                                 SaveFPRegsMode fp_mode,
-                                 RememberedSetAction remembered_set_action,
-                                 SmiCheck smi_check) {
-  ASSERT(!AreAliased(object, address, value, t8));
-  ASSERT(!AreAliased(object, address, value, t9));
+                                 Register scratch) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!address.is(cp) && !value.is(cp));
-
-  if (emit_debug_code()) {
-    lw(at, MemOperand(address));
-    Assert(
-        eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
-  }
+  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
 
   Label done;
 
-  if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
-    JumpIfSmi(value, &done);
-  }
-
-  CheckPageFlag(value,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersToHereAreInterestingMask,
-                eq,
-                &done);
-  CheckPageFlag(object,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersFromHereAreInterestingMask,
-                eq,
-                &done);
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch, eq, &done);
 
   // Record the actual write.
-  if (ra_status == kRAHasNotBeenSaved) {
-    push(ra);
-  }
-  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
-  CallStub(&stub);
-  if (ra_status == kRAHasNotBeenSaved) {
-    pop(ra);
-  }
+  RecordWriteHelper(object, address, scratch);
 
   bind(&done);
 
-  // Clobber clobbered registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
-  }
-}
-
-
-void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
-                                         Register address,
-                                         Register scratch,
-                                         SaveFPRegsMode fp_mode,
-                                         RememberedSetFinalAction and_then) {
-  Label done;
-  if (emit_debug_code()) {
-    Label ok;
-    JumpIfNotInNewSpace(object, scratch, &ok);
-    stop("Remembered set pointer is in new space");
-    bind(&ok);
-  }
-  // Load store buffer top.
-  ExternalReference store_buffer =
-      ExternalReference::store_buffer_top(isolate());
-  li(t8, Operand(store_buffer));
-  lw(scratch, MemOperand(t8));
-  // Store pointer to buffer and increment buffer top.
-  sw(address, MemOperand(scratch));
-  Addu(scratch, scratch, kPointerSize);
-  // Write back new top of buffer.
-  sw(scratch, MemOperand(t8));
-  // Call stub on end of buffer.
-  // Check for end of buffer.
-  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
-  if (and_then == kFallThroughAtEnd) {
-    Branch(&done, eq, t8, Operand(zero_reg));
-  } else {
-    ASSERT(and_then == kReturnAtEnd);
-    Ret(eq, t8, Operand(zero_reg));
-  }
-  push(ra);
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(fp_mode);
-  CallStub(&store_buffer_overflow);
-  pop(ra);
-  bind(&done);
-  if (and_then == kReturnAtEnd) {
-    Ret();
+    li(object, Operand(BitCast<int32_t>(kZapValue)));
+    li(address, Operand(BitCast<int32_t>(kZapValue)));
+    li(scratch, Operand(BitCast<int32_t>(kZapValue)));
   }
 }
 
@@ -444,10 +372,8 @@
   xor_(reg0, reg0, at);
 
   // hash = hash * 2057;
-  sll(scratch, reg0, 11);
-  sll(at, reg0, 3);
-  addu(reg0, reg0, at);
-  addu(reg0, reg0, scratch);
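+  // 2057 = (1 << 11) + (1 << 3) + 1, so a single mul is equivalent to the
+  // shift-and-add sequence.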
+  li(scratch, Operand(2057));
+  mul(reg0, reg0, scratch);
 
   // hash = hash ^ (hash >> 16);
   srl(at, reg0, 16);
@@ -574,22 +500,12 @@
 
 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-    if (kArchVariant == kLoongson) {
-      mult(rs, rt.rm());
-      mflo(rd);
-    } else {
-      mul(rd, rs, rt.rm());
-    }
+    mul(rd, rs, rt.rm());
   } else {
     // li handles the relocation.
     ASSERT(!rs.is(at));
     li(at, rt);
-    if (kArchVariant == kLoongson) {
-      mult(rs, at);
-      mflo(rd);
-    } else {
-      mul(rd, rs, at);
-    }
+    mul(rd, rs, at);
   }
 }
 
@@ -744,7 +660,7 @@
 
 
 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (kArchVariant == kMips32r2) {
+  if (mips32r2) {
     if (rt.is_reg()) {
       rotrv(rd, rs, rt.rm());
     } else {
@@ -768,30 +684,31 @@
   }
 }
 
+
 //------------Pseudo-instructions-------------
 
-void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
+void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
   ASSERT(!j.is_reg());
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
+  if (!MustUseReg(j.rmode_) && !gen2instr) {
     // Normal load of an immediate value which does not need Relocation Info.
     if (is_int16(j.imm32_)) {
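+      // addiu sign-extends its 16-bit immediate, matching the is_int16 range.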
       addiu(rd, zero_reg, j.imm32_);
     } else if (!(j.imm32_ & kHiMask)) {
       ori(rd, zero_reg, j.imm32_);
     } else if (!(j.imm32_ & kImm16Mask)) {
-      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     } else {
-      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
+      lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
       ori(rd, rd, (j.imm32_ & kImm16Mask));
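+      // e.g. 0x12345678 loads as lui rd, 0x1234; ori rd, rd, 0x5678.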
     }
-  } else {
+  } else if (MustUseReg(j.rmode_) || gen2instr) {
     if (MustUseReg(j.rmode_)) {
       RecordRelocInfo(j.rmode_, j.imm32_);
     }
-    // We always need the same number of instructions as we may need to patch
+    // We always need the same number of instructions, as we may need to patch
     // this code to load another value which may need 2 instructions to load.
-    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
+    lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
     ori(rd, rd, (j.imm32_ & kImm16Mask));
   }
 }
@@ -802,7 +719,7 @@
   int16_t stack_offset = num_to_push * kPointerSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kPointerSize;
       sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -841,7 +758,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, stack_offset));
       stack_offset += kPointerSize;
@@ -857,7 +774,7 @@
   int16_t stack_offset = num_to_push * kDoubleSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kDoubleSize;
       sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
@@ -899,7 +816,7 @@
   CpuFeatures::Scope scope(FPU);
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+  for (int16_t i = kNumRegisters; i > 0; i--) {
     if ((regs & (1 << i)) != 0) {
       ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
       stack_offset += kDoubleSize;
@@ -909,21 +826,6 @@
 }
 
 
-void MacroAssembler::FlushICache(Register address, unsigned instructions) {
-  RegList saved_regs = kJSCallerSaved | ra.bit();
-  MultiPush(saved_regs);
-  AllowExternalCallThatCantCauseGC scope(this);
-
-  // Save to a0 in case address == t0.
-  Move(a0, address);
-  PrepareCallCFunction(2, t0);
-
-  li(a1, instructions * kInstrSize);
-  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
-  MultiPop(saved_regs);
-}
-
-
 void MacroAssembler::Ext(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -931,7 +833,7 @@
   ASSERT(pos < 32);
   ASSERT(pos + size < 33);
 
-  if (kArchVariant == kMips32r2) {
+  if (mips32r2) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
@@ -952,21 +854,34 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size <= 32);
-  ASSERT(size != 0);
+  ASSERT(pos + size < 32);
 
-  if (kArchVariant == kMips32r2) {
+  if (mips32r2) {
     ins_(rt, rs, pos, size);
   } else {
     ASSERT(!rt.is(t8) && !rs.is(t8));
-    Subu(at, zero_reg, Operand(1));
-    srl(at, at, 32 - size);
-    and_(t8, rs, at);
-    sll(t8, t8, pos);
-    sll(at, at, pos);
-    nor(at, at, zero_reg);
-    and_(at, rt, at);
-    or_(rt, t8, at);
+
+    srl(t8, rt, pos + size);
+    // The left chunk from rt that needs to
+    // be saved is on the right side of t8.
+    sll(at, t8, pos + size);
+    // The 'at' register now contains the left chunk on
+    // the left (proper position) and zeroes.
+    sll(t8, rt, 32 - pos);
+    // t8 now contains the right chunk on the left and zeroes.
+    srl(t8, t8, 32 - pos);
+    // t8 now contains the right chunk on
+    // the right (proper position) and zeroes.
+    or_(rt, at, t8);
+    // rt now contains the left and right chunks from the original rt
+    // in their proper position and zeroes in the middle.
+    sll(t8, rs, 32 - size);
+    // t8 now contains the chunk from rs on the left and zeroes.
+    srl(t8, t8, 32 - size - pos);
+    // t8 now contains the original chunk from rs in
+    // the middle (proper position).
+    or_(rt, rt, t8);
+    // rt now contains the result of the ins instruction in R2 mode.
   }
 }
 
@@ -1025,48 +940,6 @@
   mtc1(t8, fd);
 }
 
-void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
-    trunc_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
-  } else {
-    trunc_w_d(fd, fs);
-  }
-}
-
-void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
-    round_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
-  } else {
-    round_w_d(fd, fs);
-  }
-}
-
-
-void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
-    floor_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
-  } else {
-    floor_w_d(fd, fs);
-  }
-}
-
-
-void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
-    ceil_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
-  } else {
-    ceil_w_d(fd, fs);
-  }
-}
-
 
 void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                 Register rs,
@@ -1079,9 +952,11 @@
   mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
   // Test if scratch > fd.
-  // If fd < 2^31 we can convert it normally.
+  c(OLT, D, fd, scratch);
+
   Label simple_convert;
-  BranchF(&simple_convert, NULL, lt, fd, scratch);
+  // If fd < 2^31 we can convert it normally.
+  bc1t(&simple_convert);
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
@@ -1101,200 +976,6 @@
 }
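
The fold-and-re-add trick used by Trunc_uw_d above, modeled in C++ (a sketch assuming round-toward-zero; `TruncUwD` is an illustrative name):

    #include <cassert>
    #include <cstdint>

    uint32_t TruncUwD(double fd) {
      const double two31 = 2147483648.0;  // 2^31
      if (fd < two31) {
        return static_cast<uint32_t>(static_cast<int32_t>(fd));  // simple convert
      }
      // trunc.w.d only yields a signed int32, so bring the value into range
      // first and add 2^31 back as an unsigned integer afterwards.
      return static_cast<uint32_t>(static_cast<int32_t>(fd - two31)) + 0x80000000u;
    }

    int main() {
      assert(TruncUwD(3.7) == 3u);
      assert(TruncUwD(3000000000.0) == 3000000000u);
    }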
 
 
-void MacroAssembler::BranchF(Label* target,
-                             Label* nan,
-                             Condition cc,
-                             FPURegister cmp1,
-                             FPURegister cmp2,
-                             BranchDelaySlot bd) {
-  if (cc == al) {
-    Branch(bd, target);
-    return;
-  }
-
-  ASSERT(nan || target);
-  // Check for unordered (NaN) cases.
-  if (nan) {
-    c(UN, D, cmp1, cmp2);
-    bc1t(nan);
-  }
-
-  if (target) {
-    // Here NaN cases were either handled by this function or are assumed to
-    // have been handled by the caller.
-    // Unsigned conditions are treated as their signed counterpart.
-    switch (cc) {
-      case Uless:
-      case less:
-        c(OLT, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case Ugreater:
-      case greater:
-        c(ULE, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      case Ugreater_equal:
-      case greater_equal:
-        c(ULT, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      case Uless_equal:
-      case less_equal:
-        c(OLE, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case eq:
-        c(EQ, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case ne:
-        c(EQ, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      default:
-        CHECK(0);
-    };
-  }
-
-  if (bd == PROTECT) {
-    nop();
-  }
-}
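
For reference, the deleted BranchF encodes every condition as one FPU compare plus a bc1t/bc1f. "greater" has no direct FPU predicate, so it tests unordered-or-less-or-equal (ULE) and branches when that is false; a C++ sketch with an illustrative name:

    // c(ULE, D, a, b) computes "unordered || a <= b"; bc1f takes the branch
    // when that predicate is false, i.e. when a and b are ordered and a > b.
    bool GreaterViaULE(double a, double b) {
      bool ule = (a != a) || (b != b) || (a <= b);
      return !ule;
    }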
-
-
-void MacroAssembler::Move(FPURegister dst, double imm) {
-  ASSERT(CpuFeatures::IsEnabled(FPU));
-  static const DoubleRepresentation minus_zero(-0.0);
-  static const DoubleRepresentation zero(0.0);
-  DoubleRepresentation value(imm);
-  // Handle special values first.
-  bool force_load = dst.is(kDoubleRegZero);
-  if (value.bits == zero.bits && !force_load) {
-    mov_d(dst, kDoubleRegZero);
-  } else if (value.bits == minus_zero.bits && !force_load) {
-    neg_d(dst, kDoubleRegZero);
-  } else {
-    uint32_t lo, hi;
-    DoubleAsTwoUInt32(imm, &lo, &hi);
-    // Move the low part of the double into the lower of the corresponding FPU
-    // register of FPU register pair.
-    if (lo != 0) {
-      li(at, Operand(lo));
-      mtc1(at, dst);
-    } else {
-      mtc1(zero_reg, dst);
-    }
-    // Move the high part of the double into the higher of the corresponding FPU
-    // register of FPU register pair.
-    if (hi != 0) {
-      li(at, Operand(hi));
-      mtc1(at, dst.high());
-    } else {
-      mtc1(zero_reg, dst.high());
-    }
-  }
-}
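
The DoubleAsTwoUInt32 helper used above splits the IEEE-754 bit pattern of the immediate; roughly (sketch, assuming the low word feeds the lower register of the FPU pair and the high word feeds dst.high(), as the code above does):

    #include <cstdint>
    #include <cstring>

    void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      *lo = static_cast<uint32_t>(bits);        // fed to mtc1(at, dst)
      *hi = static_cast<uint32_t>(bits >> 32);  // fed to mtc1(at, dst.high())
    }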
-
-
-void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
-  if (kArchVariant == kLoongson) {
-    Label done;
-    Branch(&done, ne, rt, Operand(zero_reg));
-    mov(rd, rs);
-    bind(&done);
-  } else {
-    movz(rd, rs, rt);
-  }
-}
-
-
-void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
-  if (kArchVariant == kLoongson) {
-    Label done;
-    Branch(&done, eq, rt, Operand(zero_reg));
-    mov(rd, rs);
-    bind(&done);
-  } else {
-    movn(rd, rs, rt);
-  }
-}
-
-
-void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
-  if (kArchVariant == kLoongson) {
-    // Tests an FP condition code and then conditionally move rs to rd.
-    // We do not currently use any FPU cc bit other than bit 0.
-    ASSERT(cc == 0);
-    ASSERT(!(rs.is(t8) || rd.is(t8)));
-    Label done;
-    Register scratch = t8;
-    // For testing purposes we need to fetch content of the FCSR register and
-    // than test its cc (floating point condition code) bit (for cc = 0, it is
-    // 24. bit of the FCSR).
-    cfc1(scratch, FCSR);
-    // For the MIPS I, II and III architectures, the contents of scratch is
-    // UNPREDICTABLE for the instruction immediately following CFC1.
-    nop();
-    srl(scratch, scratch, 16);
-    andi(scratch, scratch, 0x0080);
-    Branch(&done, eq, scratch, Operand(zero_reg));
-    mov(rd, rs);
-    bind(&done);
-  } else {
-    movt(rd, rs, cc);
-  }
-}
-
-
-void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
-  if (kArchVariant == kLoongson) {
-    // Tests an FP condition code and then conditionally move rs to rd.
-    // We do not currently use any FPU cc bit other than bit 0.
-    ASSERT(cc == 0);
-    ASSERT(!(rs.is(t8) || rd.is(t8)));
-    Label done;
-    Register scratch = t8;
-    // For testing purposes we need to fetch content of the FCSR register and
-    // than test its cc (floating point condition code) bit (for cc = 0, it is
-    // 24. bit of the FCSR).
-    cfc1(scratch, FCSR);
-    // For the MIPS I, II and III architectures, the contents of scratch is
-    // UNPREDICTABLE for the instruction immediately following CFC1.
-    nop();
-    srl(scratch, scratch, 16);
-    andi(scratch, scratch, 0x0080);
-    Branch(&done, ne, scratch, Operand(zero_reg));
-    mov(rd, rs);
-    bind(&done);
-  } else {
-    movf(rd, rs, cc);
-  }
-}
-
-
-void MacroAssembler::Clz(Register rd, Register rs) {
-  if (kArchVariant == kLoongson) {
-    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
-    Register mask = t8;
-    Register scratch = t9;
-    Label loop, end;
-    mov(at, rs);
-    mov(rd, zero_reg);
-    lui(mask, 0x8000);
-    bind(&loop);
-    and_(scratch, at, mask);
-    Branch(&end, ne, scratch, Operand(zero_reg));
-    addiu(rd, rd, 1);
-    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
-    srl(mask, mask, 1);
-    bind(&end);
-  } else {
-    clz(rd, rs);
-  }
-}
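
The deleted Loongson fallback counts leading zeros with a sliding single-bit mask; a C++ model (sketch; assumes a non-zero input, which is how the conversion code below uses it):

    #include <cassert>
    #include <cstdint>

    int ClzFallback(uint32_t x) {
      int count = 0;
      for (uint32_t mask = 0x80000000u; mask != 0; mask >>= 1) {
        if ((x & mask) != 0) break;  // Branch(&end, ne, scratch, zero_reg)
        ++count;                     // addiu(rd, rd, 1)
      }
      return count;
    }

    int main() {
      assert(ClzFallback(1u) == 31);
      assert(ClzFallback(0x80000000u) == 0);
    }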
-
-
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
@@ -1327,7 +1008,7 @@
   Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
 
   // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
   // it rounds to zero.
   const uint32_t zero_exponent =
       (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
@@ -1385,61 +1066,14 @@
     subu(scratch2, zero_reg, scratch);
     // Trick to check sign bit (msb) held in dest, count leading zero.
     // 0 indicates negative, save negative version with conditional move.
-    Clz(dest, dest);
-    Movz(scratch, scratch2, dest);
+    clz(dest, dest);
+    movz(scratch, scratch2, dest);
     mov(dest, scratch);
   }
   bind(&done);
 }
 
 
-void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
-                                     FPURegister result,
-                                     DoubleRegister double_input,
-                                     Register scratch1,
-                                     Register except_flag,
-                                     CheckForInexactConversion check_inexact) {
-  ASSERT(CpuFeatures::IsSupported(FPU));
-  CpuFeatures::Scope scope(FPU);
-
-  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
-
-  if (check_inexact == kDontCheckForInexactConversion) {
-    // Ingore inexact exceptions.
-    except_mask &= ~kFCSRInexactFlagMask;
-  }
-
-  // Save FCSR.
-  cfc1(scratch1, FCSR);
-  // Disable FPU exceptions.
-  ctc1(zero_reg, FCSR);
-
-  // Do operation based on rounding mode.
-  switch (rounding_mode) {
-    case kRoundToNearest:
-      Round_w_d(result, double_input);
-      break;
-    case kRoundToZero:
-      Trunc_w_d(result, double_input);
-      break;
-    case kRoundToPlusInf:
-      Ceil_w_d(result, double_input);
-      break;
-    case kRoundToMinusInf:
-      Floor_w_d(result, double_input);
-      break;
-  }  // End of switch-statement.
-
-  // Retrieve FCSR.
-  cfc1(except_flag, FCSR);
-  // Restore FCSR.
-  ctc1(scratch1, FCSR);
-
-  // Check for fpu exceptions.
-  And(except_flag, except_flag, Operand(except_mask));
-}
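
The except_mask computation in the deleted EmitFPUTruncate, spelled out as a sketch (bit positions follow the MIPS FCSR Flags field, bits 2..6; the named constants stand in for the ones defined elsewhere in the tree):

    #include <cstdint>

    uint32_t ExceptMask(bool check_inexact) {
      const uint32_t kFCSRInexactFlagMask = 1u << 2;  // inexact is flag bit 2
      const uint32_t kFCSRFlagMask = 0x1Fu << 2;      // all five flag bits
      uint32_t mask = kFCSRFlagMask;                  // assume all exceptions...
      if (!check_inexact) mask &= ~kFCSRInexactFlagMask;  // ...but drop inexact
      return mask;
    }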
-
-
 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                  Register input_high,
                                                  Register input_low,
@@ -1453,7 +1087,7 @@
 
   // Check for Infinity and NaNs, which should return 0.
   Subu(scratch, result, HeapNumber::kExponentMask);
-  Movz(result, zero_reg, scratch);
+  movz(result, zero_reg, scratch);
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Express exponent as delta to (number of mantissa bits + 31).
@@ -1517,7 +1151,7 @@
   result = sign;
   sign = no_reg;
   Subu(result, zero_reg, input_high);
-  Movz(result, input_high, scratch);
+  movz(result, input_high, scratch);
   bind(&done);
 }
 
@@ -1526,21 +1160,22 @@
                                       FPURegister double_input,
                                       FPURegister single_scratch,
                                       Register scratch,
-                                      Register scratch2,
-                                      Register scratch3) {
+                                      Register input_high,
+                                      Register input_low) {
   CpuFeatures::Scope scope(FPU);
-  ASSERT(!scratch2.is(result));
-  ASSERT(!scratch3.is(result));
-  ASSERT(!scratch3.is(scratch2));
+  ASSERT(!input_high.is(result));
+  ASSERT(!input_low.is(result));
+  ASSERT(!input_low.is(input_high));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(scratch2) &&
-         !scratch.is(scratch3));
+         !scratch.is(input_high) &&
+         !scratch.is(input_low));
   ASSERT(!single_scratch.is(double_input));
 
   Label done;
   Label manual;
 
   // Clear cumulative exception flags and save the FCSR.
+  Register scratch2 = input_high;
   cfc1(scratch2, FCSR);
   ctc1(zero_reg, FCSR);
   // Try a conversion to a signed integer.
@@ -1557,8 +1192,6 @@
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Load the double value and perform a manual truncation.
-  Register input_high = scratch2;
-  Register input_low = scratch3;
   Move(input_low, input_high, double_input);
   EmitOutOfInt32RangeTruncate(result,
                               input_high,
@@ -1590,6 +1223,15 @@
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
+bool MacroAssembler::UseAbsoluteCodePointers() {
+  if (is_trampoline_emitted()) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
   BranchShort(offset, bdslot);
 }
@@ -1603,18 +1245,11 @@
 
 
 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
-  if (L->is_bound()) {
-    if (is_near(L)) {
-      BranchShort(L, bdslot);
-    } else {
-      Jr(L, bdslot);
-    }
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Jr(L, bdslot);
   } else {
-    if (is_trampoline_emitted()) {
-      Jr(L, bdslot);
-    } else {
-      BranchShort(L, bdslot);
-    }
+    BranchShort(L, bdslot);
   }
 }
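
Context for the near/far split above: a MIPS conditional branch encodes a signed 16-bit word offset, so a label beyond roughly ±128 KB has to go through the absolute Jr path instead. A sketch of the range test (illustrative only; the real is_near lives in the assembler):

    #include <cstdint>

    bool IsNear(int64_t pc, int64_t target) {
      // Offset is counted in 4-byte instructions, relative to the delay slot.
      int64_t words = (target - (pc + 4)) / 4;
      return words >= -32768 && words <= 32767;
    }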
 
@@ -1622,40 +1257,19 @@
 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
-  if (L->is_bound()) {
-    if (is_near(L)) {
-      BranchShort(L, cond, rs, rt, bdslot);
-    } else {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
-    }
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Label skip;
+    Condition neg_cond = NegateCondition(cond);
+    BranchShort(&skip, neg_cond, rs, rt);
+    Jr(L, bdslot);
+    bind(&skip);
   } else {
-    if (is_trampoline_emitted()) {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jr(L, bdslot);
-      bind(&skip);
-    } else {
-      BranchShort(L, cond, rs, rt, bdslot);
-    }
+    BranchShort(L, cond, rs, rt, bdslot);
   }
 }
 
 
-void MacroAssembler::Branch(Label* L,
-                            Condition cond,
-                            Register rs,
-                            Heap::RootListIndex index,
-                            BranchDelaySlot bdslot) {
-  LoadRoot(at, index);
-  Branch(L, cond, rs, Operand(at), bdslot);
-}
-
-
 void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
   b(offset);
 
@@ -1674,8 +1288,8 @@
   Register scratch = at;
 
   if (rt.is_reg()) {
-    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
-    // rt.
+    // We don't want any other register but scratch clobbered.
+    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
     r2 = rt.rm_;
     switch (cond) {
       case cc_always:
@@ -2177,18 +1791,11 @@
 
 
 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
-  if (L->is_bound()) {
-    if (is_near(L)) {
-      BranchAndLinkShort(L, bdslot);
-    } else {
-      Jalr(L, bdslot);
-    }
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Jalr(L, bdslot);
   } else {
-    if (is_trampoline_emitted()) {
-      Jalr(L, bdslot);
-    } else {
-      BranchAndLinkShort(L, bdslot);
-    }
+    BranchAndLinkShort(L, bdslot);
   }
 }
 
@@ -2196,26 +1803,15 @@
 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
-  if (L->is_bound()) {
-    if (is_near(L)) {
-      BranchAndLinkShort(L, cond, rs, rt, bdslot);
-    } else {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jalr(L, bdslot);
-      bind(&skip);
-    }
+  bool is_label_near = is_near(L);
+  if (UseAbsoluteCodePointers() && !is_label_near) {
+    Label skip;
+    Condition neg_cond = NegateCondition(cond);
+    BranchShort(&skip, neg_cond, rs, rt);
+    Jalr(L, bdslot);
+    bind(&skip);
   } else {
-    if (is_trampoline_emitted()) {
-      Label skip;
-      Condition neg_cond = NegateCondition(cond);
-      BranchShort(&skip, neg_cond, rs, rt);
-      Jalr(L, bdslot);
-      bind(&skip);
-    } else {
-      BranchAndLinkShort(L, cond, rs, rt, bdslot);
-    }
+    BranchAndLinkShort(L, cond, rs, rt, bdslot);
   }
 }
 
@@ -2447,15 +2043,8 @@
                           Register rs,
                           const Operand& rt,
                           BranchDelaySlot bd) {
-  Label skip;
-  if (cond != cc_always) {
-    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
-  }
-  // The first instruction of 'li' may be placed in the delay slot.
-  // This is not an issue, t9 is expected to be clobbered anyway.
   li(t9, Operand(target, rmode));
-  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
-  bind(&skip);
+  Jump(t9, cond, rs, rt, bd);
 }
 
 
@@ -2550,7 +2139,7 @@
   // Must record previous source positions before the
   // li() generates a new code target.
   positions_recorder()->WriteRecordedPositions();
-  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
+  li(t9, Operand(target_int, rmode), true);
   Call(t9, cond, rs, rt, bd);
   ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
@@ -2585,7 +2174,7 @@
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
   }
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
-  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
             SizeOfCodeGeneratedSince(&start));
 }
 
@@ -2655,16 +2244,14 @@
     nop();
 }
 
-void MacroAssembler::DropAndRet(int drop) {
-  Ret(USE_DELAY_SLOT);
-  addiu(sp, sp, drop * kPointerSize);
-}
 
 void MacroAssembler::DropAndRet(int drop,
                                 Condition cond,
                                 Register r1,
                                 const Operand& r2) {
-  // Both Drop and Ret need to be conditional.
+  // This is a workaround to make sure only one branch instruction is
+  // generated. It relies on Drop and Ret not creating branches if
+  // cond == cc_always.
   Label skip;
   if (cond != cc_always) {
     Branch(&skip, NegateCondition(cond), r1, r2);
@@ -2731,10 +2318,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
-  PrepareCEntryArgs(0);
-  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
+  ASSERT(allow_stub_calls());
+  mov(a0, zero_reg);
+  li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
-  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -2744,43 +2331,61 @@
 // ---------------------------------------------------------------------------
 // Exception handling.
 
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
-                                    int handler_index) {
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
-  // For the JSEntry handler, we must preserve a0-a3 and s0.
-  // t1-t3 are available. We will build up the handler from the bottom by
-  // pushing on the stack.
-  // Set up the code object (t1) and the state (t2) for pushing.
-  unsigned state =
-      StackHandler::IndexField::encode(handler_index) |
-      StackHandler::KindField::encode(kind);
-  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
-  li(t2, Operand(state));
+  // The return address is passed in register ra.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      li(t0, Operand(StackHandler::TRY_CATCH));
+    } else {
+      li(t0, Operand(StackHandler::TRY_FINALLY));
+    }
+    // Save the current handler as the next handler.
+    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+    lw(t1, MemOperand(t2));
 
-  // Push the frame pointer, context, state, and code object.
-  if (kind == StackHandler::JS_ENTRY) {
-    ASSERT_EQ(Smi::FromInt(0), 0);
-    // The second zero_reg indicates no context.
-    // The first zero_reg is the NULL frame pointer.
-    // The operands are reversed to match the order of MultiPush/Pop.
-    Push(zero_reg, zero_reg, t2, t1);
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
+    sw(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    sw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
+    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+
   } else {
-    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
-  }
+    // Must preserve a0-a3, and s0 (argv).
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    li(t0, Operand(StackHandler::ENTRY));
 
-  // Link the current handler as the next handler.
-  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
-  lw(t1, MemOperand(t2));
-  push(t1);
-  // Set this new handler as the current one.
-  sw(sp, MemOperand(t2));
+    // Save the current handler as the next handler.
+    li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+    lw(t1, MemOperand(t2));
+
+    ASSERT(Smi::FromInt(0) == 0);  // Used for no context.
+
+    addiu(sp, sp, -StackHandlerConstants::kSize);
+    sw(ra, MemOperand(sp, StackHandlerConstants::kPCOffset));
+    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    sw(zero_reg, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    sw(t0, MemOperand(sp, StackHandlerConstants::kStateOffset));
+    sw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+    // Link this handler as the new current one.
+    sw(sp, MemOperand(t2));
+  }
 }
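
The five-word frame the reverted PushTryHandler builds, written as a struct matching the STATIC_ASSERTs above (sketch; sp points at `next` once the handler is linked):

    #include <cstdint>

    struct StackHandlerFrame {
      uint32_t next;     // kNextOffset    == 0 * kPointerSize
      uint32_t state;    // kStateOffset   == 1 * kPointerSize (t0)
      uint32_t context;  // kContextOffset == 2 * kPointerSize (cp, or 0)
      uint32_t fp;       // kFPOffset      == 3 * kPointerSize (fp, or 0)
      uint32_t pc;       // kPCOffset      == 4 * kPointerSize (ra)
    };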
 
 
@@ -2793,36 +2398,19 @@
 }
 
 
-void MacroAssembler::JumpToHandlerEntry() {
-  // Compute the handler entry address and jump to it.  The handler table is
-  // a fixed array of (smi-tagged) code offsets.
-  // v0 = exception, a1 = code object, a2 = state.
-  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
-  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
-  sll(a2, a2, kPointerSizeLog2);
-  Addu(a2, a3, a2);
-  lw(a2, MemOperand(a2));  // Smi-tagged offset.
-  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
-  sra(t9, a2, kSmiTagSize);
-  Addu(t9, t9, a1);
-  Jump(t9);  // Jump.
-}
-
-
 void MacroAssembler::Throw(Register value) {
-  // Adjust this code if not the case.
-  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in v0.
+  // v0 is expected to hold the exception.
   Move(v0, value);
 
-  // Drop the stack pointer to the top of the top handler.
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
                                    isolate())));
   lw(sp, MemOperand(a3));
@@ -2831,60 +2419,132 @@
   pop(a2);
   sw(a2, MemOperand(a3));
 
-  // Get the code object (a1) and state (a2).  Restore the context and frame
-  // pointer.
-  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+  // Restore context and frame pointer, discard state (a3).
+  MultiPop(a3.bit() | cp.bit() | fp.bit());
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
-  // or cp.
+  // (a3 == ENTRY) == (fp == 0) == (cp == 0), so we could test any
+  // of them.
   Label done;
-  Branch(&done, eq, cp, Operand(zero_reg));
+  Branch(&done, eq, fp, Operand(zero_reg));
   sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   bind(&done);
 
-  JumpToHandlerEntry();
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
 }
 
 
-void MacroAssembler::ThrowUncatchable(Register value) {
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
-  // The exception is expected in v0.
-  if (!value.is(v0)) {
-    mov(v0, value);
-  }
-  // Drop the stack pointer to the top of the top stack handler.
+  // v0 is expected to hold the exception.
+  Move(v0, value);
+
+  // Drop sp to the top stack handler.
   li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
   lw(sp, MemOperand(a3));
 
   // Unwind the handlers until the ENTRY handler is found.
-  Label fetch_next, check_kind;
-  jmp(&check_kind);
-  bind(&fetch_next);
-  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  lw(a2, MemOperand(sp, kStateOffset));
+  Branch(&done, eq, a2, Operand(StackHandler::ENTRY));
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  lw(sp, MemOperand(sp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
 
-  bind(&check_kind);
-  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
-  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
-  And(a2, a2, Operand(StackHandler::KindField::kMask));
-  Branch(&fetch_next, ne, a2, Operand(zero_reg));
-
-  // Set the top handler address to next handler past the top ENTRY handler.
+  // Set the top handler address to next handler past the current ENTRY handler.
   pop(a2);
   sw(a2, MemOperand(a3));
 
-  // Get the code object (a1) and state (a2).  Clear the context and frame
-  // pointer (0 was saved in the handler).
-  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+           Isolate::kExternalCaughtExceptionAddress, isolate());
+    li(a0, Operand(false, RelocInfo::NONE));
+    li(a2, Operand(external_caught));
+    sw(a0, MemOperand(a2));
 
-  JumpToHandlerEntry();
+    // Set pending exception and v0 to out of memory exception.
+    Failure* out_of_memory = Failure::OutOfMemoryException();
+    li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+    li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                        isolate())));
+    sw(v0, MemOperand(a2));
+  }
+
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         cp
+  //         fp
+  //         ra
+
+  // Restore context and frame pointer, discard state (r2).
+  MultiPop(a2.bit() | cp.bit() | fp.bit());
+
+#ifdef DEBUG
+  // When emitting debug_code, set ra as return address for the jump.
+  // 5 instructions: add: 1, pop: 2, jump: 2.
+  const int kOffsetRaInstructions = 5;
+  Label find_ra;
+
+  if (emit_debug_code()) {
+    // Compute ra for the Jump(t9).
+    const int kOffsetRaBytes = kOffsetRaInstructions * Assembler::kInstrSize;
+
+    // This branch-and-link sequence is needed to get the current PC on mips,
+    // saved to the ra register. Then adjusted for instruction count.
+    bal(&find_ra);  // bal exposes branch-delay slot.
+    nop();  // Branch delay slot nop.
+    bind(&find_ra);
+    addiu(ra, ra, kOffsetRaBytes);
+  }
+#endif
+  pop(t9);  // 2 instructions: lw, add sp.
+  Jump(t9);  // 2 instructions: jr, nop (in delay slot).
+
+  if (emit_debug_code()) {
+    // Make sure that the expected number of instructions were generated.
+    ASSERT_EQ(kOffsetRaInstructions,
+              InstructionsGeneratedSince(&find_ra));
+  }
 }
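
The unwind loop in the reverted ThrowUncatchable, reduced to C++ (sketch; `kEntry` stands in for StackHandler::ENTRY, whose real value is defined elsewhere in the tree):

    struct Handler {
      Handler* next;  // kNextOffset slot
      int state;      // kStateOffset slot
    };

    const int kEntry = 0;  // illustrative stand-in for StackHandler::ENTRY

    Handler* UnwindToEntry(Handler* top) {
      while (top->state != kEntry) top = top->next;  // lw(sp, kNextOffset)
      return top;
    }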
 
 
@@ -2987,7 +2647,6 @@
   ASSERT(!result.is(scratch1));
   ASSERT(!result.is(scratch2));
   ASSERT(!scratch1.is(scratch2));
-  ASSERT(!object_size.is(t9));
   ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
 
   // Check relative positions of allocation top and limit addresses.
@@ -3325,185 +2984,26 @@
 }
 
 
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
-                                                Register end_offset,
-                                                Register filler) {
-  Label loop, entry;
-  Branch(&entry);
-  bind(&loop);
-  sw(filler, MemOperand(start_offset));
-  Addu(start_offset, start_offset, kPointerSize);
-  bind(&entry);
-  Branch(&loop, lt, start_offset, Operand(end_offset));
-}
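
The deleted helper is a plain word-fill loop; in C++ terms (sketch, illustrative name):

    #include <cstdint>

    void FillWords(uint32_t* start, uint32_t* end, uint32_t filler) {
      for (uint32_t* p = start; p < end; ++p) {
        *p = filler;  // sw(filler, ...) then Addu(start_offset, kPointerSize)
      }
    }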
-
-
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
 }
 
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, ls, scratch,
-         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
-                                              Register scratch,
-                                              Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
-                                                 Register key_reg,
-                                                 Register receiver_reg,
-                                                 Register elements_reg,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Register scratch3,
-                                                 Register scratch4,
-                                                 Label* fail) {
-  Label smi_value, maybe_nan, have_double_value, is_nan, done;
-  Register mantissa_reg = scratch2;
-  Register exponent_reg = scratch3;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg,
-           scratch1,
-           Heap::kHeapNumberMapRootIndex,
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
-  // in the exponent.
-  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
-  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
-  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
-
-  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
-  bind(&have_double_value);
-  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, elements_reg);
-  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  sw(exponent_reg, FieldMemOperand(scratch1, offset));
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
-  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
-  bind(&is_nan);
-  // Load canonical NaN for storing into the double array.
-  uint64_t nan_int64 = BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
-  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
-  jmp(&have_double_value);
-
-  bind(&smi_value);
-  Addu(scratch1, elements_reg,
-      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  Addu(scratch1, scratch1, scratch2);
-  // scratch1 is now effective address of the double element
-
-  FloatingPointHelper::Destination destination;
-  if (CpuFeatures::IsSupported(FPU)) {
-    destination = FloatingPointHelper::kFPURegisters;
-  } else {
-    destination = FloatingPointHelper::kCoreRegisters;
-  }
-
-  Register untagged_value = receiver_reg;
-  SmiUntag(untagged_value, value_reg);
-  FloatingPointHelper::ConvertIntToDouble(this,
-                                          untagged_value,
-                                          destination,
-                                          f0,
-                                          mantissa_reg,
-                                          exponent_reg,
-                                          scratch4,
-                                          f2);
-  if (destination == FloatingPointHelper::kFPURegisters) {
-    CpuFeatures::Scope scope(FPU);
-    sdc1(f0, MemOperand(scratch1, 0));
-  } else {
-    sw(mantissa_reg, MemOperand(scratch1, 0));
-    sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
-  }
-  bind(&done);
-}
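
The NaN test in the deleted StoreNumberToDoubleElements hinges on one boundary: any double whose sign-masked upper word reaches 0x7FF00000 is Infinity or NaN, and it is NaN exactly when some fraction bit is also set. A bit-level sketch:

    #include <cstdint>
    #include <cstring>

    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t upper = static_cast<uint32_t>(bits >> 32) & 0x7FFFFFFFu;
      uint32_t lower = static_cast<uint32_t>(bits);
      const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000u;
      if (upper < kNaNOrInfinityLowerBoundUpper32) return false;  // finite
      // Exponent is all ones: NaN iff any mantissa bit is set.
      return upper > kNaNOrInfinityLowerBoundUpper32 || lower != 0;
    }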
-
-
-void MacroAssembler::CompareMapAndBranch(Register obj,
-                                         Register scratch,
-                                         Handle<Map> map,
-                                         Label* early_success,
-                                         Condition cond,
-                                         Label* branch_to,
-                                         CompareMapMode mode) {
-  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  Operand right = Operand(map);
-  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    Map* transitioned_fast_element_map(
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
-    ASSERT(transitioned_fast_element_map == NULL ||
-           map->elements_kind() != FAST_ELEMENTS);
-    if (transitioned_fast_element_map != NULL) {
-      Branch(early_success, eq, scratch, right);
-      right = Operand(Handle<Map>(transitioned_fast_element_map));
-    }
-
-    Map* transitioned_double_map(
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
-    ASSERT(transitioned_double_map == NULL ||
-           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-    if (transitioned_double_map != NULL) {
-      Branch(early_success, eq, scratch, right);
-      right = Operand(Handle<Map>(transitioned_double_map));
-    }
-  }
-
-  Branch(branch_to, cond, scratch, right);
-}
-
-
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type,
-                              CompareMapMode mode) {
+                              SmiCheckType smi_check_type) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-  Label success;
-  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
-  bind(&success);
+  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  li(at, Operand(map));
+  Branch(fail, ne, scratch, Operand(at));
 }
 
 
@@ -3610,12 +3110,10 @@
                                     Handle<Code> code_constant,
                                     Register code_reg,
                                     Label* done,
-                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
-  *definitely_mismatches = false;
   Label regular_invoke;
 
   // Check whether the expected and actual arguments count match. If not,
@@ -3646,7 +3144,6 @@
         // arguments.
         definitely_matches = true;
       } else {
-        *definitely_mismatches = true;
         li(a2, Operand(expected.immediate()));
       }
     }
@@ -3670,9 +3167,7 @@
       SetCallKind(t1, call_kind);
       Call(adaptor);
       call_wrapper.AfterCall();
-      if (!*definitely_mismatches) {
-        Branch(done);
-      }
+      jmp(done);
     } else {
       SetCallKind(t1, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3688,30 +3183,21 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
 
-  bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, Handle<Code>::null(), code,
-                 &done, &definitely_mismatches, flag,
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                  call_wrapper, call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code));
-      SetCallKind(t1, call_kind);
-      Call(code);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(t1, call_kind);
-      Jump(code);
-    }
-    // Continue here if InvokePrologue does handle the invocation due to
-    // mismatched parameter counts.
-    bind(&done);
+  if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
+    Call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(t1, call_kind);
+    Jump(code);
   }
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
 }
 
 
@@ -3721,27 +3207,20 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
 
-  bool definitely_mismatches = false;
-  InvokePrologue(expected, actual, code, no_reg,
-                 &done, &definitely_mismatches, flag,
+  InvokePrologue(expected, actual, code, no_reg, &done, flag,
                  NullCallWrapper(), call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      SetCallKind(t1, call_kind);
-      Call(code, rmode);
-    } else {
-      SetCallKind(t1, call_kind);
-      Jump(code, rmode);
-    }
-    // Continue here if InvokePrologue does handle the invocation due to
-    // mismatched parameter counts.
-    bind(&done);
+  if (flag == CALL_FUNCTION) {
+    SetCallKind(t1, call_kind);
+    Call(code, rmode);
+  } else {
+    SetCallKind(t1, call_kind);
+    Jump(code, rmode);
   }
+  // Continue here if InvokePrologue does handle the invocation due to
+  // mismatched parameter counts.
+  bind(&done);
 }
 
 
@@ -3750,9 +3229,6 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -3771,24 +3247,24 @@
 }
 
 
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
-  LoadHeapObject(a1, function);
+  li(a1, Operand(Handle<JSFunction>(function)));
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
+  // Invoke the cached code.
+  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
+  }
 }
 
 
@@ -3829,8 +3305,7 @@
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
-                                             Label* miss,
-                                             bool miss_on_bound_function) {
+                                             Label* miss) {
   // Check that the receiver isn't a smi.
   JumpIfSmi(function, miss);
 
@@ -3838,16 +3313,6 @@
   GetObjectType(function, result, scratch);
   Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
 
-  if (miss_on_bound_function) {
-    lw(scratch,
-       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    lw(scratch,
-       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
-    And(scratch, scratch,
-        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
-    Branch(miss, ne, scratch, Operand(zero_reg));
-  }
-
   // Make sure that the function has an instance prototype.
   Label non_instance;
   lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
@@ -3894,29 +3359,53 @@
 // -----------------------------------------------------------------------------
 // Runtime calls.
 
-void MacroAssembler::CallStub(CodeStub* stub,
-                              Condition cond,
-                              Register r1,
-                              const Operand& r2,
-                              BranchDelaySlot bd) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
+void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
+                              Register r1, const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
+}
+
+
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
+                                         Register r1, const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET,
+      kNoASTId, cond, r1, r2);
+  return result;
 }
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub,
+                                             Condition cond,
+                                             Register r1,
+                                             const Operand& r2) {
+  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  Object* result;
+  { MaybeObject* maybe_result = stub->TryGetCode();
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Jump(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2);
+  return result;
+}
+
+
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
 
 
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
-                                              int stack_space) {
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ExternalReference function, int stack_space) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -3983,13 +3472,15 @@
   lw(t1, MemOperand(at));
   Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
   li(s0, Operand(stack_space));
-  LeaveExitFrame(false, s0, true);
+  LeaveExitFrame(false, s0);
+  Ret();
 
   bind(&promote_scheduled_exception);
-  TailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-      0,
-      1);
+  MaybeObject* result = TryTailCallExternalReference(
+      ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
@@ -4002,12 +3493,8 @@
       1);
   mov(v0, s0);
   jmp(&leave_exit_frame);
-}
 
-
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
-  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
-  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+  return result;
 }
 
 
@@ -4091,16 +3578,7 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-
-  if (left.is(right) && dst.is(left)) {
-    ASSERT(!dst.is(t9));
-    ASSERT(!scratch.is(t9));
-    ASSERT(!left.is(t9));
-    ASSERT(!right.is(t9));
-    ASSERT(!overflow_dst.is(t9));
-    mov(t9, right);
-    right = t9;
-  }
+  ASSERT(!left.is(right));
 
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
@@ -4133,17 +3611,10 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
+  ASSERT(!left.is(right));
   ASSERT(!scratch.is(left));
   ASSERT(!scratch.is(right));
 
-  // This happens with some crankshaft code. Since Subu works fine if
-  // left == right, let's not make that restriction here.
-  if (left.is(right)) {
-    mov(dst, zero_reg);
-    mov(overflow_dst, zero_reg);
-    return;
-  }
-
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     subu(dst, left, right);  // Left is overwritten.
@@ -4181,8 +3652,8 @@
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
-  PrepareCEntryArgs(num_arguments);
-  PrepareCEntryFunction(ExternalReference(f, isolate()));
+  li(a0, num_arguments);
+  li(a1, Operand(ExternalReference(f, isolate())));
   CEntryStub stub(1);
   CallStub(&stub);
 }
@@ -4190,9 +3661,10 @@
 
 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
   const Runtime::Function* function = Runtime::FunctionForId(id);
-  PrepareCEntryArgs(function->nargs);
-  PrepareCEntryFunction(ExternalReference(function, isolate()));
-  CEntryStub stub(1, kSaveFPRegs);
+  li(a0, Operand(function->nargs));
+  li(a1, Operand(ExternalReference(function, isolate())));
+  CEntryStub stub(1);
+  stub.SaveDoubles();
   CallStub(&stub);
 }
 
@@ -4203,13 +3675,12 @@
 
 
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
-                                           int num_arguments,
-                                           BranchDelaySlot bd) {
-  PrepareCEntryArgs(num_arguments);
-  PrepareCEntryFunction(ext);
+                                           int num_arguments) {
+  li(a0, Operand(num_arguments));
+  li(a1, Operand(ext));
 
   CEntryStub stub(1);
-  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
+  CallStub(&stub);
 }
 
 
@@ -4220,11 +3691,22 @@
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
-  PrepareCEntryArgs(num_arguments);
+  li(a0, Operand(num_arguments));
   JumpToExternalReference(ext);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  li(a0, num_arguments);
+  return TryJumpToExternalReference(ext);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -4234,25 +3716,24 @@
 }
 
 
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
-                                             BranchDelaySlot bd) {
-  PrepareCEntryFunction(builtin);
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+  li(a1, Operand(builtin));
   CEntryStub stub(1);
-  Jump(stub.GetCode(),
-       RelocInfo::CODE_TARGET,
-       al,
-       zero_reg,
-       Operand(zero_reg),
-       bd);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& builtin) {
+  li(a1, Operand(builtin));
+  CEntryStub stub(1);
+  return TryTailCallStub(&stub);
 }
 
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   GetBuiltinEntry(t9, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(t9));
@@ -4385,20 +3866,14 @@
     RecordComment(msg);
   }
 #endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
   push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
   push(a0);
-  // Disable stub call restrictions to always allow calls to abort.
-  if (!has_frame_) {
-    // We don't actually want to generate a pile of code for this, so just
-    // claim there is a stack frame, without generating one.
-    FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 2);
-  } else {
-    CallRuntime(Runtime::kAbort, 2);
-  }
+  CallRuntime(Runtime::kAbort, 2);
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -4432,46 +3907,6 @@
 }
 
 
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
-
-  // Check that the function's map is the same as the expected cached map.
-  int expected_index =
-      Context::GetContextMapIndexFromElementsKind(expected_kind);
-  lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
-  Branch(no_map_match, ne, map_in_out, Operand(at));
-
-  // Use the transitioned cached map.
-  int trans_index =
-      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
-  lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch, Register map_out) {
-  ASSERT(!function_in.is(map_out));
-  Label done;
-  lw(map_out, FieldMemOperand(function_in,
-                              JSFunction::kPrototypeOrInitialMapOffset));
-  if (!FLAG_smi_only_arrays) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                        FAST_ELEMENTS,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the global or builtins object from the current context.
   lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -4502,7 +3937,7 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   addiu(sp, sp, -5 * kPointerSize);
   li(t8, Operand(Smi::FromInt(type)));
-  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
+  li(t9, Operand(CodeObject()));
   sw(ra, MemOperand(sp, 4 * kPointerSize));
   sw(fp, MemOperand(sp, 3 * kPointerSize));
   sw(cp, MemOperand(sp, 2 * kPointerSize));
@@ -4522,7 +3957,7 @@
 
 void MacroAssembler::EnterExitFrame(bool save_doubles,
                                     int stack_space) {
-  // Set up the frame structure on the stack.
+  // Setup the frame structure on the stack.
   STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
   STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
   STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
@@ -4540,14 +3975,13 @@
   addiu(sp, sp, -4 * kPointerSize);
   sw(ra, MemOperand(sp, 3 * kPointerSize));
   sw(fp, MemOperand(sp, 2 * kPointerSize));
-  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
+  addiu(fp, sp, 2 * kPointerSize);  // Setup new frame pointer.
 
   if (emit_debug_code()) {
     sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
   }
 
-  // Accessed from ExitFrame::code_slot.
-  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
+  li(t8, Operand(CodeObject()));  // Accessed from ExitFrame::code_slot.
   sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
@@ -4591,8 +4025,7 @@
 
 
 void MacroAssembler::LeaveExitFrame(bool save_doubles,
-                                    Register argument_count,
-                                    bool do_return) {
+                                    Register argument_count) {
   // Optionally restore all double registers.
   if (save_doubles) {
     // Remember: we only need to restore every 2nd double FPU value.
@@ -4618,17 +4051,11 @@
   mov(sp, fp);  // Respect ABI stack constraint.
   lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
   lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
-
+  addiu(sp, sp, 8);
   if (argument_count.is_valid()) {
     sll(t8, argument_count, kPointerSizeLog2);
     addu(sp, sp, t8);
   }
-
-  if (do_return) {
-    Ret(USE_DELAY_SLOT);
-    // If returning, the instruction in the delay slot will be the addiu below.
-  }
-  addiu(sp, sp, 8);
 }
 
 
@@ -4693,71 +4120,14 @@
 }
 
 
-void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
-  ASSERT(!reg.is(overflow));
-  mov(overflow, reg);  // Save original value.
-  SmiTag(reg);
-  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
-}
-
-
-void MacroAssembler::SmiTagCheckOverflow(Register dst,
-                                         Register src,
-                                         Register overflow) {
-  if (dst.is(src)) {
-    // Fall back to slower case.
-    SmiTagCheckOverflow(dst, overflow);
-  } else {
-    ASSERT(!dst.is(src));
-    ASSERT(!dst.is(overflow));
-    ASSERT(!src.is(overflow));
-    SmiTag(dst, src);
-    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
-  }
-}
-
-
-void MacroAssembler::UntagAndJumpIfSmi(Register dst,
-                                       Register src,
-                                       Label* smi_case) {
-  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
-  SmiUntag(dst, src);
-}
-
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
-                                          Register src,
-                                          Label* non_smi_case) {
-  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
-  SmiUntag(dst, src);
-}
-
-void MacroAssembler::JumpIfSmi(Register value,
-                               Label* smi_label,
-                               Register scratch,
-                               BranchDelaySlot bd) {
-  ASSERT_EQ(0, kSmiTag);
-  andi(scratch, value, kSmiTagMask);
-  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
-}
-
-void MacroAssembler::JumpIfNotSmi(Register value,
-                                  Label* not_smi_label,
-                                  Register scratch,
-                                  BranchDelaySlot bd) {
-  ASSERT_EQ(0, kSmiTag);
-  andi(scratch, value, kSmiTagMask);
-  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
-}
-
-
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(1, kSmiTagMask);
   or_(at, reg1, reg2);
-  JumpIfNotSmi(at, on_not_both_smi);
+  andi(at, at, kSmiTagMask);
+  Branch(on_not_both_smi, ne, at, Operand(zero_reg));
 }
 
 
@@ -4768,7 +4138,8 @@
   ASSERT_EQ(1, kSmiTagMask);
   // Both Smi tags must be 1 (not Smi).
   and_(at, reg1, reg2);
-  JumpIfSmi(at, on_either_smi);
+  andi(at, at, kSmiTagMask);
+  Branch(on_either_smi, eq, at, Operand(zero_reg));
 }
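
For reference, a stand-alone C++ sketch of the smi bit tricks used above, assuming the 32-bit layout this file declares (kSmiTag == 0, kSmiTagSize == 1); heap_ptr below is a made-up tagged pointer, not a real V8 value:

    #include <cassert>
    #include <cstdint>

    static const uint32_t kSmiTagMask = 1;

    // SmiTag: Addu(reg, reg, reg) doubles the value, shifting in a 0 tag bit.
    int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }

    // SmiTagCheckOverflow's trick: (value ^ 2 * value) < 0 exactly when the
    // doubling changed the sign bit, i.e. the value needs more than 31 bits.
    bool SmiTagOverflows(int32_t value) {
      return (value ^ SmiTag(value)) < 0;
    }

    int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }  // sra, as above.

    int main() {
      assert(SmiUntag(SmiTag(42)) == 42);
      assert(!SmiTagOverflows(0x3FFFFFFF) && SmiTagOverflows(0x40000000));

      // JumpIfNotBothSmi: the low bit of (a | b) is set iff at least one
      // operand is not a smi. JumpIfEitherSmi: the low bit of (a & b) is
      // clear iff at least one operand is a smi.
      int32_t smi = SmiTag(7);
      int32_t heap_ptr = 0x1001;  // Low bit set, as for any heap pointer.
      assert(((smi | heap_ptr) & kSmiTagMask) != 0);
      assert(((smi & heap_ptr) & kSmiTagMask) == 0);
      return 0;
    }
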
 
 
@@ -4846,7 +4217,8 @@
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   And(scratch1, first, Operand(second));
-  JumpIfSmi(scratch1, failure);
+  And(scratch1, scratch1, Operand(kSmiTagMask));
+  Branch(failure, eq, scratch1, Operand(zero_reg));
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
@@ -4885,23 +4257,7 @@
 
 static const int kRegisterPassedArguments = 4;
 
-int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
-                                              int num_double_arguments) {
-  int stack_passed_words = 0;
-  num_reg_arguments += 2 * num_double_arguments;
-
-  // Up to four simple arguments are passed in registers a0..a3.
-  if (num_reg_arguments > kRegisterPassedArguments) {
-    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
-  }
-  stack_passed_words += kCArgSlotCount;
-  return stack_passed_words;
-}
-
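
The arithmetic of the helper removed above, restated stand-alone so the inline expression the revert switches back to (just below) can be checked against it. kCArgSlotCount == 4 is the o32 ABI value this file targets and is assumed here; doubles occupy two argument words each:

    static const int kRegisterPassedArguments = 4;  // a0..a3.
    static const int kCArgSlotCount = 4;            // Assumed o32 value.

    int CalculateStackPassedWords(int num_reg_arguments,
                                  int num_double_arguments) {
      int stack_passed_words = 0;
      num_reg_arguments += 2 * num_double_arguments;
      // Only arguments past the fourth word spill to the stack...
      if (num_reg_arguments > kRegisterPassedArguments) {
        stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
      }
      // ...but the four home slots are always reserved.
      stack_passed_words += kCArgSlotCount;
      return stack_passed_words;
    }

For example, two register arguments plus two doubles make 2 + 4 = 6 words: two spill to the stack, plus the four reserved slots, giving 6.
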
-
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
-                                          int num_double_arguments,
-                                          Register scratch) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
   // Up to four simple arguments are passed in registers a0..a3.
@@ -4909,8 +4265,9 @@
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  int stack_passed_arguments = CalculateStackPassedWords(
-      num_reg_arguments, num_double_arguments);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                 0 : num_arguments - kRegisterPassedArguments) +
+                                kCArgSlotCount;
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -4925,43 +4282,26 @@
 }
 
 
-void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
-                                          Register scratch) {
-  PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-
 void MacroAssembler::CallCFunction(ExternalReference function,
-                                   int num_reg_arguments,
-                                   int num_double_arguments) {
-  li(t8, Operand(function));
-  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+                                   int num_arguments) {
+  CallCFunctionHelper(no_reg, function, t8, num_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   int num_reg_arguments,
-                                   int num_double_arguments) {
-  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
-}
-
-
-void MacroAssembler::CallCFunction(ExternalReference function,
+                                   Register scratch,
                                    int num_arguments) {
-  CallCFunction(function, num_arguments, 0);
-}
-
-
-void MacroAssembler::CallCFunction(Register function,
-                                   int num_arguments) {
-  CallCFunction(function, num_arguments, 0);
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_arguments);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         int num_reg_arguments,
-                                         int num_double_arguments) {
-  ASSERT(has_frame());
+                                         ExternalReference function_reference,
+                                         Register scratch,
+                                         int num_arguments) {
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -4989,15 +4329,19 @@
   // allow preemption, so the return address in the link register
   // stays correct.
 
-  if (!function.is(t9)) {
+  if (function.is(no_reg)) {
+    function = t9;
+    li(function, Operand(function_reference));
+  } else if (!function.is(t9)) {
     mov(t9, function);
     function = t9;
   }
 
   Call(function);
 
-  int stack_passed_arguments = CalculateStackPassedWords(
-      num_reg_arguments, num_double_arguments);
+  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+                                0 : num_arguments - kRegisterPassedArguments) +
+                               kCArgSlotCount;
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -5010,370 +4354,17 @@
 #undef BRANCH_ARGS_CHECK
 
 
-void MacroAssembler::PatchRelocatedValue(Register li_location,
-                                         Register scratch,
-                                         Register new_value) {
-  lw(scratch, MemOperand(li_location));
-  // At this point scratch is a lui(at, ...) instruction.
-  if (emit_debug_code()) {
-    And(scratch, scratch, kOpcodeMask);
-    Check(eq, "The instruction to patch should be a lui.",
-        scratch, Operand(LUI));
-    lw(scratch, MemOperand(li_location));
-  }
-  srl(t9, new_value, kImm16Bits);
-  Ins(scratch, t9, 0, kImm16Bits);
-  sw(scratch, MemOperand(li_location));
-
-  lw(scratch, MemOperand(li_location, kInstrSize));
-  // scratch is now ori(at, ...).
-  if (emit_debug_code()) {
-    And(scratch, scratch, kOpcodeMask);
-    Check(eq, "The instruction to patch should be an ori.",
-        scratch, Operand(ORI));
-    lw(scratch, MemOperand(li_location, kInstrSize));
-  }
-  Ins(scratch, new_value, 0, kImm16Bits);
-  sw(scratch, MemOperand(li_location, kInstrSize));
-
-  // Update the I-cache so the new lui and ori can be executed.
-  FlushICache(li_location, 2);
-}
-
-void MacroAssembler::GetRelocatedValue(Register li_location,
-                                       Register value,
-                                       Register scratch) {
-  lw(value, MemOperand(li_location));
-  if (emit_debug_code()) {
-    And(value, value, kOpcodeMask);
-    Check(eq, "The instruction should be a lui.",
-        value, Operand(LUI));
-    lw(value, MemOperand(li_location));
-  }
-
-  // value now holds a lui instruction. Extract the immediate.
-  sll(value, value, kImm16Bits);
-
-  lw(scratch, MemOperand(li_location, kInstrSize));
-  if (emit_debug_code()) {
-    And(scratch, scratch, kOpcodeMask);
-    Check(eq, "The instruction should be an ori.",
-        scratch, Operand(ORI));
-    lw(scratch, MemOperand(li_location, kInstrSize));
-  }
-  // "scratch" now holds an ori instruction. Extract the immediate.
-  andi(scratch, scratch, kImm16Mask);
-
-  // Merge the results.
-  or_(value, value, scratch);
-}
-
-
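
The value encoding that PatchRelocatedValue and GetRelocatedValue manipulate, modeled on plain integers (the real code uses Ins/Ext on the instruction words themselves; this sketch only shows the immediate split): li materializes a 32-bit constant as lui (upper 16 bits) followed by ori (lower 16 bits), and since ori zero-extends, the round trip is exact for any 32-bit value.

    #include <cstdio>

    static const int kImm16Bits = 16;
    static const unsigned kImm16Mask = 0xFFFF;

    struct LuiOriPair { unsigned lui_imm; unsigned ori_imm; };

    LuiOriPair Split(unsigned value) {
      LuiOriPair pair = { value >> kImm16Bits, value & kImm16Mask };
      return pair;
    }

    // Mirrors GetRelocatedValue: shift the lui immediate back up (the sll by
    // kImm16Bits) and OR in the masked ori immediate.
    unsigned Merge(const LuiOriPair& pair) {
      return (pair.lui_imm << kImm16Bits) | pair.ori_imm;
    }

    int main() {
      unsigned value = 0x12345678;
      LuiOriPair pair = Split(value);
      printf("lui 0x%04x, ori 0x%04x, merged 0x%08x\n",
             pair.lui_imm, pair.ori_imm, Merge(pair));
      return Merge(pair) == value ? 0 : 1;
    }
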
-void MacroAssembler::CheckPageFlag(
-    Register object,
-    Register scratch,
-    int mask,
-    Condition cc,
-    Label* condition_met) {
-  And(scratch, object, Operand(~Page::kPageAlignmentMask));
-  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
-  And(scratch, scratch, Operand(mask));
-  Branch(condition_met, cc, scratch, Operand(zero_reg));
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
-                                 Register scratch0,
-                                 Register scratch1,
-                                 Label* on_black) {
-  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-}
-
-
-void MacroAssembler::HasColor(Register object,
-                              Register bitmap_scratch,
-                              Register mask_scratch,
-                              Label* has_color,
-                              int first_bit,
-                              int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
-
-  GetMarkBits(object, bitmap_scratch, mask_scratch);
-
-  Label other_color, word_boundary;
-  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  And(t8, t9, Operand(mask_scratch));
-  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
-  // Shift left 1 by adding.
-  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
-  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
-  And(t8, t9, Operand(mask_scratch));
-  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
-  jmp(&other_color);
-
-  bind(&word_boundary);
-  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
-  And(t9, t9, Operand(1));
-  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
-  bind(&other_color);
-}
-
-
-// Detect some, but not all, common pointer-free objects.  This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(Register value,
-                                      Register scratch,
-                                      Label* not_data_object) {
-  ASSERT(!AreAliased(value, scratch, t8, no_reg));
-  Label is_data_object;
-  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
-  Branch(&is_data_object, eq, t8, Operand(scratch));
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
-  Branch(not_data_object, ne, t8, Operand(zero_reg));
-  bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
-                                 Register bitmap_reg,
-                                 Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
-  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
-  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
-  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
-  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
-  sll(t8, t8, kPointerSizeLog2);
-  Addu(bitmap_reg, bitmap_reg, t8);
-  li(t8, Operand(1));
-  sllv(mask_reg, t8, mask_reg);
-}
-
-
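
A stand-alone model of GetMarkBits' address arithmetic, under assumed values for the constants (4-byte pointers, 32-bit bitmap cells so kBitsPerCellLog2 == 5, and kPageSizeBits == 20), with the MemoryChunk header offset that the callers add omitted:

    #include <cstdint>

    static const int kPointerSizeLog2 = 2;
    static const int kBitsPerCellLog2 = 5;
    static const int kPageSizeBits = 20;
    static const uintptr_t kPageAlignmentMask =
        (static_cast<uintptr_t>(1) << kPageSizeBits) - 1;

    struct MarkBitLocation {
      uintptr_t cell_address;  // Word holding the mark bits.
      uint32_t mask;           // Selects the object's first mark bit.
    };

    MarkBitLocation GetMarkBits(uintptr_t addr) {
      // And(bitmap_reg, addr_reg, ~kPageAlignmentMask): the page start.
      uintptr_t page = addr & ~kPageAlignmentMask;
      // Ext(mask_reg, ...): bit index within a 32-bit bitmap cell.
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      // Ext(t8, ...): cell index within the page's bitmap.
      const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
      uintptr_t cell_index = (addr & kPageAlignmentMask) >> kLowBits;
      MarkBitLocation loc;
      loc.cell_address = page + (cell_index << kPointerSizeLog2);
      loc.mask = 1u << bit;  // li(t8, 1); sllv(mask_reg, t8, mask_reg).
      return loc;
    }
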
-void MacroAssembler::EnsureNotWhite(
-    Register value,
-    Register bitmap_scratch,
-    Register mask_scratch,
-    Register load_scratch,
-    Label* value_is_white_and_not_data) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
-  GetMarkBits(value, bitmap_scratch, mask_scratch);
-
-  // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  Label done;
-
-  // Since both black and grey have a 1 in the first position and white does
-  // not have a 1 there we only need to check one bit.
-  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  And(t8, mask_scratch, load_scratch);
-  Branch(&done, ne, t8, Operand(zero_reg));
-
-  if (emit_debug_code()) {
-    // Check for impossible bit pattern.
-    Label ok;
-    // sll may overflow, making the check conservative.
-    sll(t8, mask_scratch, 1);
-    And(t8, load_scratch, t8);
-    Branch(&ok, eq, t8, Operand(zero_reg));
-    stop("Impossible marking bit pattern");
-    bind(&ok);
-  }
-
-  // Value is white.  We check whether it is data that doesn't need scanning.
-  // Currently only checks for HeapNumber and non-cons strings.
-  Register map = load_scratch;  // Holds map while checking type.
-  Register length = load_scratch;  // Holds length of object after testing type.
-  Label is_data_object;
-
-  // Check for heap-number
-  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
-  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
-  {
-    Label skip;
-    Branch(&skip, ne, t8, Operand(map));
-    li(length, HeapNumber::kSize);
-    Branch(&is_data_object);
-    bind(&skip);
-  }
-
-  // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  Register instance_type = load_scratch;
-  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
-  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
-  // It's a non-indirect (non-cons and non-slice) string.
-  // If it's external, the length is just ExternalString::kSize.
-  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
-  // External strings are the only ones with the kExternalStringTag bit
-  // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
-  And(t8, instance_type, Operand(kExternalStringTag));
-  {
-    Label skip;
-    Branch(&skip, eq, t8, Operand(zero_reg));
-    li(length, ExternalString::kSize);
-    Branch(&is_data_object);
-    bind(&skip);
-  }
-
-  // Sequential string, either ASCII or UC16.
-  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
-  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
-  // getting the length multiplied by 2.
-  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  lw(t9, FieldMemOperand(value, String::kLengthOffset));
-  And(t8, instance_type, Operand(kStringEncodingMask));
-  {
-    Label skip;
-    Branch(&skip, eq, t8, Operand(zero_reg));
-    srl(t9, t9, 1);
-    bind(&skip);
-  }
-  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
-  And(length, length, Operand(~kObjectAlignmentMask));
-
-  bind(&is_data_object);
-  // Value is a data object, and it is white.  Mark it black.  Since we know
-  // that the object is white we can make it black by flipping one bit.
-  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  Or(t8, t8, Operand(mask_scratch));
-  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
-
-  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
-  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-  Addu(t8, t8, Operand(length));
-  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
-
-  bind(&done);
-}
-
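
The two-bit color encoding that EnsureNotWhite's ASSERTs spell out, as a hypothetical table with the first bit in the low position: white = 00, black = 10, grey = 11, and 01 never occurs. Two properties the code exploits: whiteness is visible in the first bit alone, and a white object becomes black by setting that single bit. The size rounding from the sequential-string path is included as a generic align-up.

    #include <cassert>

    enum Color { WHITE = 0, BLACK = 1, GREY = 3, IMPOSSIBLE = 2 };

    // "Since both black and grey have a 1 in the first position and white
    // does not ... we only need to check one bit."
    bool IsWhite(int bits) { return (bits & 1) == 0; }

    // The Or(t8, t8, mask_scratch) at the end of EnsureNotWhite: flip the
    // first bit to turn a known-white object black.
    int MarkBlack(int bits) { return bits | 1; }

    // Round a byte length up to the object alignment, whatever the mask is.
    int AlignUp(int size, int alignment_mask) {
      return (size + alignment_mask) & ~alignment_mask;
    }

    int main() {
      assert(IsWhite(WHITE) && !IsWhite(BLACK) && !IsWhite(GREY));
      assert(MarkBlack(WHITE) == BLACK);
      assert(AlignUp(13, 3) == 16);  // 4-byte alignment example.
      return 0;
    }
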
-
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
      FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
   Label not_smi;
   JumpIfNotSmi(descriptors, &not_smi);
-  LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
+  li(descriptors, Operand(FACTORY->empty_descriptor_array()));
   bind(&not_smi);
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
-  Label next;
-  // Preload a couple of values used in the loop.
-  Register  empty_fixed_array_value = t2;
-  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = t3;
-  LoadRoot(empty_descriptor_array_value,
-           Heap::kEmptyDescriptorArrayRootIndex);
-  mov(a1, a0);
-  bind(&next);
-
-  // Check that there are no elements.  Register a1 contains the
-  // current JS object we've reached through the prototype chain.
-  lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
-  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in a2 for the subsequent
-  // prototype load.
-  lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-  lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(a3, call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (a3).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
-  JumpIfSmi(a3, call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  Branch(&check_prototype, eq, a1, Operand(a0));
-  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
-
-  // Load the prototype from the map and loop if non-null.
-  bind(&check_prototype);
-  lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
-  Branch(&next, ne, a1, Operand(null_value));
-}
-
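
A schematic C++ rendering of the walk CheckEnumCache performs, with toy types whose field names are hypothetical (not V8's object layout): the fast for-in path applies only if every object on the prototype chain has no elements and a usable enum cache, and every object past the receiver has an empty cache.

    struct ToyMap;
    struct ToyObject {
      ToyObject* elements;  // Equals the empty marker when there are none.
      ToyMap* map;
    };
    struct ToyMap {
      bool has_enum_cache;    // Stands in for the descriptor/smi checks.
      ToyObject* enum_cache;  // Cached enumeration keys.
      ToyObject* prototype;   // 0 terminates the chain (null_value above).
    };

    static ToyObject kEmptyFixedArray = { 0, 0 };

    bool CanUseEnumCache(ToyObject* receiver) {
      for (ToyObject* o = receiver; o != 0; o = o->map->prototype) {
        if (o->elements != &kEmptyFixedArray) return false;
        if (!o->map->has_enum_cache) return false;
        // The receiver itself may have a non-empty cache; the others not.
        if (o != receiver && o->map->enum_cache != &kEmptyFixedArray)
          return false;
      }
      return true;
    }

    int main() {
      ToyMap map = { true, &kEmptyFixedArray, 0 };
      ToyObject receiver = { &kEmptyFixedArray, &map };
      return CanUseEnumCache(&receiver) ? 0 : 1;
    }
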
-
-void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
-  ASSERT(!output_reg.is(input_reg));
-  Label done;
-  li(output_reg, Operand(255));
-  // Normal branch: nop in delay slot.
-  Branch(&done, gt, input_reg, Operand(output_reg));
-  // Use delay slot in this branch.
-  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
-  mov(output_reg, zero_reg);  // In delay slot.
-  mov(output_reg, input_reg);  // Value is in range 0..255.
-  bind(&done);
-}
-
-
-void MacroAssembler::ClampDoubleToUint8(Register result_reg,
-                                        DoubleRegister input_reg,
-                                        DoubleRegister temp_double_reg) {
-  Label above_zero;
-  Label done;
-  Label in_bounds;
-
-  Move(temp_double_reg, 0.0);
-  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
-
-  // Double value is less than or equal to zero, or NaN: return 0.
-  mov(result_reg, zero_reg);
-  Branch(&done);
-
-  // Double value is greater than 255, return 255.
-  bind(&above_zero);
-  Move(temp_double_reg, 255.0);
-  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
-  li(result_reg, Operand(255));
-  Branch(&done);
-
-  // In 0-255 range, round and truncate.
-  bind(&in_bounds);
-  round_w_d(temp_double_reg, input_reg);
-  mfc1(result_reg, temp_double_reg);
-  bind(&done);
-}
-
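
A plain-C++ model of the two clamp helpers above. The assembly's edge cases carry over: anything not greater than zero, including NaN (whose comparisons are false, matching the BranchF fall-through), maps to 0, and values above 255 map to 255. One divergence worth noting: round_w_d rounds ties to even, while std::lround rounds ties away from zero, so this sketch differs from the hardware at exact .5 values.

    #include <cmath>
    #include <cstdint>

    uint8_t ClampUint8(int32_t value) {
      if (value > 255) return 255;
      if (value < 0) return 0;
      return static_cast<uint8_t>(value);  // Already in range 0..255.
    }

    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;    // Covers <= 0, -0.0, and NaN.
      if (value > 255.0) return 255;
      return static_cast<uint8_t>(std::lround(value));
    }
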
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
-}
-
-
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index f57418f..c968ffc 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -50,6 +50,15 @@
 // trying to update gp register for position-independent-code. Whenever
 // MIPS generated code calls C code, it must be via t9 register.
 
+// Register aliases.
+// cp is assumed to be a callee-saved register.
+const Register roots = s6;  // Roots array pointer.
+const Register cp = s7;     // JavaScript context pointer.
+const Register fp = s8_fp;  // Alias for fp.
+// Registers used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
+
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -81,53 +90,6 @@
   PROTECT
 };
 
-// Flags used for the li macro-assembler function.
-enum LiFlags {
-  // If the constant value can be represented in just 16 bits, then
-  // optimize the li to use a single instruction, rather than a lui/ori pair.
-  OPTIMIZE_SIZE = 0,
-  // Always use 2 instructions (lui/ori pair), even if the constant could
-  // be loaded with just one, so that this value is patchable later.
-  CONSTANT_SIZE = 1
-};
-
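
The size decision behind the removed LiFlags, sketched in isolation: a constant loads in one instruction when it fits a 16-bit immediate (addiu sign-extends, ori zero-extends; the real assembler also manages with a single lui when the low half is zero, which is simplified away here), and CONSTANT_SIZE forces the two-instruction lui/ori pair so the value can be patched in place later.

    #include <cstdint>

    bool FitsOneInstruction(int32_t value) {
      bool is_int16 = value >= -32768 && value <= 32767;  // addiu range.
      bool is_uint16 = value >= 0 && value <= 65535;      // ori range.
      return is_int16 || is_uint16;
    }

    int InstructionsForLi(int32_t value, bool constant_size) {
      return (!constant_size && FitsOneInstruction(value)) ? 1 : 2;
    }
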
-
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-inline MemOperand ContextOperand(Register context, int index) {
-  return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-inline MemOperand GlobalObjectOperand()  {
-  return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-inline MemOperand CFunctionArgumentOperand(int index) {
-  ASSERT(index > kCArgSlotCount);
-  // Argument 5 takes the slot just past the four Arg-slots.
-  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
-  return MemOperand(sp, offset);
-}
-
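
The arithmetic inside two of the removed helpers, restated stand-alone; kHeapObjectTag == 1 is V8's tagging scheme, and the four reserved argument slots are the o32 values used throughout this file:

    #include <cassert>
    #include <cstdint>

    static const int kHeapObjectTag = 1;
    static const int kPointerSize = 4;
    static const int kCArgSlotCount = 4;
    static const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;

    // FieldMemOperand: heap pointers carry tag bit 1, so the field at byte
    // offset `offset` lives at object + offset - 1.
    uintptr_t FieldAddress(uintptr_t tagged_object, int offset) {
      return tagged_object + offset - kHeapObjectTag;
    }

    // CFunctionArgumentOperand: C arguments 1..4 travel in a0..a3; argument
    // 5 takes the first stack slot past the four reserved argument slots.
    int CFunctionArgumentOffset(int index) {
      assert(index > kCArgSlotCount);
      return (index - 5) * kPointerSize + kCArgsSlotsSize;
    }
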
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -176,30 +138,23 @@
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
-  static int CallSize(Register target, COND_ARGS);
+  int CallSize(Register target, COND_ARGS);
   void Call(Register target, COND_ARGS);
-  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  static int CallSize(Handle<Code> code,
-                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-                      unsigned ast_id = kNoASTId,
-                      COND_ARGS);
+  int CallSize(Handle<Code> code,
+               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+               unsigned ast_id = kNoASTId,
+               COND_ARGS);
   void Call(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             unsigned ast_id = kNoASTId,
             COND_ARGS);
   void Ret(COND_ARGS);
-  inline void Ret(BranchDelaySlot bd, Condition cond = al,
-    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
-    Ret(cond, rs, rt, bd);
+  inline void Ret(BranchDelaySlot bd) {
+    Ret(al, zero_reg, Operand(zero_reg), bd);
   }
 
-  void Branch(Label* L,
-              Condition cond,
-              Register rs,
-              Heap::RootListIndex index,
-              BranchDelaySlot bdslot = PROTECT);
-
 #undef COND_ARGS
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -209,14 +164,10 @@
             Register reg = no_reg,
             const Operand& op = Operand(no_reg));
 
-  // Trivial case of DropAndRet that utilizes the delay slot and only emits
-  // 2 instructions.
-  void DropAndRet(int drop);
-
-  void DropAndRet(int drop,
-                  Condition cond,
-                  Register reg,
-                  const Operand& op);
+  void DropAndRet(int drop = 0,
+                  Condition cond = cc_always,
+                  Register reg = no_reg,
+                  const Operand& op = Operand(no_reg));
 
   // Swap two registers.  If the scratch register is omitted then a slightly
   // less efficient form using xor instead of mov is emitted.
@@ -246,15 +197,6 @@
     mtc1(src_high, FPURegister::from_code(dst.code() + 1));
   }
 
-  // Conditional move.
-  void Move(FPURegister dst, double imm);
-  void Movz(Register rd, Register rs, Register rt);
-  void Movn(Register rd, Register rs, Register rt);
-  void Movt(Register rd, Register rs, uint16_t cc = 0);
-  void Movf(Register rd, Register rs, uint16_t cc = 0);
-
-  void Clz(Register rd, Register rs);
-
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example in
   // CodeGenerator::ProcessDeferred().
@@ -278,137 +220,40 @@
                  Heap::RootListIndex index,
                  Condition cond, Register src1, const Operand& src2);
 
-  void LoadHeapObject(Register dst, Handle<HeapObject> object);
 
-  void LoadObject(Register result, Handle<Object> object) {
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      li(result, object);
-    }
-  }
-
-  // ---------------------------------------------------------------------------
-  // GC Support
-
-  void IncrementalMarkingRecordWriteHelper(Register object,
-                                           Register value,
-                                           Register address);
-
-  enum RememberedSetFinalAction {
-    kReturnAtEnd,
-    kFallThroughAtEnd
-  };
+  // Check if object is in new space.
+  // scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,  // eq for new space, ne otherwise.
+                  Label* branch);
 
 
-  // Record in the remembered set the fact that we have a pointer to new space
-  // at the address pointed to by the addr register.  Only works if addr is not
-  // in new space.
-  void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
-                           SaveFPRegsMode save_fp,
-                           RememberedSetFinalAction and_then);
+  // For the page containing |object| mark the region covering [address]
+  // dirty. The object address must be in the first 8K of an allocated page.
+  void RecordWriteHelper(Register object,
+                         Register address,
+                         Register scratch);
 
-  void CheckPageFlag(Register object,
-                     Register scratch,
-                     int mask,
-                     Condition cc,
-                     Label* condition_met);
-
-  // Check if object is in new space.  Jumps if the object is not in new space.
-  // The register scratch can be object itself, but it will be clobbered.
-  void JumpIfNotInNewSpace(Register object,
-                           Register scratch,
-                           Label* branch) {
-    InNewSpace(object, scratch, ne, branch);
-  }
-
-  // Check if object is in new space.  Jumps if the object is in new space.
-  // The register scratch can be object itself, but scratch will be clobbered.
-  void JumpIfInNewSpace(Register object,
-                        Register scratch,
-                        Label* branch) {
-    InNewSpace(object, scratch, eq, branch);
-  }
-
-  // Check if an object has a given incremental marking color.
-  void HasColor(Register object,
-                Register scratch0,
-                Register scratch1,
-                Label* has_color,
-                int first_bit,
-                int second_bit);
-
-  void JumpIfBlack(Register object,
+  // For the page containing |object| mark the region covering
+  // [object+offset] dirty. The object address must be in the first 8K
+  // of an allocated page.  The 'scratch' registers are used in the
+  // implementation and all 3 registers are clobbered by the
+  // operation, as well as the 'at' register. RecordWrite updates the
+  // write barrier even when storing smis.
+  void RecordWrite(Register object,
+                   Operand offset,
                    Register scratch0,
-                   Register scratch1,
-                   Label* on_black);
+                   Register scratch1);
 
-  // Checks the color of an object.  If the object is already grey or black
-  // then we just fall through, since it is already live.  If it is white and
-  // we can determine that it doesn't need to be scanned, then we just mark it
-  // black and fall through.  For the rest we jump to the label so the
-  // incremental marker can fix its assumptions.
-  void EnsureNotWhite(Register object,
-                      Register scratch1,
-                      Register scratch2,
-                      Register scratch3,
-                      Label* object_is_white_and_not_data);
-
-  // Detects conservatively whether an object is data-only, i.e. it does not
-  // need to be scanned by the garbage collector.
-  void JumpIfDataObject(Register value,
-                        Register scratch,
-                        Label* not_data_object);
-
-  // Notify the garbage collector that we wrote a pointer into an object.
-  // |object| is the object being stored into, |value| is the object being
-  // stored.  value and scratch registers are clobbered by the operation.
-  // The offset is the offset from the start of the object, not the offset from
-  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
-  void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
-      RAStatus ra_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
-
-  // As above, but the offset has the tag presubtracted.  For use with
-  // MemOperand(reg, off).
-  inline void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
-      RAStatus ra_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     ra_status,
-                     save_fp,
-                     remembered_set_action,
-                     smi_check);
-  }
-
-  // For a given |object| notify the garbage collector that the slot |address|
-  // has been written.  |value| is the object being stored. The value and
-  // address registers are clobbered by the operation.
-  void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
-      RAStatus ra_status,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
+  // For the page containing |object| mark the region covering
+  // [address] dirty. The object address must be in the first 8K of an
+  // allocated page.  All 3 registers are clobbered by the operation,
+  // as well as the 'at' register. RecordWrite updates the write barrier
+  // even when storing smis.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register scratch);
 
 
   // ---------------------------------------------------------------------------
@@ -437,7 +282,7 @@
   }
 
   // Check if the given instruction is a 'type' marker.
-  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
   // nop(type)). These instructions are generated to mark special location in
   // the code, like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
@@ -594,13 +439,12 @@
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
   // Load int32 in the rd register.
-  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
-  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
-    li(rd, Operand(j), mode);
+  void li(Register rd, Operand j, bool gen2instr = false);
+  inline void li(Register rd, int32_t j, bool gen2instr = false) {
+    li(rd, Operand(j), gen2instr);
   }
-  inline void li(Register dst, Handle<Object> value,
-                 LiFlags mode = OPTIMIZE_SIZE) {
-    li(dst, Operand(value), mode);
+  inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
+    li(dst, Operand(value), gen2instr);
   }
 
   // Push multiple registers on the stack.
@@ -674,14 +518,6 @@
     Addu(sp, sp, 2 * kPointerSize);
   }
 
-  // Pop three registers. Pops rightmost register first (from lower address).
-  void Pop(Register src1, Register src2, Register src3) {
-    lw(src3, MemOperand(sp, 0 * kPointerSize));
-    lw(src2, MemOperand(sp, 1 * kPointerSize));
-    lw(src1, MemOperand(sp, 2 * kPointerSize));
-    Addu(sp, sp, 3 * kPointerSize);
-  }
-
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
@@ -700,17 +536,10 @@
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
-  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
-  // Does not handle errors.
-  void FlushICache(Register address, unsigned instructions);
-
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
-  // ---------------------------------------------------------------------------
-  // FPU macros. These do not handle special cases like NaN or +- inf.
-
   // Convert unsigned word to double.
   void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -719,28 +548,6 @@
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
-  void Trunc_w_d(FPURegister fd, FPURegister fs);
-  void Round_w_d(FPURegister fd, FPURegister fs);
-  void Floor_w_d(FPURegister fd, FPURegister fs);
-  void Ceil_w_d(FPURegister fd, FPURegister fs);
-  // Wrapper function for the different cmp/branch types.
-  void BranchF(Label* target,
-               Label* nan,
-               Condition cc,
-               FPURegister cmp1,
-               FPURegister cmp2,
-               BranchDelaySlot bd = PROTECT);
-
-  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
-  inline void BranchF(BranchDelaySlot bd,
-                      Label* target,
-                      Label* nan,
-                      Condition cc,
-                      FPURegister cmp1,
-                      FPURegister cmp2) {
-    BranchF(target, nan, cc, cmp1, cmp2, bd);
-  };
-
   // Convert the HeapNumber pointed to by source to a 32-bit signed integer
   // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
   // to the not_int32 label. If FPU is available double_scratch is used but not
@@ -752,18 +559,6 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
-  // Truncates a double using a specific rounding mode.
-  // The except_flag will contain any exceptions caused by the instruction.
-  // If check_inexact is kDontCheckForInexactConversion, then the inexact
-  // exception is masked.
-  void EmitFPUTruncate(FPURoundingMode rounding_mode,
-                       FPURegister result,
-                       DoubleRegister double_input,
-                       Register scratch1,
-                       Register except_flag,
-                       CheckForInexactConversion check_inexact
-                           = kDontCheckForInexactConversion);
-
   // Helper for EmitECMATruncate.
   // This will truncate a floating-point value outside of the signed 32-bit
   // integer range to a 32-bit signed integer.
@@ -785,6 +580,15 @@
                         Register scratch2,
                         Register scratch3);
 
+  // -------------------------------------------------------------------------
+  // Activation frames.
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
   // Enter exit frame.
   // argc - argument count to be dropped by LeaveExitFrame.
   // save_doubles - saves FPU registers on stack, currently disabled.
@@ -793,9 +597,7 @@
                       int stack_space = 0);
 
   // Leave the current exit frame.
-  void LeaveExitFrame(bool save_doubles,
-                      Register arg_count,
-                      bool do_return = false);
+  void LeaveExitFrame(bool save_doubles, Register arg_count);
 
   // Get the actual activation frame alignment for target environment.
   static int ActivationFrameAlignment();
@@ -805,22 +607,6 @@
 
   void LoadContext(Register dst, int context_chain_length);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the global context if the map in register
-  // map_in_out is the cached Array map in the global context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
-  // Load the initial map for new Arrays from a JSFunction.
-  void LoadInitialArrayMap(Register function_in,
-                           Register scratch,
-                           Register map_out);
-
   void LoadGlobalFunction(int index, Register function);
 
   // Load the initial map from the global function. The registers
@@ -829,16 +615,10 @@
                                     Register map,
                                     Register scratch);
 
-  void InitializeRootRegister() {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(isolate());
-    li(kRootRegister, Operand(roots_array_start));
-  }
-
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
-  // Set up call kind marking in t1. The method takes t1 as an
+  // Setup call kind marking in t1. The method takes t1 as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -866,10 +646,9 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(Handle<JSFunction> function,
+  void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
 
@@ -893,23 +672,30 @@
   void DebugBreak();
 #endif
 
+  void InitializeRootRegister() {
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    li(kRootRegister, Operand(roots_address));
+  }
 
   // -------------------------------------------------------------------------
   // Exception handling.
 
   // Push a new try handler and link into try handler chain.
-  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+  // The return address must be passed in register ra.
+  // Clobber t0, t1, t2.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   // Must preserve the result register.
   void PopTryHandler();
 
-  // Passes thrown value to the handler on top of the try handler chain.
+  // Passes thrown value (in v0) to the handler on top of the try handler chain.
   void Throw(Register value);
 
   // Propagates an uncatchable exception to the top of the current JS stack's
   // handler chain.
-  void ThrowUncatchable(Register value);
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
 
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -922,13 +708,6 @@
                  Register length,
                  Register scratch);
 
-  // Initialize fields with filler values.  Fields starting at |start_offset|
-  // not including end_offset are overwritten with the value in |filler|.  At
-  // the end the loop, |start_offset| takes the value of |end_offset|.
-  void InitializeFieldsWithFiller(Register start_offset,
-                                  Register end_offset,
-                                  Register filler);
-
   // -------------------------------------------------------------------------
   // Support functions.
 
@@ -940,8 +719,7 @@
   void TryGetFunctionPrototype(Register function,
                                Register result,
                                Register scratch,
-                               Label* miss,
-                               bool miss_on_bound_function = false);
+                               Label* miss);
 
   void GetObjectType(Register function,
                      Register map,
@@ -953,55 +731,15 @@
                          Register scratch,
                          Label* fail);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Register scratch,
-                               Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiOnlyElements(Register map,
-                                Register scratch,
-                                Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail, in which
-  // case scratch2, scratch3 and scratch4 are unmodified.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register receiver_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   Register scratch4,
-                                   Label* fail);
-
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
-  // "branch_to" if the result of the comparison is "cond". If multiple map
-  // compares are required, the compare sequences branches to early_success.
-  void CompareMapAndBranch(Register obj,
-                           Register scratch,
-                           Handle<Map> map,
-                           Label* early_success,
-                           Condition cond,
-                           Label* branch_to,
-                           CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-  // Check if the map of an object is equal to a specified map and branch to
-  // label if not. Skip the smi check if not required (object is known to be a
-  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specificed map.
+  // Check if the map of an object is equal to a specified map (either
+  // given directly or as an index into the root list) and branch to
+  // label if not. Skip the smi check if not required (object is known
+  // to be a heap object).
   void CheckMap(Register obj,
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type,
-                CompareMapMode mode = REQUIRE_EXACT_MAP);
-
+                SmiCheckType smi_check_type);
 
   void CheckMap(Register obj,
                 Register scratch,
@@ -1022,21 +760,6 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
-
-  // Load and check the instance type of an object for being a string.
-  // Loads the type into the second argument register.
-  // Returns a condition that will be enabled if the object was a string.
-  Condition IsObjectStringType(Register obj,
-                               Register type,
-                               Register result) {
-    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
-    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
-    And(type, type, Operand(kIsNotStringMask));
-    ASSERT_EQ(0, kStringTag);
-    return eq;
-  }
-
-
   // Picks out an array index from the hash field.
   // Register use:
   //   hash - holds the index's hash. Clobbered.
@@ -1106,26 +829,31 @@
   // -------------------------------------------------------------------------
   // Runtime calls.
 
-  // See comments at the beginning of CEntryStub::Generate.
-  inline void PrepareCEntryArgs(int num_args) {
-    li(s0, num_args);
-    li(s1, (num_args - 1) * kPointerSize);
-  }
-
-  inline void PrepareCEntryFunction(const ExternalReference& ref) {
-    li(s2, Operand(ref));
-  }
-
   // Call a code stub.
-  void CallStub(CodeStub* stub,
-                Condition cond = cc_always,
-                Register r1 = zero_reg,
-                const Operand& r2 = Operand(zero_reg),
-                BranchDelaySlot bd = PROTECT);
+  void CallStub(CodeStub* stub, Condition cond = cc_always,
+                Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub,
+                                           Condition cond = cc_always,
+                                           Register r1 = zero_reg,
+                                           const Operand& r2 =
+                                               Operand(zero_reg));
 
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
+                                               Condition cond = cc_always,
+                                               Register r1 = zero_reg,
+                                               const Operand& r2 =
+                                                   Operand(zero_reg));
+
   void CallJSExitStub(CodeStub* stub);
 
   // Call a runtime routine.
@@ -1137,8 +865,7 @@
 
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
-                             int num_arguments,
-                             BranchDelaySlot bd = PROTECT);
+                             int num_arguments);
 
   // Tail call of a runtime routine (jump).
   // Like JumpToExternalReference, but also takes care of passing the number
@@ -1147,14 +874,17 @@
                                  int num_arguments,
                                  int result_size);
 
+  // Tail call of a runtime routine (jump). Try to generate the code if
+  // necessary. Do not perform a GC but instead return a retry after GC
+  // failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-
   // Before calling a C-function from generated code, align arguments on stack
   // and add space for the four mips argument slots.
   // After aligning the frame, non-register arguments must be stored on the
@@ -1164,11 +894,7 @@
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_reg_arguments,
-                            int num_double_registers,
-                            Register scratch);
-  void PrepareCallCFunction(int num_reg_arguments,
-                            Register scratch);
+  void PrepareCallCFunction(int num_arguments, Register scratch);
 
   // Arguments 1-4 are placed in registers a0 thru a3 respectively.
   // Arguments 5..n are stored to stack using following:
@@ -1180,13 +906,7 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, int num_arguments);
-  void CallCFunction(ExternalReference function,
-                     int num_reg_arguments,
-                     int num_double_arguments);
-  void CallCFunction(Register function,
-                     int num_reg_arguments,
-                     int num_double_arguments);
+  void CallCFunction(Register function, Register scratch, int num_arguments);
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
   // There are two ways of passing double arguments on MIPS, depending on
@@ -1197,15 +917,15 @@
   void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
   void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
 
-  // Calls an API function.  Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions.  Restores context.  stack_space
-  // - space to be unwound on exit (includes the call JS arguments space and
-  // the additional space allocated for the fast call).
-  void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions. Restores context.
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
+                                           int stack_space);
 
   // Jump to the builtin routine.
-  void JumpToExternalReference(const ExternalReference& builtin,
-                               BranchDelaySlot bd = PROTECT);
+  void JumpToExternalReference(const ExternalReference& builtin);
+
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);
 
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
@@ -1262,9 +982,6 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
-  void set_has_frame(bool value) { has_frame_ = value; }
-  bool has_frame() { return has_frame_; }
-  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // Number utilities.
@@ -1280,14 +997,18 @@
   // -------------------------------------------------------------------------
   // Smi utilities.
 
+  // Try to convert int32 to smi. If the value is too large, preserve
+  // the original value and jump to not_a_smi. Destroys scratch and
+  // sets flags.
+  // This is only used by crankshaft atm so it is unimplemented on MIPS.
+  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+    UNIMPLEMENTED_MIPS();
+  }
+
   void SmiTag(Register reg) {
     Addu(reg, reg, reg);
   }
 
-  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
-  void SmiTagCheckOverflow(Register reg, Register overflow);
-  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
-
   void SmiTag(Register dst, Register src) {
     Addu(dst, src, src);
   }
@@ -1300,25 +1021,21 @@
     sra(dst, src, kSmiTagSize);
   }
 
-  // Untag the source value into destination and jump if source is a smi.
-  // Source and destination can be the same register.
-  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
-
-  // Untag the source value into destination and jump if source is not a smi.
-  // Source and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   // Jump if the register contains a smi.
-  void JumpIfSmi(Register value,
-                 Label* smi_label,
-                 Register scratch = at,
-                 BranchDelaySlot bd = PROTECT);
+  inline void JumpIfSmi(Register value, Label* smi_label,
+                        Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(smi_label, eq, scratch, Operand(zero_reg));
+  }
 
   // Jump if the register contains a non-smi.
-  void JumpIfNotSmi(Register value,
-                    Label* not_smi_label,
-                    Register scratch = at,
-                    BranchDelaySlot bd = PROTECT);
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
+                           Register scratch = at) {
+    ASSERT_EQ(0, kSmiTag);
+    andi(scratch, value, kSmiTagMask);
+    Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+  }
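
A note on the smi helpers restored above: assuming kSmiTag == 0, kSmiTagSize == 1 and kSmiTagMask == 1 (which is what the ASSERT_EQ checks imply), SmiTag's Addu(reg, reg, reg) is simply a left shift by one, so only values that fit in 31 bits can be tagged; that is why TrySmiTag needs a not_a_smi exit. A minimal C++ sketch of the encoding, illustration only and not part of the patch:

  #include <cstdint>
  // Sketch only: C++ equivalents of the assembler helpers above, assuming
  // the 32-bit smi layout described.
  static inline int32_t SmiTagValue(int32_t v)   { return v << 1; }       // Addu(reg, reg, reg)
  static inline int32_t SmiUntagValue(int32_t w) { return w >> 1; }       // sra(dst, src, kSmiTagSize)
  static inline bool IsSmiWord(int32_t w)        { return (w & 1) == 0; } // andi + branch on zero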
 
   // Jump if either of the registers contain a non-smi.
   void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
@@ -1379,37 +1096,13 @@
                                            Register scratch2,
                                            Label* failure);
 
-  void ClampUint8(Register output_reg, Register input_reg);
-
-  void ClampDoubleToUint8(Register result_reg,
-                          DoubleRegister input_reg,
-                          DoubleRegister temp_double_reg);
-
-
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
-
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
-  // Patch the relocated value (lui/ori pair).
-  void PatchRelocatedValue(Register li_location,
-                           Register scratch,
-                           Register new_value);
-  // Get the relocated value (loaded data) from the lui/ori pair.
-  void GetRelocatedValue(Register li_location,
-                         Register value,
-                         Register scratch);
-
-  // Expects object in a0 and returns map with validated enum cache
-  // in a0.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value, Label* call_runtime);
-
  private:
   void CallCFunctionHelper(Register function,
-                           int num_reg_arguments,
-                           int num_double_arguments);
+                           ExternalReference function_reference,
+                           Register scratch,
+                           int num_arguments);
 
   void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1437,7 +1130,6 @@
                       Handle<Code> code_constant,
                       Register code_reg,
                       Label* done,
-                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
@@ -1446,37 +1138,25 @@
   // the function in the 'resolved' flag.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
-  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise.
-                  Label* branch);
-
-  // Helper for finding the mark bits for an address.  Afterwards, the
-  // bitmap register points at the word with the mark bits and the mask
-  // the position of the first bit.  Leaves addr_reg unchanged.
-  inline void GetMarkBits(Register addr_reg,
-                          Register bitmap_reg,
-                          Register mask_reg);
-
-  // Helper for throwing exceptions.  Compute a handler address and jump to
-  // it.  See the implementation for register usage.
-  void JumpToHandlerEntry();
-
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
+  bool UseAbsoluteCodePointers();
+
   bool generating_stub_;
   bool allow_stub_calls_;
-  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1517,6 +1197,34 @@
 };
 
 
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+static MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+  return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four arg slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
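
Two conventions are packed into the helpers above. FieldMemOperand subtracts kHeapObjectTag because tagged heap pointers carry a low tag bit (kHeapObjectTag == 1 in V8), so the untagged field address is object + offset - 1. CFunctionArgumentOperand assumes the MIPS O32 convention, under which the caller reserves four stack slots for a0..a3 even though those arguments travel in registers; with 4-byte pointers that makes kCArgsSlotsSize == 16 (illustrative values, not spelled out in this hunk). A worked example of the offset computation:

  // Sketch only: where stacked C arguments land under the assumptions above.
  //   index == 5  ->  (5 - 5) * 4 + 16 == 16  ->  MemOperand(sp, 16)
  //   index == 6  ->  (6 - 5) * 4 + 16 == 20  ->  MemOperand(sp, 20)
  // Stacked arguments begin just past the four reserved register arg slots.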
+
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index ae4da93..63e836f 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -377,16 +377,13 @@
     // Isolate.
     __ li(a3, Operand(ExternalReference::isolate_address()));
 
-    {
-      AllowExternalCallThatCantCauseGC scope(masm_);
-      ExternalReference function =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-      __ CallCFunction(function, argument_count);
-    }
+    ExternalReference function =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+    __ CallCFunction(function, argument_count);
 
     // Restore regexp engine registers.
     __ MultiPop(regexp_registers_to_retain);
-    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
     __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
 
     // Check if function returned non-zero for success or zero for failure.
@@ -610,12 +607,6 @@
 
     // Entry code:
     __ bind(&entry_label_);
-
-    // Tell the system that we have a stack frame.  Because the type is MANUAL,
-    // no code is generated.
-    FrameScope scope(masm_, StackFrame::MANUAL);
-
-    // Actually emit code to start a new stack frame.
     // Push arguments
     // Save callee-save registers.
     // Start new stack frame.
@@ -678,7 +669,7 @@
     // string, and store that value in a local variable.
     __ mov(t5, a1);
     __ li(a1, Operand(1));
-    __ Movn(a1, zero_reg, t5);
+    __ movn(a1, zero_reg, t5);
     __ sw(a1, MemOperand(frame_pointer(), kAtStart));
 
     if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
@@ -698,7 +689,7 @@
     // Initialize backtrack stack pointer.
     __ lw(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
     // Initialize code pointer register
-    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+    __ li(code_pointer(), Operand(masm_->CodeObject()));
     // Load previous char as initial value of current character register.
     Label at_start;
     __ lw(a0, MemOperand(frame_pointer(), kAtStart));
@@ -783,7 +774,7 @@
 
       // String might have moved: Reload end of string from frame.
       __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
-      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
       SafeReturn();
     }
 
@@ -813,7 +804,7 @@
       // Otherwise use return value as new stack pointer.
       __ mov(backtrack_stackpointer(), v0);
       // Restore saved registers and continue.
-      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+      __ li(code_pointer(), Operand(masm_->CodeObject()));
       __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
       SafeReturn();
     }
@@ -1010,7 +1001,7 @@
   __ PrepareCallCFunction(num_arguments, scratch);
   __ mov(a2, frame_pointer());
   // Code* of self.
-  __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+  __ li(a1, Operand(masm_->CodeObject()));
   // a0 becomes return address pointer.
   ExternalReference stack_guard_check =
       ExternalReference::re_check_stack_guard_state(masm_->isolate());
@@ -1056,7 +1047,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid.
     int delta = code_handle->address() - re_code->address();
@@ -1112,11 +1103,6 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
-  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
-    // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address but
-    // will change pointer inside the subject handle.
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
@@ -1229,7 +1215,7 @@
   if (OS::ActivationFrameAlignment() != 0) {
     __ lw(sp, MemOperand(sp, 16));
   }
-  __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
 }
 
 
@@ -1258,14 +1244,13 @@
   if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
   // Stack is already aligned for call, so decrement by alignment
   // to make room for storing the return address.
-  __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
-  const int return_address_offset = kCArgsSlotsSize;
-  __ Addu(a0, sp, return_address_offset);
-  __ sw(ra, MemOperand(a0, 0));
+  __ Subu(sp, sp, Operand(stack_alignment));
+  __ sw(ra, MemOperand(sp, 0));
+  __ mov(a0, sp);
   __ mov(t9, t1);
   __ Call(t9);
-  __ lw(ra, MemOperand(sp, return_address_offset));
-  __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+  __ lw(ra, MemOperand(sp, 0));
+  __ Addu(sp, sp, Operand(stack_alignment));
   __ Jump(ra);
 }
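
Worth noting about this hunk: the newer code being reverted reserved kCArgsSlotsSize extra bytes so the callee's four O32 argument slots could not clobber the saved return address, while the restored 3.6 code stores ra at the stack pointer itself and passes its address in a0. A sketch of the restored frame, assuming stack_alignment == 8:

  // sp + 0 : saved ra   <- a0 points here, so the C helper can read (and,
  //                        if the code object moved, patch) the return address
  // sp + 4 : padding up to stack_alignment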
 
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index f31ce7e..17c1897 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -72,7 +72,7 @@
 // code.
 class MipsDebugger {
  public:
-  explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+  explicit MipsDebugger(Simulator* sim);
   ~MipsDebugger();
 
   void Stop(Instruction* instr);
@@ -105,6 +105,10 @@
   void RedoBreakpoints();
 };
 
+MipsDebugger::MipsDebugger(Simulator* sim) {
+  sim_ = sim;
+}
+
 
 MipsDebugger::~MipsDebugger() {
 }
@@ -387,13 +391,6 @@
     if (line == NULL) {
       break;
     } else {
-      char* last_input = sim_->last_debugger_input();
-      if (strcmp(line, "\n") == 0 && last_input != NULL) {
-        line = last_input;
-      } else {
-        // Ownership is transferred to sim_.
-        sim_->set_last_debugger_input(line);
-      }
       // Use sscanf to parse the individual parts of the command line. At the
       // moment no command expects more than two parameters.
       int argc = SScanF(line,
@@ -760,6 +757,7 @@
         PrintF("Unknown command: %s\n", cmd);
       }
     }
+    DeleteArray(line);
   }
 
   // Add all the breakpoints back to stop execution and enter the debugger
@@ -793,12 +791,6 @@
 }
 
 
-void Simulator::set_last_debugger_input(char* input) {
-  DeleteArray(last_debugger_input_);
-  last_debugger_input_ = input;
-}
-
-
 void Simulator::FlushICache(v8::internal::HashMap* i_cache,
                             void* start_addr,
                             size_t size) {
@@ -888,7 +880,7 @@
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
-  // Set up simulator support first. Some of this information is needed to
+  // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   stack_ = reinterpret_cast<char*>(malloc(stack_size_));
   pc_modified_ = false;
@@ -897,7 +889,7 @@
   break_pc_ = NULL;
   break_instr_ = 0;
 
-  // Set up architecture state.
+  // Setup architecture state.
   // All registers are initialized to zero to start with.
   for (int i = 0; i < kNumSimuRegisters; i++) {
     registers_[i] = 0;
@@ -919,8 +911,6 @@
   for (int i = 0; i < kNumExceptions; i++) {
     exceptions[i] = 0;
   }
-
-  last_debugger_input_ = NULL;
 }
 
 
@@ -1369,9 +1359,9 @@
 
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit() const {
-  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+  // Leave a safety margin of 256 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 1024;
+  return reinterpret_cast<uintptr_t>(stack_) + 256;
 }
 
 
@@ -1944,7 +1934,7 @@
   // Next pc
   int32_t next_pc = 0;
 
-  // Set up the variables if needed before executing the instruction.
+  // Setup the variables if needed before executing the instruction.
   ConfigureTypeRegister(instr,
                         alu_out,
                         i64hilo,
@@ -2291,7 +2281,7 @@
 }
 
 
-// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
+// Type 2: instructions using a 16-bit immediate. (e.g. addi, beq).
 void Simulator::DecodeTypeImmediate(Instruction* instr) {
   // Instruction fields.
   Opcode   op     = instr->OpcodeFieldRaw();
@@ -2614,7 +2604,7 @@
 }
 
 
-// Type 3: instructions using a 26 bytes immediate. (e.g. j, jal).
+// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
 void Simulator::DecodeTypeJump(Instruction* instr) {
   // Get current pc.
   int32_t current_pc = get_pc();
@@ -2711,7 +2701,7 @@
 int32_t Simulator::Call(byte* entry, int argument_count, ...) {
   va_list parameters;
   va_start(parameters, argument_count);
-  // Set up arguments.
+  // Setup arguments.
 
   // First four arguments passed in registers.
   ASSERT(argument_count >= 4);
@@ -2758,7 +2748,7 @@
   int32_t sp_val = get_register(sp);
   int32_t fp_val = get_register(fp);
 
-  // Set up the callee-saved registers with a known value. To be able to check
+  // Setup the callee-saved registers with a known value. To be able to check
   // that they are preserved properly across JS execution.
   int32_t callee_saved_value = icount_;
   set_register(s0, callee_saved_value);
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 1e72939..69dddfa 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -221,10 +221,6 @@
   // Pop an address from the JS stack.
   uintptr_t PopAddress();
 
-  // Debugger input.
-  void set_last_debugger_input(char* input);
-  char* last_debugger_input() { return last_debugger_input_; }
-
   // ICache checking.
   static void FlushICache(v8::internal::HashMap* i_cache, void* start,
                           size_t size);
@@ -309,14 +305,6 @@
   void InstructionDecode(Instruction* instr);
   // Execute one instruction placed in a branch delay slot.
   void BranchDelayInstructionDecode(Instruction* instr) {
-    if (instr->InstructionBits() == nopInstr) {
-      // Short-cut generic nop instructions. They are always valid and they
-      // never change the simulator state.
-      set_register(pc, reinterpret_cast<int32_t>(instr) +
-                       Instruction::kInstrSize);
-      return;
-    }
-
     if (instr->IsForbiddenInBranchDelay()) {
       V8_Fatal(__FILE__, __LINE__,
                "Eror:Unexpected %i opcode in a branch delay slot.",
@@ -370,9 +358,6 @@
   int icount_;
   int break_count_;
 
-  // Debugger input.
-  char* last_debugger_input_;
-
   // Icache simulation.
   v8::internal::HashMap* i_cache_;
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 294bc0a..5b94973 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,74 +43,51 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
-                       Register receiver,
                        Register name,
-                       // Number of the cache entry, not scaled.
                        Register offset,
                        Register scratch,
-                       Register scratch2,
-                       Register offset_scratch) {
+                       Register scratch2) {
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
   ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
 
   uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
   uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
 
   // Check the relative positions of the address fields.
   ASSERT(value_off_addr > key_off_addr);
   ASSERT((value_off_addr - key_off_addr) % 4 == 0);
   ASSERT((value_off_addr - key_off_addr) < (256 * 4));
-  ASSERT(map_off_addr > key_off_addr);
-  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
 
   Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ sll(offset_scratch, offset, 1);
-  __ Addu(offset_scratch, offset_scratch, offset);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ sll(at, offset_scratch, kPointerSizeLog2);
-  __ Addu(base_addr, base_addr, at);
+  Register offsets_base_addr = scratch;
 
   // Check that the key in the entry matches the name.
-  __ lw(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
+  __ li(offsets_base_addr, Operand(key_offset));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ Branch(&miss, ne, name, Operand(scratch2));
 
   // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+  __ Addu(offsets_base_addr, offsets_base_addr,
+         Operand(value_off_addr - key_off_addr));
+  __ sll(scratch2, offset, 1);
+  __ addu(scratch2, offsets_base_addr, scratch2);
+  __ lw(scratch2, MemOperand(scratch2));
 
   // Check that the flags match what we're looking for.
-  Register flags_reg = base_addr;
-  base_addr = no_reg;
-  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
-  __ Branch(&miss, ne, flags_reg, Operand(flags));
+  __ lw(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
+  __ And(scratch2, scratch2, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, scratch2, Operand(flags));
 
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
+  // Re-load code entry from cache.
+  __ sll(offset, offset, 1);
+  __ addu(offset, offset, offsets_base_addr);
+  __ lw(offset, MemOperand(offset));
 
   // Jump to the first instruction in the code stub.
-  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
+  __ Addu(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(offset);
 
   // Miss: fall through.
   __ bind(&miss);
@@ -122,12 +99,13 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             Handle<String> name,
-                                             Register scratch0,
-                                             Register scratch1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register scratch0,
+    Register scratch1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
@@ -142,8 +120,9 @@
   Register map = scratch1;
   __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
+  __ And(at, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, at, Operand(zero_reg));
+
 
   // Check that receiver is a JSObject.
   __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
@@ -161,16 +140,20 @@
   // Restore the temporarily used register.
   __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      receiver,
+      properties,
+      name,
+      scratch1);
+  if (result->IsFailure()) return result;
 
-  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                     miss_label,
-                                                     &done,
-                                                     receiver,
-                                                     properties,
-                                                     name,
-                                                     scratch1);
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  return result;
 }
 
 
@@ -180,14 +163,13 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2,
-                              Register extra3) {
+                              Register extra2) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  ASSERT(sizeof(Entry) == 12);
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 8.
+  ASSERT(sizeof(Entry) == 8);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -203,66 +185,39 @@
   ASSERT(!extra2.is(scratch));
   ASSERT(!extra2.is(extra));
 
-  // Check register validity.
+  // Check that the scratch, extra and extra2 registers are valid.
   ASSERT(!scratch.is(no_reg));
   ASSERT(!extra.is(no_reg));
   ASSERT(!extra2.is(no_reg));
-  ASSERT(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
 
   // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss, t0);
 
   // Get the map of the receiver and compute the hash.
   __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset));
-  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  uint32_t mask = kPrimaryTableSize - 1;
-  // We shift out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.
-  __ srl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
-  __ And(scratch, scratch, Operand(mask));
+  __ lw(t8, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, Operand(t8));
+  __ Xor(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the primary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kPrimary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
 
   // Primary miss: Compute hash for secondary probe.
-  __ srl(at, name, kHeapObjectTagSize);
-  __ Subu(scratch, scratch, at);
-  uint32_t mask2 = kSecondaryTableSize - 1;
-  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
-  __ And(scratch, scratch, Operand(mask2));
+  __ Subu(scratch, scratch, Operand(name));
+  __ Addu(scratch, scratch, Operand(flags));
+  __ And(scratch,
+         scratch,
+         Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the secondary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kSecondary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
 }
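
The probe rewrite above is the flip side of the entry layout change: with the map check dropped, a stub cache entry shrinks from three words (name, code, map; sizeof(Entry) == 12) back to two (name, code; sizeof(Entry) == 8), so the multiply-by-3 address arithmetic disappears and a single shift suffices. A self-contained sketch of the restored hash computation follows; the table sizes are assumptions in the 3.6 ballpark, and kHeapObjectTagSize == 1:

  #include <cstdint>
  static const uint32_t kPrimaryTableSize = 2048;   // assumption, illustrative
  static const uint32_t kSecondaryTableSize = 512;  // assumption, illustrative
  static const uint32_t kHeapObjectTagSize = 1;

  // Primary probe: mix the name's hash field with the receiver's map pointer,
  // xor in the code flags, then mask down to a tag-scaled table offset.
  static uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits,
                                uint32_t flags) {
    return ((name_hash + map_bits) ^ flags) &
           ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
  }

  // Secondary probe: fold the name back out and fold the flags in.
  static uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits,
                                  uint32_t flags) {
    return ((primary - name_bits) + flags) &
           ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
  }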
 
 
@@ -285,10 +240,7 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ lw(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -296,8 +248,8 @@
   __ li(at, isolate->global());
   __ Branch(miss, ne, prototype, Operand(at));
   // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->global_context()->get(index)));
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
   __ li(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -309,10 +261,8 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            Handle<JSObject> holder,
-                                            int index) {
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -333,7 +283,8 @@
                                            Register scratch,
                                            Label* miss_label) {
   // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
+  __ And(scratch, receiver, Operand(kSmiTagMask));
+  __ Branch(miss_label, eq, scratch, Operand(zero_reg));
 
   // Check that the object is a JS array.
   __ GetObjectType(receiver, scratch, scratch);
@@ -419,18 +370,22 @@
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Handle<JSObject> object,
+                                      JSObject* object,
                                       int index,
-                                      Handle<Map> transition,
+                                      Map* transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
   // a0 : value.
   Label exit;
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, scratch, Handle<Map>(object->map()), miss_label,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label, scratch);
+
+  // Check that the map of the receiver hasn't changed.
+  __ lw(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+  __ Branch(miss_label, ne, scratch, Operand(Handle<Map>(object->map())));
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -442,11 +397,11 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ push(receiver_reg);
-    __ li(a2, Operand(transition));
+    __ li(a2, Operand(Handle<Map>(transition)));
     __ Push(a2, a0);
     __ TailCallExternalReference(
            ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -455,10 +410,10 @@
     return;
   }
 
-  if (!transition.is_null()) {
+  if (transition != NULL) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ li(t0, Operand(transition));
+    __ li(t0, Operand(Handle<Map>(transition)));
     __ sw(t0, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
   }
 
@@ -477,13 +432,7 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ mov(name_reg, a0);
-    __ RecordWriteField(receiver_reg,
-                        offset,
-                        name_reg,
-                        scratch,
-                        kRAHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -496,13 +445,7 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ mov(name_reg, a0);
-    __ RecordWriteField(scratch,
-                        offset,
-                        name_reg,
-                        receiver_reg,
-                        kRAHasNotBeenSaved,
-                        kDontSaveFPRegs);
+    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
   }
 
   // Return the value (register v0).
@@ -514,15 +457,20 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Handle<Code> code = (kind == Code::LOAD_IC)
-      ? masm->isolate()->builtins()->LoadIC_Miss()
-      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
-  __ Jump(code, RelocInfo::CODE_TARGET);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
 static void GenerateCallFunction(MacroAssembler* masm,
-                                 Handle<Object> object,
+                                 Object* object,
                                  const ParameterCount& arguments,
                                  Label* miss,
                                  Code::ExtraICState extra_ic_state) {
@@ -554,31 +502,30 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     Handle<JSObject> holder_obj) {
+                                     JSObject* holder_obj) {
   __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
   Register scratch = name;
-  __ li(scratch, Operand(interceptor));
+  __ li(scratch, Operand(Handle<Object>(interceptor)));
   __ Push(scratch, receiver, holder);
   __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
   __ push(scratch);
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
           masm->isolate());
-  __ PrepareCEntryArgs(5);
-  __ PrepareCEntryFunction(ref);
+  __ li(a0, Operand(5));
+  __ li(a1, Operand(ref));
 
   CEntryStub stub(1);
   __ CallStub(&stub);
@@ -607,34 +554,34 @@
 }
 
 
-static void GenerateFastApiDirectCall(MacroAssembler* masm,
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
                                       int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
-  //  -- sp[4]              : callee JS function
+  //  -- sp[4]              : callee js function
   //  -- sp[8]              : call data
-  //  -- sp[12]             : last JS argument
+  //  -- sp[12]             : last js argument
   //  -- ...
-  //  -- sp[(argc + 3) * 4] : first JS argument
+  //  -- sp[(argc + 3) * 4] : first js argument
   //  -- sp[(argc + 4) * 4] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  Handle<JSFunction> function = optimization.constant_function();
-  __ LoadHeapObject(t1, function);
+  JSFunction* function = optimization.constant_function();
+  __ li(t1, Operand(Handle<JSFunction>(function)));
   __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data(api_call_info->data());
-  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
-    __ li(a0, api_call_info);
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ li(a0, api_call_info_handle);
     __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
   } else {
-    __ li(t2, call_data);
+    __ li(t2, Operand(Handle<Object>(call_data)));
   }
 
-  // Store JS function and call data.
+  // Store js function and call data.
   __ sw(t1, MemOperand(sp, 1 * kPointerSize));
   __ sw(t2, MemOperand(sp, 2 * kPointerSize));
 
@@ -642,9 +589,12 @@
   // (refer to layout above).
   __ Addu(a2, sp, Operand(2 * kPointerSize));
 
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
+
   const int kApiStackSpace = 4;
 
-  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -667,15 +617,16 @@
   // v8::Arguments::is_construct_call = 0
   __ sw(zero_reg, MemOperand(a1, 3 * kPointerSize));
 
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
   const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  ApiFunction fun(function_address);
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_API_CALL,
                         masm->isolate());
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 class CallInterceptorCompiler BASE_EMBEDDED {
@@ -689,63 +640,86 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  void Compile(MacroAssembler* masm,
-               Handle<JSObject> object,
-               Handle<JSObject> holder,
-               Handle<String> name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
     __ JumpIfSmi(receiver, miss);
+
     CallOptimization optimization(lookup);
+
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
-                       holder, lookup, name, optimization, miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
-      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
-                     name, holder, miss);
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                        Handle<JSObject> object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        Handle<JSObject> interceptor_holder,
-                        LookupResult* lookup,
-                        Handle<String> name,
-                        const CallOptimization& optimization,
-                        Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
+
     Counters* counters = masm->isolate()->counters();
+
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
-          !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(
-          object, interceptor_holder);
+        !lookup->holder()->IsGlobalObject()) {
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(
-            interceptor_holder, Handle<JSObject>(lookup->holder()));
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
       }
-      can_do_fast_api_call =
-          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
     }
 
     __ IncrementCounter(counters->call_const_interceptor(), 1,
-                        scratch1, scratch2);
+                      scratch1, scratch2);
 
     if (can_do_fast_api_call) {
       __ IncrementCounter(counters->call_const_interceptor_fast_api(), 1,
@@ -758,9 +732,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, depth1, miss);
+      stub_compiler_->CheckPrototypes(object, receiver,
+                                      interceptor_holder, scratch1,
+                                      scratch2, scratch3, name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -773,11 +747,10 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      Handle<JSObject>(lookup->holder()),
-                                      scratch1, scratch2, scratch3,
-                                      name, depth2, miss);
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -788,13 +761,16 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiDirectCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
           : CALL_AS_METHOD;
       __ InvokeFunction(optimization.constant_function(), arguments_,
-                        JUMP_FUNCTION, NullCallWrapper(), call_kind);
+                        JUMP_FUNCTION, call_kind);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
@@ -809,57 +785,66 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
+
+    return masm->isolate()->heap()->undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
-                      Handle<JSObject> object,
+                      JSObject* object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      Handle<String> name,
-                      Handle<JSObject> interceptor_holder,
+                      String* name,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, miss_label);
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
 
     // Call a runtime function to load the interceptor property.
-    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
               IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
               masm->isolate()),
           5);
+
     // Restore the name_ register.
     __ pop(name_);
-    // Leave the internal frame.
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           Handle<JSObject> holder_obj,
+                           JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      __ Push(holder, name_);
-      CompileCallLoadPropertyWithInterceptor(masm,
-                                             receiver,
-                                             holder,
-                                             name_,
-                                             holder_obj);
-      __ pop(name_);  // Restore the name.
-      __ pop(receiver);  // Restore the holder.
-    }
+    __ Push(holder, name_);
+
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
+
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
+
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
     __ Branch(interceptor_succeeded, ne, v0, Operand(scratch));
@@ -876,41 +861,52 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
-                                      Handle<GlobalObject> global,
-                                      Handle<String> name,
-                                      Register scratch,
-                                      Label* miss) {
-  Handle<JSGlobalPropertyCell> cell =
-      GlobalObject::EnsurePropertyCell(global, name);
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ li(scratch, Operand(cell));
+  __ li(scratch, Operand(Handle<Object>(cell)));
   __ lw(scratch,
         FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
   __ Branch(miss, ne, scratch, Operand(at));
+  return cell;
 }
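
The shape restored in GenerateCheckPropertyCell above recurs throughout this file: pre-handle-era V8 threads allocation failures through MaybeObject* by hand rather than relying on handles. In minimal form (a sketch of the idiom, with EnsurePropertyCell standing in for any allocating call):

  Object* probe;
  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
    // ToObject() returns false on a retry-after-GC failure; propagate the
    // failure object verbatim so the caller can unwind and retry.
    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
  }
  // Only now is it safe to treat 'probe' as a real object.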
 
 
 // Calls GenerateCheckPropertyCell for each global object in the prototype chain
 // from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
-                                       Handle<JSObject> object,
-                                       Handle<JSObject> holder,
-                                       Handle<String> name,
-                                       Register scratch,
-                                       Label* miss) {
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCells(
+    MacroAssembler* masm,
+    JSObject* object,
+    JSObject* holder,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
     if (current->IsGlobalObject()) {
-      GenerateCheckPropertyCell(masm,
-                                Handle<GlobalObject>::cast(current),
-                                name,
-                                scratch,
-                                miss);
+      // Returns a cell or a failure.
+      MaybeObject* result = GenerateCheckPropertyCell(
+          masm,
+          GlobalObject::cast(current),
+          name,
+          scratch,
+          miss);
+      if (result->IsFailure()) return result;
     }
-    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
   }
+  return NULL;
 }
 
 
@@ -943,7 +939,7 @@
     __ And(fval, ival, Operand(kBinary32SignMask));
     // Negate value if it is negative.
     __ subu(scratch1, zero_reg, ival);
-    __ Movn(ival, scratch1, fval);
+    __ movn(ival, scratch1, fval);
 
     // We have -1, 0 or 1, which we treat specially. Register ival contains
     // absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -957,14 +953,14 @@
     __ Xor(scratch1, ival, Operand(1));
     __ li(scratch2, exponent_word_for_1);
     __ or_(scratch2, fval, scratch2);
-    __ Movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
+    __ movz(fval, scratch2, scratch1);  // Only if ival is equal to 1.
     __ Branch(&done);
 
     __ bind(&not_special);
     // Count leading zeros.
     // Gets the wrong answer for 0, but we already checked for that case above.
     Register zeros = scratch2;
-    __ Clz(zeros, ival);
+    __ clz(zeros, ival);
 
     // Compute exponent and or it into the exponent register.
     __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
@@ -1034,13 +1030,13 @@
 #define __ ACCESS_MASM(masm())
 
 
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
-                                       Handle<JSObject> holder,
+                                       JSObject* holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       Handle<String> name,
+                                       String* name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -1058,50 +1054,81 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
-    ++depth;
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        name = factory()->LookupSymbol(name);
+        MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
+        Object* lookup_result = NULL;  // Initialization to please compiler.
+        if (!maybe_lookup_result->ToObject(&lookup_result)) {
+          set_failure(Failure::cast(maybe_lookup_result));
+          return reg;
+        }
+        name = String::cast(lookup_result);
       }
-      ASSERT(current->property_dictionary()->FindEntry(*name) ==
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
 
       __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
+      reg = holder_reg;  // From now on, the object is in holder_reg.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+        // Restore the scratch register to be the map of the object; below,
+        // we load the prototype from the map in the scratch register.
+        __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
       __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      Handle<Map> current_map(current->map());
-      __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
-                  ALLOW_ELEMENT_TRANSITION_MAPS);
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
+      // Check the map of the current object.
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      // Branch on the result of the map check.
+      __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (heap()->InNewSpace(*prototype)) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ li(reg, Operand(prototype));
-      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ li(reg, Operand(Handle<JSObject>(prototype)));
     }
 
     if (save_at_depth == depth) {
@@ -1112,57 +1139,65 @@
     current = prototype;
   }
 
+  // Check the holder map.
+  __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Branch(miss, ne, scratch1, Operand(Handle<Map>(current->map())));
+
   // Log the check depth.
   LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Check the holder map.
-  __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
-              DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
   // Perform security check for access to the global object.
   ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
   if (holder->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
+  }
 
-  // If we've skipped any global objects, it's not enough to verify that
-  // their maps haven't changed.  We also need to check that the property
-  // cell for the property is still empty.
-  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+
+  MaybeObject* result = GenerateCheckPropertyCells(masm(),
+                                                   object,
+                                                   holder,
+                                                   name,
+                                                   scratch1,
+                                                   miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
   return reg;
 }
 
 
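// A hedged sketch of the embedding rule CheckPrototypes enforces above.
// Old-space objects never move, so their addresses may be baked into the
// generated code as immediates; new-space objects can be moved by the GC,
// so the stub must reload them from a location the GC keeps up to date
// (here, the holder's map). `prototype`, `reg`, and `scratch1` are as in
// the code above; scratch1 is assumed to already hold the current map.
if (heap()->InNewSpace(prototype)) {
  // The prototype may move: load it from the map on every execution.
  __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
  // The prototype is immovable: embed the pointer directly.
  __ li(reg, Operand(Handle<JSObject>(prototype)));
}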
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
-                                     Handle<JSObject> holder,
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     Handle<String> name,
+                                     String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
+  __ And(scratch1, receiver, Operand(kSmiTagMask));
+  __ Branch(miss, eq, scratch1, Operand(zero_reg));
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                      name, miss);
   GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
   __ Ret();
 }
 
 
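// A minimal sketch of the open-coded smi check used in GenerateLoadField
// above. Smis are tagged with a zero low bit (kSmiTag == 0), so masking
// the receiver with kSmiTagMask and branching on a zero result jumps to
// `miss` exactly when the receiver is a smi:
__ And(scratch1, receiver, Operand(kSmiTagMask));
__ Branch(miss, eq, scratch1, Operand(zero_reg));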
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<JSFunction> value,
-                                        Handle<String> name,
+                                        Object* value,
+                                        String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
@@ -1173,77 +1208,83 @@
                       scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ LoadHeapObject(v0, value);
+  __ li(v0, Operand(Handle<Object>(value)));
   __ Ret();
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss, scratch1);
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
-                                 scratch2, scratch3, name, miss);
+  Register reg =
+    CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
+                    name, miss);
 
   // Build the AccessorInfo::args_ list on the stack and push the property
   // name below the exit frame, so the GC is aware of them, and store
   // pointers to them.
   __ push(receiver);
   __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
-  if (heap()->InNewSpace(callback->data())) {
-    __ li(scratch3, callback);
+  Handle<AccessorInfo> callback_handle(callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ li(scratch3, callback_handle);
     __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   } else {
-    __ li(scratch3, Handle<Object>(callback->data()));
+    __ li(scratch3, Handle<Object>(callback_handle->data()));
   }
   __ Push(reg, scratch3, name_reg);
   __ mov(a2, scratch2);  // Saved in case scratch2 == a1.
   __ mov(a1, sp);  // a1 (first argument - see note below) = Handle<String>
 
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
   // NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
   // struct from the function (which is currently the case). This means we pass
   // the arguments in a1-a2 instead of a0-a1. TryCallApiFunctionAndReturn
   // will handle setting up a0.
 
   const int kApiStackSpace = 1;
-  FrameScope frame_scope(masm(), StackFrame::MANUAL);
-  __ EnterExitFrame(false, kApiStackSpace);
 
+  __ EnterExitFrame(false, kApiStackSpace);
   // Create AccessorInfo instance on the stack above the exit frame with
-  // scratch2 (internal::Object** args_) as the data.
+  // scratch2 (internal::Object** args_) as the data.
   __ sw(a2, MemOperand(sp, kPointerSize));
   // a2 (second argument - see note above) = AccessorInfo&
   __ Addu(a2, sp, kPointerSize);
 
-  const int kStackUnwindSpace = 4;
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
   ExternalReference ref =
       ExternalReference(&fun,
                         ExternalReference::DIRECT_GETTER_CALL,
                         masm()->isolate());
-  __ CallApiFunctionAndReturn(ref, kStackUnwindSpace);
+  // 4 args - will be freed later by LeaveExitFrame.
+  return masm()->TryCallApiFunctionAndReturn(ref, 4);
 }
 
 
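// Why the Try-variant above: emitting the call stub may itself allocate
// (if that code is not already generated), and the assembler must not
// perform a garbage collection here, so any allocation failure propagates
// out as a MaybeObject*. A hedged sketch of how a caller is expected to
// consume this function's result (the surrounding caller is hypothetical):
MaybeObject* result = GenerateLoadCallback(object, holder, receiver, name_reg,
                                           scratch1, scratch2, scratch3,
                                           callback, name, &miss);
if (result->IsFailure()) return result;  // caller GCs and retries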
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
-                                           Handle<JSObject> interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           Handle<String> name,
+                                           String* name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1255,13 +1296,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsAccessorInfo()) {
-      compile_followup_inline =
-          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
     }
   }
 
@@ -1276,44 +1317,47 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
-        __ Push(receiver, holder_reg, name_reg);
-      } else {
-        __ Push(holder_reg, name_reg);
-      }
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(masm(),
-                                             receiver,
-                                             holder_reg,
-                                             name_reg,
-                                             interceptor_holder);
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
+    __ EnterInternalFrame();
 
-      __ bind(&interceptor_failed);
-      __ pop(name_reg);
-      __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        __ pop(receiver);
-      }
-      // Leave the internal frame.
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ Push(receiver, holder_reg, name_reg);
+    } else {
+      __ Push(holder_reg, name_reg);
     }
+
+    // Invoke an interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+    __ LeaveInternalFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
+
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   Handle<JSObject>(lookup->holder()),
+                                   lookup->holder(),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1325,21 +1369,21 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), v0, holder_reg,
-                               Handle<JSObject>(lookup->holder()),
-                               lookup->GetFieldIndex());
+                               lookup->holder(), lookup->GetFieldIndex());
       __ Ret();
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
       // Important invariant in CALLBACKS case: the code above must be
       // structured to never clobber |receiver| register.
-      __ li(scratch2, callback);
+      __ li(scratch2, Handle<AccessorInfo>(callback));
       // holder_reg is either receiver or scratch1.
       if (!receiver.is(holder_reg)) {
         ASSERT(scratch1.is(holder_reg));
@@ -1375,16 +1419,16 @@
 }
 
 
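// A sketch of the frame discipline restored above: 3.6 pairs explicit
// EnterInternalFrame()/LeaveInternalFrame() calls, so every path out of
// the region, fast or slow, must leave the frame itself (there is no
// FrameScope to do this automatically):
__ EnterInternalFrame();
// ... push state, call the interceptor ...
__ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
__ LeaveInternalFrame();  // fast path: the interceptor produced a value
__ Ret();
__ bind(&interceptor_failed);
// ... pop the saved state ...
__ LeaveInternalFrame();  // the slow path leaves the frame as well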
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Branch(miss, ne, a2, Operand(name));
+    __ Branch(miss, ne, a2, Operand(Handle<String>(name)));
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<String> name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+                                                   JSObject* holder,
+                                                   String* name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1394,22 +1438,27 @@
   // Get the receiver from the stack.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
+  // If the object is the holder, then we know that it is a global
+  // object, which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(a0, miss);
+  }
+
   // Check that the maps haven't changed.
-  __ JumpIfSmi(a0, miss);
   CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);
 }
 
 
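// A sketch of the reasoning behind the conditional smi check above: when
// the receiver is known to be the holder, it must be the global object
// (only contextual calls hit this case), and a global object is never a
// smi, so the check is provably redundant:
if (object != holder) {
  __ JumpIfSmi(a0, miss);  // the receiver could be an arbitrary value
}
CheckPrototypes(object, a0, holder, a3, a1, t0, name, miss);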
-void CallStubCompiler::GenerateLoadFunctionFromCell(
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    Label* miss) {
   // Get the value from the cell.
-  __ li(a3, Operand(cell));
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ lw(a1, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(*function)) {
+  if (heap()->InNewSpace(function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1424,24 +1473,27 @@
     __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ Branch(miss, ne, t0, Operand(a3));
   } else {
-    __ Branch(miss, ne, a1, Operand(function));
+    __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
   }
 }
 
 
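// A sketch of the cell check above: a new-space function pointer cannot
// be embedded in code, so the stub compares the shared function info
// instead, which as a side effect lets multiple closures of the same
// function share one stub. `a3` is assumed to hold the expected
// SharedFunctionInfo, loaded in the lines elided between the two hunks:
if (heap()->InNewSpace(function)) {
  __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ Branch(miss, ne, t0, Operand(a3));
} else {
  __ Branch(miss, ne, a1, Operand(Handle<JSFunction>(function)));
}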
-void CallStubCompiler::GenerateMissBranch() {
-  Handle<Code> code =
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_state_);
-  __ Jump(code, RelocInfo::CODE_TARGET);
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
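// The revert switches every entry point below from Handle<Code> back to
// MaybeObject*, so allocation failures must be threaded out by hand. A
// minimal sketch of the two propagation idioms repeated throughout the
// rest of this file:
MaybeObject* maybe_result = GenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;  // GC and retry
// ... or, when the value itself is needed:
Object* obj;
if (!maybe_result->ToObject(&obj)) return maybe_result;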
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1461,23 +1513,23 @@
   Register reg = CheckPrototypes(object, a0, holder, a1, a3, t0, name, &miss);
   GenerateFastPropertyLoad(masm(), a1, reg, holder, index);
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1487,7 +1539,7 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss;
 
@@ -1503,8 +1555,8 @@
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, a3, v0, t0,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object), receiver,
+                  holder, a3, v0, t0, name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
@@ -1513,20 +1565,22 @@
     __ Ret();
   } else {
     Label call_builtin;
+
+    Register elements = a3;
+    Register end_elements = t1;
+
+    // Get the elements array of the object.
+    __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ CheckMap(elements,
+                v0,
+                Heap::kFixedArrayMapRootIndex,
+                &call_builtin,
+                DONT_DO_SMI_CHECK);
+
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label attempt_to_grow_elements;
-
-      Register elements = t2;
-      Register end_elements = t1;
-      // Get the elements array of the object.
-      __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
-
-      // Check that the elements are in fast mode and writable.
-      __ CheckMap(elements,
-                  v0,
-                  Heap::kFixedArrayMapRootIndex,
-                  &call_builtin,
-                  DONT_DO_SMI_CHECK);
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into v0 and calculate new length.
       __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1534,77 +1588,35 @@
       STATIC_ASSERT(kSmiTag == 0);
       __ Addu(v0, v0, Operand(Smi::FromInt(argc)));
 
-      // Get the elements' length.
+      // Get the elements' length.
       __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
 
-      // Check if value is a smi.
-      Label with_write_barrier;
-      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ JumpIfNotSmi(t0, &with_write_barrier);
-
       // Save new length.
       __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
-      // Store the value.
+      // Push the element.
+      __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
       __ Addu(end_elements, elements, end_elements);
       const int kEndElementsOffset =
           FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-      __ Addu(end_elements, end_elements, kEndElementsOffset);
-      __ sw(t0, MemOperand(end_elements));
+      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
+      __ Addu(end_elements, end_elements, kPointerSize);
 
       // Check for a smi.
+      __ JumpIfNotSmi(t0, &with_write_barrier);
+      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-
-      __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-      if (FLAG_smi_only_arrays  && !FLAG_trace_elements_transitions) {
-        Label fast_object, not_fast_object;
-        __ CheckFastObjectElements(a3, t3, &not_fast_object);
-        __ jmp(&fast_object);
-        // In case of fast smi-only, convert to fast object, otherwise bail out.
-        __ bind(&not_fast_object);
-        __ CheckFastSmiOnlyElements(a3, t3, &call_builtin);
-        // edx: receiver
-        // r3: map
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                               FAST_ELEMENTS,
-                                               a3,
-                                               t3,
-                                               &call_builtin);
-        __ mov(a2, receiver);
-        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
-        __ bind(&fast_object);
-      } else {
-        __ CheckFastObjectElements(a3, a3, &call_builtin);
-      }
-
-      // Save new length.
-      __ sw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
-      // Store the value.
-      // We may need a register containing the address end_elements below,
-      // so write back the value in end_elements.
-      __ sll(end_elements, v0, kPointerSizeLog2 - kSmiTagSize);
-      __ Addu(end_elements, elements, end_elements);
-      __ Addu(end_elements, end_elements, kEndElementsOffset);
-      __ sw(t0, MemOperand(end_elements));
-
-      __ RecordWrite(elements,
-                     end_elements,
-                     t0,
-                     kRAHasNotBeenSaved,
-                     kDontSaveFPRegs,
-                     EMIT_REMEMBERED_SET,
-                     OMIT_SMI_CHECK);
+      __ InNewSpace(elements, t0, eq, &exit);
+      __ RecordWriteHelper(elements, end_elements, t0);
       __ Drop(argc + 1);
       __ Ret();
 
@@ -1616,15 +1628,6 @@
         __ Branch(&call_builtin);
       }
 
-      __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize));
-      // Growing elements that are SMI-only requires special handling in case
-      // the new element is non-Smi. For now, delegate to the builtin.
-      Label no_fast_elements_check;
-      __ JumpIfSmi(a2, &no_fast_elements_check);
-      __ lw(t3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(t3, t3, &call_builtin);
-      __ bind(&no_fast_elements_check);
-
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(
               masm()->isolate());
@@ -1638,23 +1641,24 @@
       __ Addu(end_elements, elements, end_elements);
       __ Addu(end_elements, end_elements, Operand(kEndElementsOffset));
       __ li(t3, Operand(new_space_allocation_top));
-      __ lw(a3, MemOperand(t3));
-      __ Branch(&call_builtin, ne, end_elements, Operand(a3));
+      __ lw(t2, MemOperand(t3));
+      __ Branch(&call_builtin, ne, end_elements, Operand(t2));
 
       __ li(t5, Operand(new_space_allocation_limit));
       __ lw(t5, MemOperand(t5));
-      __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize));
-      __ Branch(&call_builtin, hi, a3, Operand(t5));
+      __ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
+      __ Branch(&call_builtin, hi, t2, Operand(t5));
 
       // We fit and could grow elements.
       // Update new_space_allocation_top.
-      __ sw(a3, MemOperand(t3));
+      __ sw(t2, MemOperand(t3));
       // Push the argument.
-      __ sw(a2, MemOperand(end_elements));
+      __ lw(t2, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ sw(t2, MemOperand(end_elements));
       // Fill the rest with holes.
-      __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+      __ LoadRoot(t2, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
-        __ sw(a3, MemOperand(end_elements, i * kPointerSize));
+        __ sw(t2, MemOperand(end_elements, i * kPointerSize));
       }
 
       // Update elements' and array's sizes.
@@ -1675,19 +1679,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
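// A sketch of the fast-path growth check in CompileArrayPushCall above:
// the elements array can only be grown in place when it is the newest
// object in new space, i.e. its end coincides with the allocation top,
// and the kAllocationDelta extra words still fit under the allocation
// limit (t5 holds the limit, as loaded above):
__ li(t3, Operand(new_space_allocation_top));
__ lw(t2, MemOperand(t3));
__ Branch(&call_builtin, ne, end_elements, Operand(t2));  // not the newest
__ Addu(t2, t2, Operand(kAllocationDelta * kPointerSize));
__ Branch(&call_builtin, hi, t2, Operand(t5));            // past the limit
__ sw(t2, MemOperand(t3));  // claim the space by bumping the top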
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                                   JSObject* holder,
+                                                   JSGlobalPropertyCell* cell,
+                                                   JSFunction* function,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -1697,22 +1701,25 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss, return_undefined, call_builtin;
+
   Register receiver = a1;
   Register elements = a3;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
-                  t0, v0, name, &miss);
+  CheckPrototypes(JSObject::cast(object),
+                  receiver, holder, elements, t0, v0, name, &miss);
 
   // Get the elements array of the object.
   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
@@ -1761,19 +1768,20 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1783,9 +1791,10 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
 
   const int argc = arguments().immediate();
+
   Label miss;
   Label name_miss;
   Label index_out_of_range;
@@ -1793,7 +1802,7 @@
   Label* index_out_of_range_label = &index_out_of_range;
 
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
@@ -1805,92 +1814,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             v0,
                                             &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  v0, holder, a1, a3, t0, name, &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
 
   Register receiver = a1;
   Register index = t1;
-  Register result = v0;
-  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
-  if (argc > 0) {
-    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
-  } else {
-    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
-  }
-
-  StringCharCodeAtGenerator generator(receiver,
-                                      index,
-                                      result,
-                                      &miss,  // When not a string.
-                                      &miss,  // When not a number.
-                                      index_out_of_range_label,
-                                      STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
-  __ Drop(argc + 1);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ LoadRoot(v0, Heap::kNanValueRootIndex);
-    __ Drop(argc + 1);
-    __ Ret();
-  }
-
-  __ bind(&miss);
-  // Restore function name in a2.
-  __ li(a2, name);
-  __ bind(&name_miss);
-  GenerateMissBranch();
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
-  // ----------- S t a t e -------------
-  //  -- a2                     : function name
-  //  -- ra                     : return address
-  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
-  //  -- ...
-  //  -- sp[argc * 4]           : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
-  const int argc = arguments().immediate();
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            v0,
-                                            &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  v0, holder, a1, a3, t0, name, &miss);
-
-  Register receiver = v0;
-  Register index = t1;
   Register scratch = a3;
   Register result = v0;
   __ lw(receiver, MemOperand(sp, argc * kPointerSize));
@@ -1900,20 +1829,108 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator generator(receiver,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &miss,  // When not a string.
-                                  &miss,  // When not a number.
-                                  index_out_of_range_label,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(v0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
+
+  __ bind(&miss);
+  // Restore function name in a2.
+  __ li(a2, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
+}
+
+
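// In the protocol restored here, a custom call generator signals "no
// specialized stub, use the generic call path" by returning
// heap()->undefined_value() instead of a null handle. A hedged sketch of
// the caller side, following the pattern visible in CompileCallConstant
// further below:
MaybeObject* maybe_result = CompileCustomCall(object, holder, NULL,
                                              function, name);
Object* result;
if (!maybe_result->ToObject(&result)) return maybe_result;  // real failure
if (!result->IsUndefined()) return result;  // a specialized stub was built
// ... otherwise fall through to the generic path ...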
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
+  // ----------- S t a t e -------------
+  //  -- a2                     : function name
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
+  //  -- ...
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            v0,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), v0, holder,
+                  a1, a3, t0, name, &miss);
+
+  Register receiver = v0;
+  Register index = t1;
+  Register scratch1 = a1;
+  Register scratch2 = a3;
+  Register result = v0;
+  __ lw(receiver, MemOperand(sp, argc * kPointerSize));
+  if (argc > 0) {
+    __ lw(index, MemOperand(sp, (argc - 1) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ Drop(argc + 1);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
@@ -1924,21 +1941,22 @@
 
   __ bind(&miss);
   // Restore function name in a2.
-  __ li(a2, name);
+  __ li(a2, Handle<String>(name));
   __ bind(&name_miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -1951,23 +1969,22 @@
 
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
 
-    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
-                    name, &miss);
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1983,35 +2000,34 @@
   // Convert the smi code to uint16.
   __ And(code, code, Operand(Smi::FromInt(0xffff)));
 
-  StringCharFromCodeGenerator generator(code, v0);
-  generator.GenerateFast(masm());
+  StringCharFromCodeGenerator char_from_code_generator(code, v0);
+  char_from_code_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
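// A short note on the masking trick above: the char code arrives as a
// smi, and since the smi tag is the zero low bit, ANDing with the
// smi-encoded constant clears the high payload bits while preserving the
// tag, so the result is still a valid smi in the range 0..0xffff:
__ And(code, code, Operand(Smi::FromInt(0xffff)));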
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2020,29 +2036,30 @@
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
-  if (!CpuFeatures::IsSupported(FPU)) {
-    return Handle<Code>::null();
-  }
-
+  if (!CpuFeatures::IsSupported(FPU)) {
+    return heap()->undefined_value();
+  }
   CpuFeatures::Scope scope_fpu(FPU);
+
   const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2128,24 +2145,23 @@
   __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                                  JSObject* holder,
+                                                  JSGlobalPropertyCell* cell,
+                                                  JSFunction* function,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- a2                     : function name
   //  -- ra                     : return address
@@ -2155,23 +2171,25 @@
   // -----------------------------------
 
   const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
-
   GenerateNameCheck(name, &miss);
-  if (cell.is_null()) {
+
+  if (cell == NULL) {
     __ lw(a1, MemOperand(sp, 1 * kPointerSize));
+
     STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfSmi(a1, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, v0, a3, t0,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), a1, holder, v0, a3, t0, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -2229,37 +2247,37 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
 
   __ bind(&miss);
   // a2: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileFastApiCall(
+MaybeObject* CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
 
   Counters* counters = isolate()->counters();
 
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return Handle<Code>::null();
-  if (!cell.is_null()) return Handle<Code>::null();
-  if (!object->IsJSObject()) return Handle<Code>::null();
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-      Handle<JSObject>::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
 
   Label miss, miss_before_stack_reserved;
 
@@ -2278,37 +2296,40 @@
   ReserveSpaceForFastApiCall(masm(), a0);
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name,
+  CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
                   depth, &miss);
 
-  GenerateFastApiDirectCall(masm(), optimization, argc);
+  MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
 
   __ bind(&miss);
   FreeSpaceForFastApiCall(masm());
 
   __ bind(&miss_before_stack_reserved);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
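// A sketch of the two miss labels in CompileFastApiCall above: stack
// space reserved by ReserveSpaceForFastApiCall must be released on
// exactly the paths that reserved it, so misses raised before the
// reservation bypass the FreeSpaceForFastApiCall call:
__ bind(&miss);                         // raised after space was reserved
FreeSpaceForFastApiCall(masm());
__ bind(&miss_before_stack_reserved);   // raised before any reservation
MaybeObject* maybe_result = GenerateMissBranch();
if (maybe_result->IsFailure()) return maybe_result;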
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+                                                   JSObject* holder,
+                                                   JSFunction* function,
+                                                   String* name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
@@ -2321,20 +2342,23 @@
 
   // Check that the receiver isn't a smi.
   if (check != NUMBER_CHECK) {
-    __ JumpIfSmi(a1, &miss);
+    __ And(t1, a1, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t1, Operand(zero_reg));
   }
 
   // Make sure that it's okay not to patch the on stack receiver
   // unless we're doing a receiver map check.
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(masm()->isolate()->counters()->call_const(),
           1, a0, a3);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0,
-                      name, &miss);
+      CheckPrototypes(JSObject::cast(object), a1, holder, a0, a3, t0, name,
+                      &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2345,46 +2369,50 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         // Check that the object is a two-byte string or a symbol.
         __ GetObjectType(a1, a3, a3);
         __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
       }
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         Label fast;
         // Check that the object is a smi or a heap number.
-        __ JumpIfSmi(a1, &fast);
+        __ And(t1, a1, Operand(kSmiTagMask));
+        __ Branch(&fast, eq, t1, Operand(zero_reg));
         __ GetObjectType(a1, a0, a0);
         __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
         __ bind(&fast);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      }
-      break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      } else {
         Label fast;
         // Check that the object is a boolean.
         __ LoadRoot(t0, Heap::kTrueValueRootIndex);
@@ -2395,36 +2423,35 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), a0, holder, a3,
+                        a1, t0, name, &miss);
       }
       break;
     }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+    default:
+      UNREACHABLE();
+  }
+
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+  __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
 
   // Handle call cache miss.
   __ bind(&miss);
 
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
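
The reverted lines above replace the JumpIfSmi macro with an explicit And/Branch
on kSmiTagMask; both test the same one-bit pointer tag. A minimal standalone
sketch of that encoding, assuming the classic 32-bit scheme (smis shifted left
one bit with a clear tag bit, heap pointers tagged with a set bit); the constant
names echo the diff, the values are our assumption:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
const intptr_t kHeapObjectTag = 1;

// Tag an integer as a smi: shift left one bit, leaving the tag bit clear.
intptr_t TagSmi(int32_t value) {
  return static_cast<intptr_t>(value) << kSmiTagSize;
}

// The test emitted as: And(t1, a1, Operand(kSmiTagMask));
//                      Branch(&fast, eq, t1, Operand(zero_reg));
bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }

int main() {
  assert(IsSmi(TagSmi(42)));
  assert(!IsSmi(0x1000 | kHeapObjectTag));  // a tagged heap pointer
  return 0;
}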
 
 
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
@@ -2436,54 +2463,71 @@
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
-  LookupResult lookup(isolate());
+
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ lw(a1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), a2, extra_state_);
-  compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
-                   &miss);
+  CallInterceptorCompiler compiler(this, arguments(), a2, extra_ic_state_);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         a1,
+                                         a3,
+                                         t0,
+                                         a0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // Move returned value, the function to call, to a1.
   __ mov(a1, v0);
   // Restore receiver.
   __ lw(a0, MemOperand(sp, argc * kPointerSize));
 
-  GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
+  GenerateCallFunction(masm(), object, arguments(), &miss, extra_ic_state_);
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
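
Every entry point in this hunk goes back from returning Handle<Code> to
returning MaybeObject*, with failures checked by hand after each allocating
step. A toy model of that calling convention, under the assumption that a
failure is just a sentinel the caller must bubble up (the types are stand-ins,
not V8's definitions):

#include <cassert>

struct Object {};

struct MaybeObject {
  Object* value;  // valid only when failure is false
  bool failure;   // allocation failed; the caller must retry after GC
  bool IsFailure() const { return failure; }
  bool ToObject(Object** out) const {
    if (failure) return false;
    *out = value;
    return true;
  }
};

static Object the_stub;

MaybeObject AllocateStub(bool oom) {
  return oom ? MaybeObject{nullptr, true} : MaybeObject{&the_stub, false};
}

// Shape of the reverted code: a failing sub-step is returned verbatim,
// as in "if (maybe_result->IsFailure()) return maybe_result;".
MaybeObject CompileStub(bool oom) {
  MaybeObject maybe_result = AllocateStub(oom);
  if (maybe_result.IsFailure()) return maybe_result;
  Object* result;
  if (!maybe_result.ToObject(&result)) return maybe_result;
  return MaybeObject{result, false};
}

int main() {
  assert(!CompileStub(false).IsFailure());
  assert(CompileStub(true).IsFailure());
  return 0;
}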
 
 
-Handle<Code> CallStubCompiler::CompileCallGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* name) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // Undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
+
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
@@ -2494,37 +2538,40 @@
     __ sw(a3, MemOperand(sp, argc * kPointerSize));
   }
 
-  // Set up the context (function already in r1).
+  // Set up the context (function already in a1).
   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1, a3, t0);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
-  __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
-                NullCallWrapper(), call_kind);
+  if (V8::UseCrankshaft()) {
+    UNIMPLEMENTED_MIPS();
+  } else {
+    __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
+                  JUMP_FUNCTION, call_kind);
+  }
 
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1, a1, a3);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
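
The lines removed just above explain why the newer code called indirectly
through the function's code-entry field: recompilation takes effect without
patching call sites, whereas the restored code bakes a Handle<Code> into the
stub at compile time. A toy illustration of that difference using a plain
function pointer field (nothing here is a real V8 structure):

#include <cstdio>

struct JSFunctionLike {
  void (*code_entry)();  // analogue of the JSFunction code-entry slot
};

void unoptimized_code() { std::puts("unoptimized"); }
void optimized_code() { std::puts("optimized"); }

int main() {
  JSFunctionLike f = {unoptimized_code};
  f.code_entry();                 // indirect call: always the current code
  f.code_entry = optimized_code;  // "recompilation" swaps the target
  f.code_entry();                 // same call site, new code
  return 0;
}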
 
 
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
-                                                  Handle<Map> transition,
-                                                  Handle<String> name) {
+                                                  Map* transition,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2534,21 +2581,25 @@
   Label miss;
 
   // Name register might be clobbered.
-  GenerateStoreField(masm(), object, index, transition, a1, a2, a3, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a1, a2, a3,
+                     &miss);
   __ bind(&miss);
   __ li(a2, Operand(Handle<String>(name)));  // Restore name.
   Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<AccessorInfo> callback,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                     AccessorInfo* callback,
+                                                     String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2557,9 +2608,12 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(a1, a3, Handle<Map>(object->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map())));
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2571,7 +2625,7 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   __ push(a1);  // Receiver.
-  __ li(a3, Operand(callback));  // Callback info.
+  __ li(a3, Operand(Handle<AccessorInfo>(callback)));  // Callback info.
   __ Push(a3, a2, a0);
 
   // Do tail-call to the runtime system.
@@ -2590,9 +2644,8 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> receiver,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2601,9 +2654,12 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(a1, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a3, Operand(Handle<Map>(receiver->map())));
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2635,10 +2691,9 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
-    Handle<GlobalObject> object,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                                   JSGlobalPropertyCell* cell,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : receiver
@@ -2655,7 +2710,7 @@
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ li(t0, Operand(cell));
+  __ li(t0, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ Branch(&miss, eq, t1, Operand(t2));
@@ -2663,8 +2718,6 @@
   // Store the value in the cell.
   __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
   __ mov(v0, a0);  // Stored value must be returned in v0.
-  // Cells are always rescanned, so no write barrier here.
-
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
   __ Ret();
@@ -2680,9 +2733,9 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- ra    : return address
@@ -2698,8 +2751,15 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  a1,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if the maps of the full prototype chain are unchanged.
@@ -2710,14 +2770,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, factory()->empty_string());
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2736,19 +2796,24 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   Label miss;
-  GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, callback, name,
-                       &miss);
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0,
+                                             callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2757,10 +2822,10 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> value,
-                                                   Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2777,9 +2842,9 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2788,9 +2853,17 @@
   // -----------------------------------
   Label miss;
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
+  GenerateLoadInterceptor(object,
+                          holder,
+                          &lookup,
+                          a0,
+                          a2,
+                          a3,
+                          a1,
+                          t0,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -2800,12 +2873,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name,
-    bool is_dont_delete) {
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- a0    : receiver
   //  -- a2    : name
@@ -2813,12 +2885,19 @@
   // -----------------------------------
   Label miss;
 
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ And(t0, a0, Operand(kSmiTagMask));
+    __ Branch(&miss, eq, t0, Operand(zero_reg));
+  }
+
   // Check that the map of the global has not changed.
-  __ JumpIfSmi(a0, &miss);
   CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
 
   // Get the value from the cell.
-  __ li(a3, Operand(cell));
+  __ li(a3, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ lw(t0, FieldMemOperand(a3, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2841,9 +2920,9 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
-                                                     Handle<JSObject> receiver,
-                                                     Handle<JSObject> holder,
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
@@ -2853,7 +2932,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
   GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
   __ bind(&miss);
@@ -2863,11 +2942,11 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+    String* name,
+    JSObject* receiver,
+    JSObject* holder,
+    AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2876,10 +2955,15 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
-  GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, callback, name,
-                       &miss);
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, a1, a0, a2, a3,
+                                             t0, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2887,11 +2971,10 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<JSFunction> value) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                        JSObject* receiver,
+                                                        JSObject* holder,
+                                                        Object* value) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2900,7 +2983,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
   GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
   __ bind(&miss);
@@ -2911,10 +2994,9 @@
 }
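
The keyed stubs here guard with a single Branch comparing the key register
against the cached name. That is sound because property names in this era of
V8 are interned ("symbols"), so pointer identity implies string equality. A
sketch of the idea with a toy intern table (an assumption for illustration,
not V8's string table):

#include <cassert>
#include <string>
#include <unordered_map>

// Return a stable canonical pointer for each distinct string value.
const std::string* Intern(const std::string& s) {
  static std::unordered_map<std::string, std::string> table;
  return &table.emplace(s, s).first->second;
}

int main() {
  const std::string* a = Intern("length");
  const std::string* b = Intern(std::string("len") + "gth");
  // The analogue of: Branch(&miss, ne, a0, Operand(Handle<String>(name)));
  assert(a == b);
  return 0;
}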
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                           JSObject* holder,
+                                                           String* name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2923,11 +3005,19 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          a1,
+                          a0,
+                          a2,
+                          a3,
+                          t0,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2936,8 +3026,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2946,7 +3035,7 @@
   Label miss;
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
   GenerateLoadArrayLength(masm(), a1, a2, &miss);
   __ bind(&miss);
@@ -2956,8 +3045,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2969,7 +3057,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
 
   // Check the key is the cached one.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
   GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
   __ bind(&miss);
@@ -2981,8 +3069,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -2994,7 +3081,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
 
   // Check the name hasn't changed.
-  __ Branch(&miss, ne, a0, Operand(name));
+  __ Branch(&miss, ne, a0, Operand(Handle<String>(name)));
 
   GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
   __ bind(&miss);
@@ -3005,29 +3092,33 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
   //  -- a1    : receiver
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a1,
+                 a2,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_ics) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
@@ -3039,8 +3130,9 @@
   int receiver_count = receiver_maps->length();
   __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
   for (int current = 0; current < receiver_count; ++current) {
-    __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
-        eq, a2, Operand(receiver_maps->at(current)));
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a2, Operand(map));
   }
 
   __ bind(&miss);
@@ -3048,14 +3140,14 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
-                                                       Handle<Map> transition,
-                                                       Handle<String> name) {
+                                                       Map* transition,
+                                                       String* name) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3069,11 +3161,16 @@
   __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0);
 
   // Check that the name has not changed.
-  __ Branch(&miss, ne, a1, Operand(name));
+  __ Branch(&miss, ne, a1, Operand(Handle<String>(name)));
 
   // a3 is used as a scratch register. a1 and a2 keep their values if a jump
   // to the miss label is generated.
-  GenerateStoreField(masm(), object, index, transition, a2, a1, a3, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     a2, a1, a3,
+                     &miss);
   __ bind(&miss);
 
   __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0);
@@ -3081,12 +3178,11 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3094,25 +3190,29 @@
   //  -- ra    : return address
   //  -- a3    : scratch
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub =
-      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
-  __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(a2,
+                 a3,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -3125,17 +3225,10 @@
 
   int receiver_count = receiver_maps->length();
   __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_count; ++i) {
-    if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
-          a3, Operand(receiver_maps->at(i)));
-    } else {
-      Label next_map;
-      __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i)));
-      __ li(a3, Operand(transitioned_maps->at(i)));
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<Map> map(receiver_maps->at(current));
+    Handle<Code> code(handler_ics->at(current));
+    __ Jump(code, RelocInfo::CODE_TARGET, eq, a3, Operand(map));
   }
 
   __ bind(&miss);
@@ -3143,12 +3236,11 @@
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
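
CompileStoreMegamorphic above (and its load counterpart) emits one map compare
plus a conditional tail jump per cached receiver map, then falls through to
the miss stub. The same dispatch shape, sketched with toy types (the Map and
handler stand-ins are assumptions, not V8's):

#include <cstdio>
#include <vector>

struct Map {};              // stand-in for a hidden class
typedef void (*Handler)();  // stand-in for a compiled handler stub

void MissStub() { std::puts("miss -> generic IC"); }
void HandlerA() { std::puts("handler for map A"); }

// Mirrors the emitted loop: Jump(code, CODE_TARGET, eq, a3, Operand(map)).
void Dispatch(const Map* receiver_map,
              const std::vector<const Map*>& maps,
              const std::vector<Handler>& handlers) {
  for (size_t i = 0; i < maps.size(); ++i) {
    if (receiver_map == maps[i]) {
      handlers[i]();  // taken jump on a matching map
      return;
    }
  }
  MissStub();  // bind(&miss); Jump(miss_ic, CODE_TARGET);
}

int main() {
  Map a, b;
  Dispatch(&a, {&a}, {HandlerA});  // hits the cached handler
  Dispatch(&b, {&a}, {HandlerA});  // falls through to the miss stub
  return 0;
}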
 
 
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
-    Handle<JSFunction> function) {
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // a0    : argc
   // a1    : constructor
   // ra    : return address
@@ -3171,7 +3263,8 @@
   // a1: constructor function
   // t7: undefined
   __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(a2, &generic_stub_call);
+  __ And(t0, a2, Operand(kSmiTagMask));
+  __ Branch(&generic_stub_call, eq, t0, Operand(zero_reg));
   __ GetObjectType(a2, a3, t0);
   __ Branch(&generic_stub_call, ne, t0, Operand(MAP_TYPE));
 
@@ -3192,7 +3285,12 @@
   // a2: initial map
   // t7: undefined
   __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
+  __ AllocateInNewSpace(a3,
+                        t4,
+                        t5,
+                        t6,
+                        &generic_stub_call,
+                        SIZE_IN_WORDS);
 
   // Allocated the JSObject, now initialize the fields. Map is set to initial
   // map and properties and elements are set to empty fixed array.
@@ -3227,7 +3325,7 @@
   // t7: undefined
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  Handle<SharedFunctionInfo> shared(function->shared());
+  SharedFunctionInfo* shared = function->shared();
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       Label not_passed, next;
@@ -3359,7 +3457,6 @@
 
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -3456,7 +3553,6 @@
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3623,7 +3719,7 @@
 
       __ li(t0, 0x7ff);
       __ Xor(t1, t5, Operand(0xFF));
-      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
+      __ movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
       __ Branch(&exponent_rebiased, eq, t0, Operand(0xff));
 
       // Rebias exponent.
@@ -3699,9 +3795,9 @@
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 
   __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
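
In the external-array store hunks below, the revert reintroduces an explicit
SmiUntag of the key into t0 followed by a shift of log2(element size), while
the newer code it removes folded the one-bit smi tag into the scale (srl by 1
for bytes, addu for shorts, sll by 1 for words). Both produce the same byte
offset; a host-side sketch checking that equivalence, assuming 32-bit smis
with a one-bit tag:

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;

// Reverted scheme: SmiUntag(t0, key); sll(t8, t0, log2_size).
uint32_t OffsetFromUntagged(uint32_t tagged_key, int log2_size) {
  uint32_t key = tagged_key >> kSmiTagSize;
  return key << log2_size;
}

// Removed scheme: shift the tagged key so the tag folds into the scale,
// e.g. srl(t8, key, 1) for bytes and sll(t8, key, 2) for doubles.
uint32_t OffsetFromTagged(uint32_t tagged_key, int log2_size) {
  int shift = log2_size - kSmiTagSize;
  return shift >= 0 ? tagged_key << shift : tagged_key >> -shift;
}

int main() {
  for (uint32_t i = 0; i < 16; ++i) {
    uint32_t tagged = i << kSmiTagSize;
    for (int log2_size = 0; log2_size <= 3; ++log2_size) {
      assert(OffsetFromUntagged(tagged, log2_size) ==
             OffsetFromTagged(tagged, log2_size));
    }
  }
  return 0;
}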
 
 
@@ -3732,6 +3828,7 @@
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the index is in range.
+  __ SmiUntag(t0, key);
   __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3739,6 +3836,7 @@
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // a3: external array.
+  // t0: key (integer).
 
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
@@ -3750,6 +3848,7 @@
   __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
   // a3: base pointer of external storage.
+  // t0: key (integer).
   // t1: value (integer).
 
   switch (elements_kind) {
@@ -3766,36 +3865,33 @@
       __ mov(v0, t1);  // Value is in range 0..255.
       __ bind(&done);
       __ mov(t1, v0);
-
-      __ srl(t8, key, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, t0);
       __ sb(t1, MemOperand(t8, 0));
       }
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ srl(t8, key, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, t0);
       __ sb(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ addu(t8, a3, key);
+      __ sll(t8, t0, 1);
+      __ addu(t8, a3, t8);
       __ sh(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, key, 1);
+      __ sll(t8, t0, 2);
       __ addu(t8, a3, t8);
       __ sw(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
-      __ SmiUntag(t0, key);
       StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, key, 2);
+      __ sll(t8, t0, 3);
       __ addu(a3, a3, t8);
       // a3: effective address of the double element
       FloatingPointHelper::Destination destination;
@@ -3817,7 +3913,6 @@
       }
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3826,11 +3921,12 @@
   }
 
   // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, a0);
+  __ mov(v0, value);
   __ Ret();
 
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
+    // t0: index (integer).
     __ bind(&check_heap_number);
     __ GetObjectType(value, t1, t2);
     __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3838,6 +3934,7 @@
     __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
     // a3: base pointer of external storage.
+    // t0: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3850,11 +3947,11 @@
 
       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         __ cvt_s_d(f0, f0);
-        __ sll(t8, key, 1);
+        __ sll(t8, t0, 2);
         __ addu(t8, a3, t8);
         __ swc1(f0, MemOperand(t8, 0));
       } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, key, 2);
+        __ sll(t8, t0, 3);
         __ addu(t8, a3, t8);
         __ sdc1(f0, MemOperand(t8, 0));
       } else {
@@ -3863,18 +3960,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ srl(t8, key, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, t0);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ addu(t8, a3, key);
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, key, 1);
+            __ sll(t8, t0, 2);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -3882,7 +3979,6 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
-          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3893,7 +3989,7 @@
 
       // Entry registers are intact, a0 holds the value
       // which is the return value.
-      __ mov(v0, a0);
+      __ mov(v0, value);
       __ Ret();
     } else {
       // FPU is not available, do manual conversions.
@@ -3917,7 +4013,7 @@
 
         __ xor_(t1, t6, t5);
         __ li(t2, kBinary32ExponentMask);
-        __ Movz(t6, t2, t1);  // Only if t6 is equal to t5.
+        __ movz(t6, t2, t1);  // Only if t6 is equal to t5.
         __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(t5));
 
         // Rebias exponent.
@@ -3930,12 +4026,12 @@
         __ Slt(t1, t1, t6);
         __ And(t2, t3, Operand(HeapNumber::kSignMask));
         __ Or(t2, t2, Operand(kBinary32ExponentMask));
-        __ Movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
+        __ movn(t3, t2, t1);  // Only if t6 is gt kBinary32MaxExponent.
         __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
 
         __ Slt(t1, t6, Operand(kBinary32MinExponent));
         __ And(t2, t3, Operand(HeapNumber::kSignMask));
-        __ Movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
+        __ movn(t3, t2, t1);  // Only if t6 is lt kBinary32MinExponent.
         __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
 
         __ And(t7, t3, Operand(HeapNumber::kSignMask));
@@ -3948,13 +4044,13 @@
         __ or_(t3, t7, t6);
 
         __ bind(&done);
-        __ sll(t9, key, 1);
+        __ sll(t9, a1, 2);
         __ addu(t9, a2, t9);
         __ sw(t3, MemOperand(t9, 0));
 
         // Entry registers are intact, a0 holds the value which is the return
         // value.
-        __ mov(v0, a0);
+        __ mov(v0, value);
         __ Ret();
 
         __ bind(&nan_or_infinity_or_zero);
@@ -3972,7 +4068,6 @@
         // t8: effective address of destination element.
         __ sw(t4, MemOperand(t8, 0));
         __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
-        __ mov(v0, a0);
         __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -3985,11 +4080,11 @@
         // and infinities. All these should be converted to 0.
         __ li(t5, HeapNumber::kExponentMask);
         __ and_(t6, t3, t5);
-        __ Movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
+        __ movz(t3, zero_reg, t6);  // Only if t6 is equal to zero.
         __ Branch(&done, eq, t6, Operand(zero_reg));
 
         __ xor_(t2, t6, t5);
-        __ Movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
+        __ movz(t3, zero_reg, t2);  // Only if t6 is equal to t5.
         __ Branch(&done, eq, t6, Operand(t5));
 
         // Unbias exponent.
@@ -3997,13 +4092,13 @@
         __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
         // If exponent is negative then result is 0.
         __ slt(t2, t6, zero_reg);
-        __ Movn(t3, zero_reg, t2);  // Only if exponent is negative.
+        __ movn(t3, zero_reg, t2);  // Only if exponent is negative.
         __ Branch(&done, lt, t6, Operand(zero_reg));
 
         // If exponent is too big then result is minimal value.
         __ slti(t1, t6, meaningfull_bits - 1);
         __ li(t2, min_value);
-        __ Movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
+        __ movz(t3, t2, t1);  // Only if t6 is ge meaningfull_bits - 1.
         __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
 
         __ And(t5, t3, Operand(HeapNumber::kSignMask));
@@ -4014,7 +4109,7 @@
         __ subu(t6, t9, t6);
         __ slt(t1, t6, zero_reg);
         __ srlv(t2, t3, t6);
-        __ Movz(t3, t2, t1);  // Only if t6 is positive.
+        __ movz(t3, t2, t1);  // Only if t6 is positive.
         __ Branch(&sign, ge, t6, Operand(zero_reg));
 
         __ subu(t6, zero_reg, t6);
@@ -4026,7 +4121,7 @@
 
         __ bind(&sign);
         __ subu(t2, t3, zero_reg);
-        __ Movz(t3, t2, t5);  // Only if t5 is zero.
+        __ movz(t3, t2, t5);  // Only if t5 is zero.
 
         __ bind(&done);
 
@@ -4035,18 +4130,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ srl(t8, key, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, t0);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ addu(t8, a3, key);
+            __ sll(t8, t0, 1);
+            __ addu(t8, a3, t8);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, key, 1);
+            __ sll(t8, t0, 2);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -4054,7 +4149,6 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
-          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4107,8 +4201,7 @@
   // have been verified by the caller to not be a smi.
 
   // Check that the key is a smi.
-  __ JumpIfNotSmi(a0, &miss_force_generic, at, USE_DELAY_SLOT);
-  // The delay slot can be safely used here, a1 is an object pointer.
+  __ JumpIfNotSmi(a0, &miss_force_generic);
 
   // Get the elements array.
   __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4116,7 +4209,7 @@
 
   // Check that the key is within bounds.
   __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
+  __ Branch(&miss_force_generic, hs, a0, Operand(a3));
 
   // Load the result and make sure it's not the hole.
   __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4126,13 +4219,13 @@
   __ lw(t0, MemOperand(t0));
   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ Branch(&miss_force_generic, eq, t0, Operand(t1));
-  __ Ret(USE_DELAY_SLOT);
   __ mov(v0, t0);
+  __ Ret();
 
   __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
+  Code* stub = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  __ Jump(Handle<Code>(stub), RelocInfo::CODE_TARGET);
 }
 
 
@@ -4205,11 +4298,8 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
-    MacroAssembler* masm,
-    bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -4218,17 +4308,15 @@
   //  -- a3    : scratch
   //  -- a4    : scratch (elements)
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, grow, slow;
-  Label finish_store, check_capacity;
+  Label miss_force_generic;
 
   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register scratch = t0;
-  Register elements_reg = a3;
-  Register length_reg = t1;
-  Register scratch2 = t2;
-  Register scratch3 = t3;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -4236,59 +4324,32 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(key_reg, &miss_force_generic);
 
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(value_reg, &transition_elements_kind);
-  }
-
-  // Check that the key is within bounds.
+  // Get the elements array and make sure it is a fast element array, not 'cow'.
   __ lw(elements_reg,
         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-  if (is_js_array) {
-    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-  } else {
-    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  }
-  // Compare smis.
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    __ Branch(&grow, hs, key_reg, Operand(scratch));
-  } else {
-    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-  }
-
-  // Make sure elements is a fast element array, not 'cow'.
   __ CheckMap(elements_reg,
               scratch,
               Heap::kFixedArrayMapRootIndex,
               &miss_force_generic,
               DONT_DO_SMI_CHECK);
 
-  __ bind(&finish_store);
-
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ Addu(scratch,
-            elements_reg,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(scratch, scratch, scratch2);
-    __ sw(value_reg, MemOperand(scratch));
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    ASSERT(elements_kind == FAST_ELEMENTS);
-    __ Addu(scratch,
-            elements_reg,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-    __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(scratch, scratch, scratch2);
-    __ sw(value_reg, MemOperand(scratch));
-    __ mov(receiver_reg, value_reg);
-    __ RecordWrite(elements_reg,  // Object.
-                   scratch,       // Address.
-                   receiver_reg,  // Value.
-                   kRAHasNotBeenSaved,
-                   kDontSaveFPRegs);
+    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
+  // Compare smis.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  __ Addu(scratch,
+          elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(scratch2, key_reg, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(scratch3, scratch2, scratch);
+  __ sw(value_reg, MemOperand(scratch3));
+  __ RecordWrite(scratch, Operand(scratch2), receiver_reg , elements_reg);
+
   // value_reg (a0) is preserved.
   // Done.
   __ Ret();
@@ -4297,83 +4358,12 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element, anything else
-    // must be handled by the runtime.
-    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch));
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ lw(length_reg,
-          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ lw(elements_reg,
-          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
-    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow,
-                          TAG_OBJECT);
-
-    __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-    __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
-    __ li(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-    __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
-      __ sw(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
-    }
-
-    // Store the element at index zero.
-    __ sw(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
-
-    // Install the new backing store in the JSArray.
-    __ sw(elements_reg,
-          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
-                        scratch, kRAHasNotBeenSaved, kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ li(length_reg, Operand(Smi::FromInt(1)));
-    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ Ret();
-
-    __ bind(&check_capacity);
-    // Check for cow elements, in general they are not handled by this stub
-    __ CheckMap(elements_reg,
-                scratch,
-                Heap::kFixedCOWArrayMapRootIndex,
-                &miss_force_generic,
-                DONT_DO_SMI_CHECK);
-
-    __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-    __ Branch(&slow, hs, length_reg, Operand(scratch));
-
-    // Grow the array and finish the store.
-    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
-    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
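
The restored FAST_ELEMENTS store ends in the older two-register RecordWrite,
and the deleted lines used the newer incremental-marking barrier; either way
the point is the same: after writing a heap pointer into the backing store,
the collector must learn about the slot. A generational-GC sketch of that idea
with a toy remembered set (nothing here is V8-specific):

#include <cassert>
#include <unordered_set>

struct HeapObject { bool in_new_space; };

static std::unordered_set<HeapObject**> remembered_set;

// Toy analogue of: sw(value_reg, MemOperand(scratch)); RecordWrite(...).
void StoreWithBarrier(HeapObject** slot, HeapObject* value) {
  *slot = value;
  if (value != nullptr && value->in_new_space) {
    remembered_set.insert(slot);  // the scavenger rescans recorded slots
  }
}

int main() {
  HeapObject young = {true};
  HeapObject old_gen = {false};
  HeapObject* elements[2] = {nullptr, nullptr};
  StoreWithBarrier(&elements[0], &young);    // old->new edge: recorded
  StoreWithBarrier(&elements[1], &old_gen);  // old->old edge: no entry
  assert(remembered_set.count(&elements[0]) == 1);
  assert(remembered_set.count(&elements[1]) == 0);
  return 0;
}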
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array,
-    KeyedAccessGrowMode grow_mode) {
+    bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
@@ -4385,18 +4375,16 @@
   //  -- t2    : scratch (exponent_reg)
   //  -- t3    : scratch4
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, grow, slow;
-  Label finish_store, check_capacity;
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
 
   Register value_reg = a0;
   Register key_reg = a1;
   Register receiver_reg = a2;
-  Register elements_reg = a3;
-  Register scratch1 = t0;
-  Register scratch2 = t1;
-  Register scratch3 = t2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register mantissa_reg = t1;
+  Register exponent_reg = t2;
   Register scratch4 = t3;
-  Register length_reg = t3;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller not to be a smi.
@@ -4407,31 +4395,90 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-    __ lw(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
   } else {
-    __ lw(scratch1,
+    __ lw(scratch,
           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
   }
   // Compare smis, unsigned compare catches both negative and out-of-bound
   // indexes.
-  if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-    __ Branch(&grow, hs, key_reg, Operand(scratch1));
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have a value greater (signed) than
+  // 0x7ff00000 in the exponent.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
+
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, elements_reg, Operand(scratch4));
+  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value_reg);  // In delay slot.
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
+  // otherwise it's an Infinity, and the non-NaN code path applies.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ Addu(scratch, elements_reg,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, scratch, scratch4);
+  // scratch is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(FPU)) {
+    destination = FloatingPointHelper::kFPURegisters;
   } else {
-    __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1));
+    destination = FloatingPointHelper::kCoreRegisters;
   }
 
-  __ bind(&finish_store);
-
-  __ StoreNumberToDoubleElements(value_reg,
-                                 key_reg,
-                                 receiver_reg,
-                                 elements_reg,
-                                 scratch1,
-                                 scratch2,
-                                 scratch3,
-                                 scratch4,
-                                 &transition_elements_kind);
-
+  Register untagged_value = receiver_reg;
+  __ SmiUntag(untagged_value, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm,
+      untagged_value,
+      destination,
+      f0,
+      mantissa_reg,
+      exponent_reg,
+      scratch4,
+      f2);
+  if (destination == FloatingPointHelper::kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    __ sdc1(f0, MemOperand(scratch, 0));
+  } else {
+    __ sw(mantissa_reg, MemOperand(scratch, 0));
+    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, value_reg);  // In delay slot.
 
@@ -4440,75 +4487,6 @@
   Handle<Code> ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ Jump(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime.
-    __ Branch(&miss_force_generic, ne, key_reg, Operand(scratch1));
-
-    // Transition on values that can't be stored in a FixedDoubleArray.
-    Label value_is_smi;
-    __ JumpIfSmi(value_reg, &value_is_smi);
-    __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    __ Branch(&transition_elements_kind, ne, scratch1, Operand(at));
-    __ bind(&value_is_smi);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ lw(length_reg,
-          FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ lw(elements_reg,
-          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-    __ Branch(&check_capacity, ne, elements_reg, Operand(at));
-
-    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
-                          TAG_OBJECT);
-
-    // Initialize the new FixedDoubleArray. Leave the elements uninitialized
-    // for efficiency; they are guaranteed to be initialized before use.
-    __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
-    __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
-    __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
-    __ sw(scratch1,
-          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-
-    // Install the new backing store in the JSArray.
-    __ sw(elements_reg,
-          FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-    __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
-                        scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs,
-                        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ li(length_reg, Operand(Smi::FromInt(1)));
-    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&check_capacity);
-    // Make sure that the backing store can hold additional elements.
-    __ lw(scratch1,
-          FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
-    __ Branch(&slow, hs, length_reg, Operand(scratch1));
-
-    // Grow the array and finish the store.
-    __ Addu(length_reg, length_reg, Operand(Smi::FromInt(1)));
-    __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ Jump(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
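
The block removed above implemented the grow-by-one fast path for keyed stores: only a store exactly one past the current length may grow the array in generated code, an empty backing store gets a small preallocation, and everything else (including a full backing store) defers to the runtime. A minimal sketch of that classification, using hypothetical types:

#include <cassert>

// Hypothetical stand-in for a JSArray with a separate backing store.
struct Array { int length; int capacity; };

enum StoreOutcome { STORE_INLINE, GROW_INLINE, CALL_RUNTIME };

StoreOutcome ClassifyKeyedStore(const Array& a, int key) {
  if (key < a.length) return STORE_INLINE;        // in-bounds store
  if (key != a.length) return CALL_RUNTIME;       // only grow by exactly one
  if (a.length < a.capacity) return GROW_INLINE;  // room in the backing store
  return CALL_RUNTIME;                            // needs reallocation
}

int main() {
  Array a = {4, 8};
  assert(ClassifyKeyedStore(a, 2) == STORE_INLINE);
  assert(ClassifyKeyedStore(a, 4) == GROW_INLINE);
  assert(ClassifyKeyedStore(a, 9) == CALL_RUNTIME);
  return 0;
}
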
 
 
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index c43dd22..e3f3c48 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -144,32 +144,32 @@
 
 
 // Type names of the different mirrors.
-var UNDEFINED_TYPE = 'undefined';
-var NULL_TYPE = 'null';
-var BOOLEAN_TYPE = 'boolean';
-var NUMBER_TYPE = 'number';
-var STRING_TYPE = 'string';
-var OBJECT_TYPE = 'object';
-var FUNCTION_TYPE = 'function';
-var REGEXP_TYPE = 'regexp';
-var ERROR_TYPE = 'error';
-var PROPERTY_TYPE = 'property';
-var FRAME_TYPE = 'frame';
-var SCRIPT_TYPE = 'script';
-var CONTEXT_TYPE = 'context';
-var SCOPE_TYPE = 'scope';
+const UNDEFINED_TYPE = 'undefined';
+const NULL_TYPE = 'null';
+const BOOLEAN_TYPE = 'boolean';
+const NUMBER_TYPE = 'number';
+const STRING_TYPE = 'string';
+const OBJECT_TYPE = 'object';
+const FUNCTION_TYPE = 'function';
+const REGEXP_TYPE = 'regexp';
+const ERROR_TYPE = 'error';
+const PROPERTY_TYPE = 'property';
+const FRAME_TYPE = 'frame';
+const SCRIPT_TYPE = 'script';
+const CONTEXT_TYPE = 'context';
+const SCOPE_TYPE = 'scope';
 
 // Maximum length when sending strings through the JSON protocol.
-var kMaxProtocolStringLength = 80;
+const kMaxProtocolStringLength = 80;
 
 // Different kind of properties.
-var PropertyKind = {};
+PropertyKind = {};
 PropertyKind.Named   = 1;
 PropertyKind.Indexed = 2;
 
 
 // A copy of the PropertyType enum from global.h
-var PropertyType = {};
+PropertyType = {};
 PropertyType.Normal                  = 0;
 PropertyType.Field                   = 1;
 PropertyType.ConstantFunction        = 2;
@@ -183,7 +183,7 @@
 
 
 // Different attributes for a property.
-var PropertyAttribute = {};
+PropertyAttribute = {};
 PropertyAttribute.None       = NONE;
 PropertyAttribute.ReadOnly   = READ_ONLY;
 PropertyAttribute.DontEnum   = DONT_ENUM;
@@ -191,12 +191,12 @@
 
 
 // A copy of the scope types from runtime.cc.
-var ScopeType = { Global: 0,
-                  Local: 1,
-                  With: 2,
-                  Closure: 3,
-                  Catch: 4,
-                  Block: 5 };
+ScopeType = { Global: 0,
+              Local: 1,
+              With: 2,
+              Closure: 3,
+              Catch: 4,
+              Block: 5 };
 
 
 // Mirror hierarchy:
@@ -225,7 +225,7 @@
  */
 function Mirror(type) {
   this.type_ = type;
-}
+};
 
 
 Mirror.prototype.type = function() {
@@ -239,7 +239,7 @@
  */
 Mirror.prototype.isValue = function() {
   return this instanceof ValueMirror;
-};
+}
 
 
 /**
@@ -248,7 +248,7 @@
  */
 Mirror.prototype.isUndefined = function() {
   return this instanceof UndefinedMirror;
-};
+}
 
 
 /**
@@ -257,7 +257,7 @@
  */
 Mirror.prototype.isNull = function() {
   return this instanceof NullMirror;
-};
+}
 
 
 /**
@@ -266,7 +266,7 @@
  */
 Mirror.prototype.isBoolean = function() {
   return this instanceof BooleanMirror;
-};
+}
 
 
 /**
@@ -275,7 +275,7 @@
  */
 Mirror.prototype.isNumber = function() {
   return this instanceof NumberMirror;
-};
+}
 
 
 /**
@@ -284,7 +284,7 @@
  */
 Mirror.prototype.isString = function() {
   return this instanceof StringMirror;
-};
+}
 
 
 /**
@@ -293,7 +293,7 @@
  */
 Mirror.prototype.isObject = function() {
   return this instanceof ObjectMirror;
-};
+}
 
 
 /**
@@ -302,7 +302,7 @@
  */
 Mirror.prototype.isFunction = function() {
   return this instanceof FunctionMirror;
-};
+}
 
 
 /**
@@ -311,7 +311,7 @@
  */
 Mirror.prototype.isUnresolvedFunction = function() {
   return this instanceof UnresolvedFunctionMirror;
-};
+}
 
 
 /**
@@ -320,7 +320,7 @@
  */
 Mirror.prototype.isArray = function() {
   return this instanceof ArrayMirror;
-};
+}
 
 
 /**
@@ -329,7 +329,7 @@
  */
 Mirror.prototype.isDate = function() {
   return this instanceof DateMirror;
-};
+}
 
 
 /**
@@ -338,7 +338,7 @@
  */
 Mirror.prototype.isRegExp = function() {
   return this instanceof RegExpMirror;
-};
+}
 
 
 /**
@@ -347,7 +347,7 @@
  */
 Mirror.prototype.isError = function() {
   return this instanceof ErrorMirror;
-};
+}
 
 
 /**
@@ -356,7 +356,7 @@
  */
 Mirror.prototype.isProperty = function() {
   return this instanceof PropertyMirror;
-};
+}
 
 
 /**
@@ -365,7 +365,7 @@
  */
 Mirror.prototype.isFrame = function() {
   return this instanceof FrameMirror;
-};
+}
 
 
 /**
@@ -374,7 +374,7 @@
  */
 Mirror.prototype.isScript = function() {
   return this instanceof ScriptMirror;
-};
+}
 
 
 /**
@@ -383,7 +383,7 @@
  */
 Mirror.prototype.isContext = function() {
   return this instanceof ContextMirror;
-};
+}
 
 
 /**
@@ -392,7 +392,7 @@
  */
 Mirror.prototype.isScope = function() {
   return this instanceof ScopeMirror;
-};
+}
 
 
 /**
@@ -400,7 +400,7 @@
  */
 Mirror.prototype.allocateHandle_ = function() {
   this.handle_ = next_handle_++;
-};
+}
 
 
 /**
@@ -409,13 +409,13 @@
  */
 Mirror.prototype.allocateTransientHandle_ = function() {
   this.handle_ = next_transient_handle_--;
-};
+}
 
 
 Mirror.prototype.toText = function() {
   // Simple text conversion, used when there is no specialization in a subclass.
   return "#<" + this.constructor.name + ">";
-};
+}
 
 
 /**
@@ -480,7 +480,7 @@
 
 UndefinedMirror.prototype.toText = function() {
   return 'undefined';
-};
+}
 
 
 /**
@@ -496,7 +496,7 @@
 
 NullMirror.prototype.toText = function() {
   return 'null';
-};
+}
 
 
 /**
@@ -513,7 +513,7 @@
 
 BooleanMirror.prototype.toText = function() {
   return this.value_ ? 'true' : 'false';
-};
+}
 
 
 /**
@@ -530,7 +530,7 @@
 
 NumberMirror.prototype.toText = function() {
   return %NumberToString(this.value_);
-};
+}
 
 
 /**
@@ -555,11 +555,11 @@
            '... (length: ' + this.length() + ')';
   }
   return this.value_;
-};
+}
 
 StringMirror.prototype.toText = function() {
   return this.getTruncatedValue(kMaxProtocolStringLength);
-};
+}
 
 
 /**
@@ -898,7 +898,7 @@
 
 FunctionMirror.prototype.toText = function() {
   return this.source();
-};
+}
 
 
 /**
@@ -951,7 +951,7 @@
 
 UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
   return [];
-};
+}
 
 
 /**
@@ -971,8 +971,7 @@
 };
 
 
-ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index,
-                                                            opt_to_index) {
+ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) {
   var from_index = opt_from_index || 0;
   var to_index = opt_to_index || this.length() - 1;
   if (from_index > to_index) return new Array();
@@ -988,7 +987,7 @@
     values[i - from_index] = value;
   }
   return values;
-};
+}
 
 
 /**
@@ -1006,7 +1005,7 @@
 DateMirror.prototype.toText = function() {
   var s = JSON.stringify(this.value_);
   return s.substring(1, s.length - 1);  // cut quotes
-};
+}
 
 
 /**
@@ -1060,7 +1059,7 @@
 RegExpMirror.prototype.toText = function() {
   // Simple text conversion, used when there is no specialization in a subclass.
   return "/" + this.source() + "/";
-};
+}
 
 
 /**
@@ -1088,12 +1087,12 @@
   // Use the same text representation as in messages.js.
   var str;
   try {
-    str = %_CallFunction(this.value_, builtins.ErrorToString);
+    str = %_CallFunction(this.value_, builtins.errorToString);
   } catch (e) {
     str = '#<Error>';
   }
   return str;
-};
+}
 
 
 /**
@@ -1111,7 +1110,7 @@
   this.value_ = details[0];
   this.details_ = details[1];
   if (details.length > 2) {
-    this.exception_ = details[2];
+    this.exception_ = details[2]
     this.getter_ = details[3];
     this.setter_ = details[4];
   }
@@ -1121,22 +1120,22 @@
 
 PropertyMirror.prototype.isReadOnly = function() {
   return (this.attributes() & PropertyAttribute.ReadOnly) != 0;
-};
+}
 
 
 PropertyMirror.prototype.isEnum = function() {
   return (this.attributes() & PropertyAttribute.DontEnum) == 0;
-};
+}
 
 
 PropertyMirror.prototype.canDelete = function() {
   return (this.attributes() & PropertyAttribute.DontDelete) == 0;
-};
+}
 
 
 PropertyMirror.prototype.name = function() {
   return this.name_;
-};
+}
 
 
 PropertyMirror.prototype.isIndexed = function() {
@@ -1146,12 +1145,12 @@
     }
   }
   return true;
-};
+}
 
 
 PropertyMirror.prototype.value = function() {
   return MakeMirror(this.value_, false);
-};
+}
 
 
 /**
@@ -1160,22 +1159,22 @@
  */
 PropertyMirror.prototype.isException = function() {
   return this.exception_ ? true : false;
-};
+}
 
 
 PropertyMirror.prototype.attributes = function() {
   return %DebugPropertyAttributesFromDetails(this.details_);
-};
+}
 
 
 PropertyMirror.prototype.propertyType = function() {
   return %DebugPropertyTypeFromDetails(this.details_);
-};
+}
 
 
 PropertyMirror.prototype.insertionIndex = function() {
   return %DebugPropertyIndexFromDetails(this.details_);
-};
+}
 
 
 /**
@@ -1184,7 +1183,7 @@
  */
 PropertyMirror.prototype.hasGetter = function() {
   return this.getter_ ? true : false;
-};
+}
 
 
 /**
@@ -1193,7 +1192,7 @@
  */
 PropertyMirror.prototype.hasSetter = function() {
   return this.setter_ ? true : false;
-};
+}
 
 
 /**
@@ -1207,7 +1206,7 @@
   } else {
     return GetUndefinedMirror();
   }
-};
+}
 
 
 /**
@@ -1221,7 +1220,7 @@
   } else {
     return GetUndefinedMirror();
   }
-};
+}
 
 
 /**
@@ -1234,27 +1233,27 @@
   return (this.propertyType() == PropertyType.Interceptor) ||
          ((this.propertyType() == PropertyType.Callbacks) &&
           !this.hasGetter() && !this.hasSetter());
-};
+}
 
 
-var kFrameDetailsFrameIdIndex = 0;
-var kFrameDetailsReceiverIndex = 1;
-var kFrameDetailsFunctionIndex = 2;
-var kFrameDetailsArgumentCountIndex = 3;
-var kFrameDetailsLocalCountIndex = 4;
-var kFrameDetailsSourcePositionIndex = 5;
-var kFrameDetailsConstructCallIndex = 6;
-var kFrameDetailsAtReturnIndex = 7;
-var kFrameDetailsFlagsIndex = 8;
-var kFrameDetailsFirstDynamicIndex = 9;
+const kFrameDetailsFrameIdIndex = 0;
+const kFrameDetailsReceiverIndex = 1;
+const kFrameDetailsFunctionIndex = 2;
+const kFrameDetailsArgumentCountIndex = 3;
+const kFrameDetailsLocalCountIndex = 4;
+const kFrameDetailsSourcePositionIndex = 5;
+const kFrameDetailsConstructCallIndex = 6;
+const kFrameDetailsAtReturnIndex = 7;
+const kFrameDetailsFlagsIndex = 8;
+const kFrameDetailsFirstDynamicIndex = 9;
 
-var kFrameDetailsNameIndex = 0;
-var kFrameDetailsValueIndex = 1;
-var kFrameDetailsNameValueSize = 2;
+const kFrameDetailsNameIndex = 0;
+const kFrameDetailsValueIndex = 1;
+const kFrameDetailsNameValueSize = 2;
 
-var kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
-var kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
-var kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
+const kFrameDetailsFlagDebuggerFrameMask = 1 << 0;
+const kFrameDetailsFlagOptimizedFrameMask = 1 << 1;
+const kFrameDetailsFlagInlinedFrameIndexMask = 7 << 2;
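
The flags word packs two boolean bits and a 3-bit inlined-frame index starting at bit 2, which is why inlinedFrameIndex() below masks and then shifts right by 2. A minimal C++ sketch mirroring the masks above:

#include <cstdio>

const int kDebuggerFrameMask = 1 << 0;
const int kOptimizedFrameMask = 1 << 1;
const int kInlinedFrameIndexMask = 7 << 2;

int main() {
  int flags = kOptimizedFrameMask | (3 << 2);  // optimized, inline index 3
  int is_debugger = (flags & kDebuggerFrameMask) != 0;
  int is_optimized = (flags & kOptimizedFrameMask) != 0;
  int inlined_index = (flags & kInlinedFrameIndexMask) >> 2;
  printf("%d %d %d\n", is_debugger, is_optimized, inlined_index);  // 0 1 3
  return 0;
}
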
 
 /**
  * Wrapper for the frame details information retrieved from the VM. The frame
@@ -1285,63 +1284,63 @@
 FrameDetails.prototype.frameId = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFrameIdIndex];
-};
+}
 
 
 FrameDetails.prototype.receiver = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsReceiverIndex];
-};
+}
 
 
 FrameDetails.prototype.func = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsFunctionIndex];
-};
+}
 
 
 FrameDetails.prototype.isConstructCall = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsConstructCallIndex];
-};
+}
 
 
 FrameDetails.prototype.isAtReturn = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsAtReturnIndex];
-};
+}
 
 
 FrameDetails.prototype.isDebuggerFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagDebuggerFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
+}
 
 
 FrameDetails.prototype.isOptimizedFrame = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagOptimizedFrameMask;
   return (this.details_[kFrameDetailsFlagsIndex] & f) == f;
-};
+}
 
 
 FrameDetails.prototype.isInlinedFrame = function() {
   return this.inlinedFrameIndex() > 0;
-};
+}
 
 
 FrameDetails.prototype.inlinedFrameIndex = function() {
   %CheckExecutionState(this.break_id_);
   var f = kFrameDetailsFlagInlinedFrameIndexMask;
-  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2;
-};
+  return (this.details_[kFrameDetailsFlagsIndex] & f) >> 2
+}
 
 
 FrameDetails.prototype.argumentCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsArgumentCountIndex];
-};
+}
 
 
 FrameDetails.prototype.argumentName = function(index) {
@@ -1349,9 +1348,9 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex];
+                         kFrameDetailsNameIndex]
   }
-};
+}
 
 
 FrameDetails.prototype.argumentValue = function(index) {
@@ -1359,45 +1358,45 @@
   if (index >= 0 && index < this.argumentCount()) {
     return this.details_[kFrameDetailsFirstDynamicIndex +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex];
+                         kFrameDetailsValueIndex]
   }
-};
+}
 
 
 FrameDetails.prototype.localCount = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsLocalCountIndex];
-};
+}
 
 
 FrameDetails.prototype.sourcePosition = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kFrameDetailsSourcePositionIndex];
-};
+}
 
 
 FrameDetails.prototype.localName = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize;
+                        this.argumentCount() * kFrameDetailsNameValueSize
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsNameIndex];
+                         kFrameDetailsNameIndex]
   }
-};
+}
 
 
 FrameDetails.prototype.localValue = function(index) {
   %CheckExecutionState(this.break_id_);
   if (index >= 0 && index < this.localCount()) {
     var locals_offset = kFrameDetailsFirstDynamicIndex +
-                        this.argumentCount() * kFrameDetailsNameValueSize;
+                        this.argumentCount() * kFrameDetailsNameValueSize
     return this.details_[locals_offset +
                          index * kFrameDetailsNameValueSize +
-                         kFrameDetailsValueIndex];
+                         kFrameDetailsValueIndex]
   }
-};
+}
 
 
 FrameDetails.prototype.returnValue = function() {
@@ -1408,12 +1407,12 @@
   if (this.details_[kFrameDetailsAtReturnIndex]) {
     return this.details_[return_value_offset];
   }
-};
+}
 
 
 FrameDetails.prototype.scopeCount = function() {
   return %GetScopeCount(this.break_id_, this.frameId());
-};
+}
 
 
 /**
@@ -1576,8 +1575,7 @@
 };
 
 
-FrameMirror.prototype.evaluate = function(source, disable_break,
-                                          opt_context_object) {
+FrameMirror.prototype.evaluate = function(source, disable_break, opt_context_object) {
   var result = %DebugEvaluate(this.break_id_,
                               this.details_.frameId(),
                               this.details_.inlinedFrameIndex(),
@@ -1601,8 +1599,7 @@
     result += '[debugger]';
   } else {
     // If the receiver has a className which is 'global' don't display it.
-    var display_receiver =
-      !receiver.className || (receiver.className() != 'global');
+    var display_receiver = !receiver.className || receiver.className() != 'global';
     if (display_receiver) {
       result += receiver.toText();
     }
@@ -1664,7 +1661,7 @@
   }
 
   return result;
-};
+}
 
 
 FrameMirror.prototype.sourceAndPositionText = function() {
@@ -1696,13 +1693,13 @@
   }
 
   return result;
-};
+}
 
 
 FrameMirror.prototype.localsText = function() {
   // Format local variables.
   var result = '';
-  var locals_count = this.localCount();
+  var locals_count = this.localCount()
   if (locals_count > 0) {
     for (var i = 0; i < locals_count; ++i) {
       result += '      var ';
@@ -1714,7 +1711,7 @@
   }
 
   return result;
-};
+}
 
 
 FrameMirror.prototype.toText = function(opt_locals) {
@@ -1729,11 +1726,11 @@
     result += this.localsText();
   }
   return result;
-};
+}
 
 
-var kScopeDetailsTypeIndex = 0;
-var kScopeDetailsObjectIndex = 1;
+const kScopeDetailsTypeIndex = 0;
+const kScopeDetailsObjectIndex = 1;
 
 function ScopeDetails(frame, index) {
   this.break_id_ = frame.break_id_;
@@ -1747,13 +1744,13 @@
 ScopeDetails.prototype.type = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsTypeIndex];
-};
+}
 
 
 ScopeDetails.prototype.object = function() {
   %CheckExecutionState(this.break_id_);
   return this.details_[kScopeDetailsObjectIndex];
-};
+}
 
 
 /**
@@ -1832,11 +1829,6 @@
 };
 
 
-ScriptMirror.prototype.setSource = function(source) {
-  %DebugSetScriptSource(this.script_, source);
-};
-
-
 ScriptMirror.prototype.lineOffset = function() {
   return this.script_.line_offset;
 };
@@ -1870,12 +1862,12 @@
 ScriptMirror.prototype.locationFromPosition = function(
     position, include_resource_offset) {
   return this.script_.locationFromPosition(position, include_resource_offset);
-};
+}
 
 
 ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) {
   return this.script_.sourceSlice(opt_from_line, opt_to_line);
-};
+}
 
 
 ScriptMirror.prototype.context = function() {
@@ -1915,7 +1907,7 @@
   }
   result += ')';
   return result;
-};
+}
 
 
 /**
@@ -1973,7 +1965,7 @@
  */
 JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
   return this.serialize_(mirror, true, true);
-};
+}
 
 
 /**
@@ -1986,7 +1978,7 @@
 JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
   var json = this.serialize_(mirror, false, true);
   return json;
-};
+}
 
 
 /**
@@ -2008,17 +2000,17 @@
   }
 
   return content;
-};
+}
 
 
 JSONProtocolSerializer.prototype.includeSource_ = function() {
   return this.options_ && this.options_.includeSource;
-};
+}
 
 
 JSONProtocolSerializer.prototype.inlineRefs_ = function() {
   return this.options_ && this.options_.inlineRefs;
-};
+}
 
 
 JSONProtocolSerializer.prototype.maxStringLength_ = function() {
@@ -2027,7 +2019,7 @@
     return kMaxProtocolStringLength;
   }
   return this.options_.maxStringLength;
-};
+}
 
 
 JSONProtocolSerializer.prototype.add_ = function(mirror) {
@@ -2040,7 +2032,7 @@
 
   // Add the mirror to the list of mirrors to be serialized.
   this.mirrors_.push(mirror);
-};
+}
 
 
 /**
@@ -2147,7 +2139,7 @@
       break;
 
     case PROPERTY_TYPE:
-      throw new Error('PropertyMirror cannot be serialized independently');
+      throw new Error('PropertyMirror cannot be serialized independently')
       break;
 
     case FRAME_TYPE:
@@ -2187,7 +2179,7 @@
           mirror.evalFromScript()) {
         content.evalFromScript =
             this.serializeReference(mirror.evalFromScript());
-        var evalFromLocation = mirror.evalFromLocation();
+        var evalFromLocation = mirror.evalFromLocation()
         if (evalFromLocation) {
           content.evalFromLocation = { line: evalFromLocation.line,
                                        column: evalFromLocation.column };
@@ -2211,7 +2203,7 @@
 
   // Create and return the JSON string.
   return content;
-};
+}
 
 
 /**
@@ -2286,7 +2278,7 @@
     }
   }
   content.properties = p;
-};
+}
 
 
 /**
@@ -2350,7 +2342,7 @@
     result.ref = propertyValue.handle();
   }
   return result;
-};
+}
 
 
 JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
@@ -2370,7 +2362,7 @@
   var x = new Array(mirror.argumentCount());
   for (var i = 0; i < mirror.argumentCount(); i++) {
     var arg = {};
-    var argument_name = mirror.argumentName(i);
+    var argument_name = mirror.argumentName(i)
     if (argument_name) {
       arg.name = argument_name;
     }
@@ -2400,7 +2392,7 @@
       index: i
     });
   }
-};
+}
 
 
 JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
@@ -2410,7 +2402,7 @@
   content.object = this.inlineRefs_() ?
                    this.serializeValue(mirror.scopeObject()) :
                    this.serializeReference(mirror.scopeObject());
-};
+}
 
 
 /**
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index d1620bf..a791dbb 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -109,7 +109,7 @@
       if (j != 0) {
         fprintf(fp, ",");
       }
-      fprintf(fp, "%u", static_cast<unsigned char>(at(j)));
+      fprintf(fp, "%d", at(j));
     }
   }
   char at(int i) { return data_[i]; }
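
The revert above swaps the %u form for %d on a plain char, which is a signedness hazard: where char is signed, bytes at or above 0x80 print as negative numbers in the emitted snapshot table. A minimal demonstration:

#include <cstdio>

int main() {
  char byte = static_cast<char>(0xE9);
  printf("%d\n", byte);                              // -23 where char is signed
  printf("%u\n", static_cast<unsigned char>(byte));  // 233 on every target
  return 0;
}
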
@@ -312,7 +312,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
+  HEAP->CollectAllGarbage(true);
   i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 8eefb23..e9ca6c0 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -94,9 +94,6 @@
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayVerify();
       break;
-    case FREE_SPACE_TYPE:
-      FreeSpace::cast(this)->FreeSpaceVerify();
-      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
       break;
@@ -138,9 +135,6 @@
     case JS_VALUE_TYPE:
       JSValue::cast(this)->JSValueVerify();
       break;
-    case JS_DATE_TYPE:
-      JSDate::cast(this)->JSDateVerify();
-      break;
     case JS_FUNCTION_TYPE:
       JSFunction::cast(this)->JSFunctionVerify();
       break;
@@ -159,12 +153,6 @@
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayVerify();
       break;
-    case JS_SET_TYPE:
-      JSSet::cast(this)->JSSetVerify();
-      break;
-    case JS_MAP_TYPE:
-      JSMap::cast(this)->JSMapVerify();
-      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
@@ -219,11 +207,6 @@
 }
 
 
-void FreeSpace::FreeSpaceVerify() {
-  ASSERT(IsFreeSpace());
-}
-
-
 void ExternalPixelArray::ExternalPixelArrayVerify() {
   ASSERT(IsExternalPixelArray());
 }
@@ -272,20 +255,12 @@
 void JSObject::JSObjectVerify() {
   VerifyHeapPointer(properties());
   VerifyHeapPointer(elements());
-
-  if (GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
-    ASSERT(this->elements()->IsFixedArray());
-    ASSERT(this->elements()->length() >= 2);
-  }
-
   if (HasFastProperties()) {
     CHECK_EQ(map()->unused_property_fields(),
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
-  ASSERT_EQ((map()->has_fast_elements() ||
-             map()->has_fast_smi_only_elements() ||
-             (elements() == GetHeap()->empty_fixed_array())),
+  ASSERT_EQ(map()->has_fast_elements(),
             (elements()->map() == GetHeap()->fixed_array_map() ||
              elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -329,18 +304,6 @@
 }
 
 
-void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
-  VerifyObjectField(kIcTotalCountOffset);
-  VerifyObjectField(kIcWithTypeinfoCountOffset);
-  VerifyHeapPointer(type_feedback_cells());
-}
-
-
-void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
-  VerifySmiField(kAliasedContextSlot);
-}
-
-
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
@@ -359,8 +322,7 @@
       double value = get_scalar(i);
       ASSERT(!isnan(value) ||
              (BitCast<uint64_t>(value) ==
-              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
-             ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
+              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
     }
   }
 }
@@ -374,53 +336,6 @@
 }
 
 
-void JSDate::JSDateVerify() {
-  if (value()->IsHeapObject()) {
-    VerifyHeapPointer(value());
-  }
-  CHECK(value()->IsUndefined() || value()->IsSmi() || value()->IsHeapNumber());
-  CHECK(year()->IsUndefined() || year()->IsSmi() || year()->IsNaN());
-  CHECK(month()->IsUndefined() || month()->IsSmi() || month()->IsNaN());
-  CHECK(day()->IsUndefined() || day()->IsSmi() || day()->IsNaN());
-  CHECK(weekday()->IsUndefined() || weekday()->IsSmi() || weekday()->IsNaN());
-  CHECK(hour()->IsUndefined() || hour()->IsSmi() || hour()->IsNaN());
-  CHECK(min()->IsUndefined() || min()->IsSmi() || min()->IsNaN());
-  CHECK(sec()->IsUndefined() || sec()->IsSmi() || sec()->IsNaN());
-  CHECK(cache_stamp()->IsUndefined() ||
-        cache_stamp()->IsSmi() ||
-        cache_stamp()->IsNaN());
-
-  if (month()->IsSmi()) {
-    int month = Smi::cast(this->month())->value();
-    CHECK(0 <= month && month <= 11);
-  }
-  if (day()->IsSmi()) {
-    int day = Smi::cast(this->day())->value();
-    CHECK(1 <= day && day <= 31);
-  }
-  if (hour()->IsSmi()) {
-    int hour = Smi::cast(this->hour())->value();
-    CHECK(0 <= hour && hour <= 23);
-  }
-  if (min()->IsSmi()) {
-    int min = Smi::cast(this->min())->value();
-    CHECK(0 <= min && min <= 59);
-  }
-  if (sec()->IsSmi()) {
-    int sec = Smi::cast(this->sec())->value();
-    CHECK(0 <= sec && sec <= 59);
-  }
-  if (weekday()->IsSmi()) {
-    int weekday = Smi::cast(this->weekday())->value();
-    CHECK(0 <= weekday && weekday <= 6);
-  }
-  if (cache_stamp()->IsSmi()) {
-    CHECK(Smi::cast(cache_stamp())->value() <=
-          Smi::cast(Isolate::Current()->date_cache()->stamp())->value());
-  }
-}
-
-
 void JSMessageObject::JSMessageObjectVerify() {
   CHECK(IsJSMessageObject());
   CHECK(type()->IsString());
@@ -452,7 +367,7 @@
   CHECK(this->first()->IsString());
   CHECK(this->second() == GetHeap()->empty_string() ||
         this->second()->IsString());
-  CHECK(this->length() >= ConsString::kMinLength);
+  CHECK(this->length() >= String::kMinNonFlatLength);
   if (this->IsFlat()) {
     // A flat cons can only be created by String::SlowTryFlatten.
     // Afterwards, the first part may be externalized.
@@ -472,7 +387,6 @@
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
   VerifyObjectField(kNextFunctionLinkOffset);
-  CHECK(code()->IsCode());
   CHECK(next_function_link()->IsUndefined() ||
         next_function_link()->IsJSFunction());
 }
@@ -549,7 +463,6 @@
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
                   kCodeAlignment));
-  relocation_info()->Verify();
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();
@@ -571,27 +484,11 @@
 }
 
 
-void JSSet::JSSetVerify() {
-  CHECK(IsJSSet());
-  JSObjectVerify();
-  VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable() || table()->IsUndefined());
-}
-
-
-void JSMap::JSMapVerify() {
-  CHECK(IsJSMap());
-  JSObjectVerify();
-  VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable() || table()->IsUndefined());
-}
-
-
 void JSWeakMap::JSWeakMapVerify() {
   CHECK(IsJSWeakMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable() || table()->IsUndefined());
+  ASSERT(table()->IsHashTable());
 }
 
 
@@ -638,14 +535,13 @@
 
 
 void JSProxy::JSProxyVerify() {
-  CHECK(IsJSProxy());
+  ASSERT(IsJSProxy());
   VerifyPointer(handler());
-  ASSERT(hash()->IsSmi() || hash()->IsUndefined());
 }
 
 
 void JSFunctionProxy::JSFunctionProxyVerify() {
-  CHECK(IsJSFunctionProxy());
+  ASSERT(IsJSFunctionProxy());
   JSProxyVerify();
   VerifyPointer(call_trap());
   VerifyPointer(construct_trap());
@@ -667,13 +563,6 @@
 }
 
 
-void AccessorPair::AccessorPairVerify() {
-  CHECK(IsAccessorPair());
-  VerifyPointer(getter());
-  VerifyPointer(setter());
-}
-
-
 void AccessCheckInfo::AccessCheckInfoVerify() {
   CHECK(IsAccessCheckInfo());
   VerifyPointer(named_callback());
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 78578cc..e7b6a34 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,10 +43,7 @@
 #include "isolate.h"
 #include "property.h"
 #include "spaces.h"
-#include "store-buffer.h"
 #include "v8memory.h"
-#include "factory.h"
-#include "incremental-marking.h"
 
 namespace v8 {
 namespace internal {
@@ -67,13 +64,6 @@
 }
 
 
-#define TYPE_CHECKER(type, instancetype)                                \
-  bool Object::Is##type() {                                             \
-  return Object::IsHeapObject() &&                                      \
-      HeapObject::cast(this)->map()->instance_type() == instancetype;   \
-  }
-
-
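
The TYPE_CHECKER macro removed above stamped out one instance-type predicate per line; the revert restores the hand-written forms that appear throughout the rest of this file. A minimal standalone sketch of the pattern, with hypothetical stand-ins for Object and the instance types:

#include <cassert>

// Hypothetical stand-ins; the real code dispatches on the map's instance type.
enum InstanceType { HEAP_NUMBER_TYPE, BYTE_ARRAY_TYPE };

struct Object {
  InstanceType type;
  bool IsHeapObject() const { return true; }  // smi check elided in this sketch
#define TYPE_CHECKER(name, instancetype) \
  bool Is##name() const { return IsHeapObject() && type == instancetype; }
  TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
  TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
#undef TYPE_CHECKER
};

int main() {
  Object o = {HEAP_NUMBER_TYPE};
  assert(o.IsHeapNumber() && !o.IsByteArray());
  return 0;
}
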
 #define CAST_ACCESSOR(type)                     \
   type* type::cast(Object* object) {            \
     ASSERT(object->Is##type());                 \
@@ -90,19 +80,19 @@
   type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
   void holder::set_##name(type* value, WriteBarrierMode mode) {         \
     WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);           \
   }
 
 
-// Getter that returns a tagged Smi and setter that writes a tagged Smi.
-#define ACCESSORS_TO_SMI(holder, name, offset)                          \
-  Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); }   \
-  void holder::set_##name(Smi* value, WriteBarrierMode mode) {          \
+// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
+#define ACCESSORS_GCSAFE(holder, name, type, offset)                    \
+  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
+  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
     WRITE_FIELD(this, offset, value);                                   \
+    CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode);                \
   }
 
 
-// Getter that returns a Smi as an int and writes an int as a Smi.
 #define SMI_ACCESSORS(holder, name, offset)             \
   int holder::name() {                                  \
     Object* value = READ_FIELD(this, offset);           \
@@ -128,23 +118,6 @@
   }
 
 
-bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
-                                         ElementsKind to_kind) {
-  if (to_kind == FAST_ELEMENTS) {
-    return from_kind == FAST_SMI_ONLY_ELEMENTS ||
-        from_kind == FAST_DOUBLE_ELEMENTS;
-  } else {
-    return to_kind == FAST_DOUBLE_ELEMENTS &&
-        from_kind == FAST_SMI_ONLY_ELEMENTS;
-  }
-}
-
-
-bool Object::IsFixedArrayBase() {
-  return IsFixedArray() || IsFixedDoubleArray();
-}
-
-
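
IsMoreGeneralElementsKindTransition, removed above, encodes a one-way lattice: FAST_SMI_ONLY_ELEMENTS may widen to FAST_DOUBLE_ELEMENTS or FAST_ELEMENTS, and FAST_DOUBLE_ELEMENTS may widen to FAST_ELEMENTS, never the reverse. A minimal standalone sketch:

#include <cassert>

enum ElementsKind {
  FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS
};

bool IsMoreGeneralTransition(ElementsKind from, ElementsKind to) {
  if (to == FAST_ELEMENTS) {
    return from == FAST_SMI_ONLY_ELEMENTS || from == FAST_DOUBLE_ELEMENTS;
  }
  return to == FAST_DOUBLE_ELEMENTS && from == FAST_SMI_ONLY_ELEMENTS;
}

int main() {
  assert(IsMoreGeneralTransition(FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS));
  assert(IsMoreGeneralTransition(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS));
  assert(!IsMoreGeneralTransition(FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS));
  return 0;
}
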
 bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
   // There is a constraint on the object; check.
   if (!this->IsJSObject()) return false;
@@ -174,15 +147,12 @@
 }
 
 
-bool Object::NonFailureIsHeapObject() {
-  ASSERT(!this->IsFailure());
-  return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
+bool Object::IsHeapNumber() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
 }
 
 
-TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-
-
 bool Object::IsString() {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
@@ -195,13 +165,6 @@
 }
 
 
-bool Object::IsSpecFunction() {
-  if (!Object::IsHeapObject()) return false;
-  InstanceType type = HeapObject::cast(this)->map()->instance_type();
-  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
-}
-
-
 bool Object::IsSymbol() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -433,18 +396,17 @@
 }
 
 
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-
-
-bool Object::IsFiller() {
-  if (!Object::IsHeapObject()) return false;
-  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
-  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
+bool Object::IsByteArray() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == BYTE_ARRAY_TYPE;
 }
 
 
-TYPE_CHECKER(ExternalPixelArray, EXTERNAL_PIXEL_ARRAY_TYPE)
+bool Object::IsExternalPixelArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+          EXTERNAL_PIXEL_ARRAY_TYPE;
+}
 
 
 bool Object::IsExternalArray() {
@@ -457,14 +419,60 @@
 }
 
 
-TYPE_CHECKER(ExternalByteArray, EXTERNAL_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedByteArray, EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)
-TYPE_CHECKER(ExternalShortArray, EXTERNAL_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedShortArray, EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalIntArray, EXTERNAL_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalUnsignedIntArray, EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalFloatArray, EXTERNAL_FLOAT_ARRAY_TYPE)
-TYPE_CHECKER(ExternalDoubleArray, EXTERNAL_DOUBLE_ARRAY_TYPE)
+bool Object::IsExternalByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalFloatArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_FLOAT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalDoubleArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_DOUBLE_ARRAY_TYPE;
+}
 
 
 bool MaybeObject::IsFailure() {
@@ -501,34 +509,59 @@
 
 
 bool Object::IsJSReceiver() {
-  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
 
 bool Object::IsJSObject() {
-  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
-  return IsHeapObject() &&
-      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
+  return IsJSReceiver() && !IsJSProxy();
 }
 
 
 bool Object::IsJSProxy() {
-  if (!Object::IsHeapObject()) return false;
-  InstanceType type = HeapObject::cast(this)->map()->instance_type();
-  return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+  return Object::IsHeapObject() &&
+     (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
+      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
 }
 
 
-TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+bool Object::IsJSFunctionProxy() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE;
+}
+
+
+bool Object::IsJSWeakMap() {
+  return Object::IsJSObject() &&
+      HeapObject::cast(this)->map()->instance_type() == JS_WEAK_MAP_TYPE;
+}
+
+
+bool Object::IsJSContextExtensionObject() {
+  return IsHeapObject()
+      && (HeapObject::cast(this)->map()->instance_type() ==
+          JS_CONTEXT_EXTENSION_OBJECT_TYPE);
+}
+
+
+bool Object::IsMap() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == MAP_TYPE;
+}
+
+
+bool Object::IsFixedArray() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == FIXED_ARRAY_TYPE;
+}
+
+
+bool Object::IsFixedDoubleArray() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() ==
+          FIXED_DOUBLE_ARRAY_TYPE;
+}
 
 
 bool Object::IsDescriptorArray() {
@@ -563,16 +596,6 @@
 }
 
 
-bool Object::IsTypeFeedbackCells() {
-  if (!IsFixedArray()) return false;
-  // There's actually no way to see the difference between a fixed array and
-  // a cache cells array.  Since this is used for asserts we can check that
-  // the length is plausible though.
-  if (FixedArray::cast(this)->length() % 2 != 0) return false;
-  return true;
-}
-
-
 bool Object::IsContext() {
   if (Object::IsHeapObject()) {
     Map* map = HeapObject::cast(this)->map();
@@ -594,14 +617,17 @@
 }
 
 
-bool Object::IsScopeInfo() {
+bool Object::IsSerializedScopeInfo() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
-      HeapObject::cast(this)->GetHeap()->scope_info_map();
+      HeapObject::cast(this)->GetHeap()->serialized_scope_info_map();
 }
 
 
-TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
+bool Object::IsJSFunction() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_TYPE;
+}
 
 
 template <> inline bool Is<JSFunction>(Object* obj) {
@@ -609,13 +635,44 @@
 }
 
 
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-TYPE_CHECKER(JSDate, JS_DATE_TYPE)
-TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+bool Object::IsCode() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == CODE_TYPE;
+}
+
+
+bool Object::IsOddball() {
+  ASSERT(HEAP->is_safe_to_read_maps());
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
+}
+
+
+bool Object::IsJSGlobalPropertyCell() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type()
+      == JS_GLOBAL_PROPERTY_CELL_TYPE;
+}
+
+
+bool Object::IsSharedFunctionInfo() {
+  return Object::IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       SHARED_FUNCTION_INFO_TYPE);
+}
+
+
+bool Object::IsJSValue() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == JS_VALUE_TYPE;
+}
+
+
+bool Object::IsJSMessageObject() {
+  return Object::IsHeapObject()
+      && (HeapObject::cast(this)->map()->instance_type() ==
+          JS_MESSAGE_OBJECT_TYPE);
+}
 
 
 bool Object::IsStringWrapper() {
@@ -623,7 +680,10 @@
 }
 
 
-TYPE_CHECKER(Foreign, FOREIGN_TYPE)
+bool Object::IsForeign() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == FOREIGN_TYPE;
+}
 
 
 bool Object::IsBoolean() {
@@ -632,8 +692,16 @@
 }
 
 
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
+bool Object::IsJSArray() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == JS_ARRAY_TYPE;
+}
+
+
+bool Object::IsJSRegExp() {
+  return Object::IsHeapObject()
+      && HeapObject::cast(this)->map()->instance_type() == JS_REGEXP_TYPE;
+}
 
 
 template <> inline bool Is<JSArray>(Object* obj) {
@@ -670,10 +738,7 @@
     return false;
   }
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    reinterpret_cast<JSFunctionResultCache*>(this)->
-        JSFunctionResultCacheVerify();
-  }
+  reinterpret_cast<JSFunctionResultCache*>(this)->JSFunctionResultCacheVerify();
 #endif
   return true;
 }
@@ -685,9 +750,7 @@
     return false;
   }
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
-  }
+  reinterpret_cast<NormalizedMapCache*>(this)->NormalizedMapCacheVerify();
 #endif
   return true;
 }
@@ -736,8 +799,18 @@
 }
 
 
-TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
+bool Object::IsJSGlobalObject() {
+  return IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       JS_GLOBAL_OBJECT_TYPE);
+}
+
+
+bool Object::IsJSBuiltinsObject() {
+  return IsHeapObject() &&
+      (HeapObject::cast(this)->map()->instance_type() ==
+       JS_BUILTINS_OBJECT_TYPE);
+}
 
 
 bool Object::IsUndetectableObject() {
@@ -810,11 +883,6 @@
 }
 
 
-bool Object::IsNaN() {
-  return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value());
-}
-
-
 MaybeObject* Object::ToSmi() {
   if (IsSmi()) return this;
   if (IsHeapNumber()) {
@@ -871,20 +939,21 @@
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
-#define WRITE_BARRIER(heap, object, offset, value)                      \
-  heap->incremental_marking()->RecordWrite(                             \
-      object, HeapObject::RawField(object, offset), value);             \
-  if (heap->InNewSpace(value)) {                                        \
-    heap->RecordWrite(object->address(), offset);                       \
-  }
+// TODO(isolates): Pass heap into these macros.
+#define WRITE_BARRIER(object, offset) \
+  object->GetHeap()->RecordWrite(object->address(), offset);
 
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)    \
-  if (mode == UPDATE_WRITE_BARRIER) {                                   \
-    heap->incremental_marking()->RecordWrite(                           \
-      object, HeapObject::RawField(object, offset), value);             \
-    if (heap->InNewSpace(value)) {                                      \
-      heap->RecordWrite(object->address(), offset);                     \
-    }                                                                   \
+// CONDITIONAL_WRITE_BARRIER must be issued after the actual
+// write due to the assert validating the written value.
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
+  if (mode == UPDATE_WRITE_BARRIER) { \
+    heap->RecordWrite(object->address(), offset); \
+  } else { \
+    ASSERT(mode == SKIP_WRITE_BARRIER); \
+    ASSERT(heap->InNewSpace(object) || \
+           !heap->InNewSpace(READ_FIELD(object, offset)) || \
+           Page::FromAddress(object->address())->           \
+               IsRegionDirty(object->address() + offset));  \
   }
 
 #ifndef V8_TARGET_ARCH_MIPS
@@ -905,6 +974,7 @@
   #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
 #endif  // V8_TARGET_ARCH_MIPS
 
+
 #ifndef V8_TARGET_ARCH_MIPS
   #define WRITE_DOUBLE_FIELD(p, offset, value) \
     (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -944,12 +1014,6 @@
 #define WRITE_UINT32_FIELD(p, offset, value) \
   (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
 
-#define READ_INT64_FIELD(p, offset) \
-  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT64_FIELD(p, offset, value) \
-  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
-
 #define READ_SHORT_FIELD(p, offset) \
   (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
 
@@ -1105,6 +1169,91 @@
 }
 
 
+bool MapWord::IsMarked() {
+  return (value_ & kMarkingMask) == 0;
+}
+
+
+void MapWord::SetMark() {
+  value_ &= ~kMarkingMask;
+}
+
+
+void MapWord::ClearMark() {
+  value_ |= kMarkingMask;
+}
+
+
+bool MapWord::IsOverflowed() {
+  return (value_ & kOverflowMask) != 0;
+}
+
+
+void MapWord::SetOverflow() {
+  value_ |= kOverflowMask;
+}
+
+
+void MapWord::ClearOverflow() {
+  value_ &= ~kOverflowMask;
+}
+
+
+MapWord MapWord::EncodeAddress(Address map_address, int offset) {
+  // Offset is the distance in live bytes from the first live object in the
+  // same page. The offset between two objects in the same page should not
+  // exceed the object area size of a page.
+  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
+
+  uintptr_t compact_offset = offset >> kObjectAlignmentBits;
+  ASSERT(compact_offset < (1 << kForwardingOffsetBits));
+
+  Page* map_page = Page::FromAddress(map_address);
+  ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
+
+  uintptr_t map_page_offset =
+      map_page->Offset(map_address) >> kMapAlignmentBits;
+
+  uintptr_t encoding =
+      (compact_offset << kForwardingOffsetShift) |
+      (map_page_offset << kMapPageOffsetShift) |
+      (map_page->mc_page_index << kMapPageIndexShift);
+  return MapWord(encoding);
+}
+
+
+Address MapWord::DecodeMapAddress(MapSpace* map_space) {
+  int map_page_index =
+      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
+  ASSERT_MAP_PAGE_INDEX(map_page_index);
+
+  int map_page_offset = static_cast<int>(
+      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
+      kMapAlignmentBits);
+
+  return (map_space->PageAddress(map_page_index) + map_page_offset);
+}
+
+
+int MapWord::DecodeOffset() {
+  // The offset field is represented in the kForwardingOffsetBits
+  // most-significant bits.
+  uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
+  ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
+  return static_cast<int>(offset);
+}
+
+
+MapWord MapWord::FromEncodedAddress(Address address) {
+  return MapWord(reinterpret_cast<uintptr_t>(address));
+}
+
+
+Address MapWord::ToEncodedAddress() {
+  return reinterpret_cast<Address>(value_);
+}
+
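
The MapWord helpers restored above overlay a forwarding encoding on the map word during the old mark-compact: a map-page index, a map-page offset, and a live-byte offset share one word. A minimal sketch of that style of bit packing; the field widths here are assumptions, not the real kMap*/kForwarding* constants:

#include <cassert>
#include <cstdint>

const int kPageIndexBits = 10, kPageOffsetBits = 10, kLiveOffsetBits = 12;

uint32_t Encode(uint32_t page_index, uint32_t page_offset,
                uint32_t live_offset) {
  assert(page_index < (1u << kPageIndexBits));
  assert(page_offset < (1u << kPageOffsetBits));
  assert(live_offset < (1u << kLiveOffsetBits));
  return page_index |
         (page_offset << kPageIndexBits) |
         (live_offset << (kPageIndexBits + kPageOffsetBits));
}

uint32_t DecodePageIndex(uint32_t w) {
  return w & ((1u << kPageIndexBits) - 1);
}
uint32_t DecodePageOffset(uint32_t w) {
  return (w >> kPageIndexBits) & ((1u << kPageOffsetBits) - 1);
}
uint32_t DecodeLiveOffset(uint32_t w) {
  return w >> (kPageIndexBits + kPageOffsetBits);
}

int main() {
  uint32_t w = Encode(3, 17, 200);
  assert(DecodePageIndex(w) == 3 && DecodePageOffset(w) == 17 &&
         DecodeLiveOffset(w) == 200);
  return 0;
}
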
+
 #ifdef DEBUG
 void HeapObject::VerifyObjectField(int offset) {
   VerifyPointer(READ_FIELD(this, offset));
@@ -1117,11 +1266,12 @@
 
 
 Heap* HeapObject::GetHeap() {
-  Heap* heap =
-      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
+  // During GC, the map pointer in HeapObject is used in various ways that
+  // prevent us from retrieving Heap from the map.
+  // Assert that we are not in GC; GC code must be written so that it does
+  // not pull the heap from the map.
+  ASSERT(HEAP->is_safe_to_read_maps());
+  return map()->heap();
 }
 
 
@@ -1137,17 +1287,6 @@
 
 void HeapObject::set_map(Map* value) {
   set_map_word(MapWord::FromMap(value));
-  if (value != NULL) {
-    // TODO(1600) We are passing NULL as a slot because maps can never be on
-    // evacuation candidate.
-    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
-  }
-}
-
-
-// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_no_write_barrier(Map* value) {
-  set_map_word(MapWord::FromMap(value));
 }
 
 
@@ -1190,6 +1329,47 @@
 }
 
 
+bool HeapObject::IsMarked() {
+  return map_word().IsMarked();
+}
+
+
+void HeapObject::SetMark() {
+  ASSERT(!IsMarked());
+  MapWord first_word = map_word();
+  first_word.SetMark();
+  set_map_word(first_word);
+}
+
+
+void HeapObject::ClearMark() {
+  ASSERT(IsMarked());
+  MapWord first_word = map_word();
+  first_word.ClearMark();
+  set_map_word(first_word);
+}
+
+
+bool HeapObject::IsOverflowed() {
+  return map_word().IsOverflowed();
+}
+
+
+void HeapObject::SetOverflow() {
+  MapWord first_word = map_word();
+  first_word.SetOverflow();
+  set_map_word(first_word);
+}
+
+
+void HeapObject::ClearOverflow() {
+  ASSERT(IsOverflowed());
+  MapWord first_word = map_word();
+  first_word.ClearOverflow();
+  set_map_word(first_word);
+}
+
+
 double HeapNumber::value() {
   return READ_DOUBLE_FIELD(this, kValueOffset);
 }
@@ -1214,165 +1394,22 @@
 ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
 
 
-Object** FixedArray::GetFirstElementAddress() {
-  return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
-}
-
-
-bool FixedArray::ContainsOnlySmisOrHoles() {
-  Object* the_hole = GetHeap()->the_hole_value();
-  Object** current = GetFirstElementAddress();
-  for (int i = 0; i < length(); ++i) {
-    Object* candidate = *current++;
-    if (!candidate->IsSmi() && candidate != the_hole) return false;
-  }
-  return true;
-}
-
-
 FixedArrayBase* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
+  ASSERT(array->HasValidElements());
   return static_cast<FixedArrayBase*>(array);
 }
 
-void JSObject::ValidateSmiOnlyElements() {
-#if DEBUG
-  if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
-    Heap* heap = GetHeap();
-    // Don't use elements, since integrity checks will fail if there
-    // are filler pointers in the array.
-    FixedArray* fixed_array =
-        reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
-    Map* map = fixed_array->map();
-    // Arrays that have been shifted in place can't be verified.
-    if (map != heap->raw_unchecked_one_pointer_filler_map() &&
-        map != heap->raw_unchecked_two_pointer_filler_map() &&
-        map != heap->free_space_map()) {
-      for (int i = 0; i < fixed_array->length(); i++) {
-        Object* current = fixed_array->get(i);
-        ASSERT(current->IsSmi() || current->IsTheHole());
-      }
-    }
-  }
-#endif
-}
-
-
-MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
-#if DEBUG
-  ValidateSmiOnlyElements();
-#endif
-  if ((map()->elements_kind() != FAST_ELEMENTS)) {
-    return TransitionElementsKind(FAST_ELEMENTS);
-  }
-  return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
-                                                uint32_t count,
-                                                EnsureElementsMode mode) {
-  ElementsKind current_kind = map()->elements_kind();
-  ElementsKind target_kind = current_kind;
-  ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
-  if (current_kind == FAST_ELEMENTS) return this;
-
-  Heap* heap = GetHeap();
-  Object* the_hole = heap->the_hole_value();
-  Object* heap_number_map = heap->heap_number_map();
-  for (uint32_t i = 0; i < count; ++i) {
-    Object* current = *objects++;
-    if (!current->IsSmi() && current != the_hole) {
-      if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
-          HeapObject::cast(current)->map() == heap_number_map) {
-        target_kind = FAST_DOUBLE_ELEMENTS;
-      } else {
-        target_kind = FAST_ELEMENTS;
-        break;
-      }
-    }
-  }
-
-  if (target_kind != current_kind) {
-    return TransitionElementsKind(target_kind);
-  }
-  return this;
-}
-
-
-MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
-                                                EnsureElementsMode mode) {
-  if (elements->map() != GetHeap()->fixed_double_array_map()) {
-    ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
-           elements->map() == GetHeap()->fixed_cow_array_map());
-    if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
-      mode = DONT_ALLOW_DOUBLE_ELEMENTS;
-    }
-    Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
-    return EnsureCanContainElements(objects, elements->length(), mode);
-  }
-
-  ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
-  if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
-    return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
-  }
-
-  return this;
-}
-
-
-MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
-                                                ElementsKind to_kind) {
-  Map* current_map = map();
-  ElementsKind from_kind = current_map->elements_kind();
-
-  if (from_kind == to_kind) return current_map;
-
-  Context* global_context = isolate->context()->global_context();
-  if (current_map == global_context->smi_js_array_map()) {
-    if (to_kind == FAST_ELEMENTS) {
-      return global_context->object_js_array_map();
-    } else {
-      if (to_kind == FAST_DOUBLE_ELEMENTS) {
-        return global_context->double_js_array_map();
-      } else {
-        ASSERT(to_kind == DICTIONARY_ELEMENTS);
-      }
-    }
-  }
-  return GetElementsTransitionMapSlow(to_kind);
-}
-
-
-void JSObject::set_map_and_elements(Map* new_map,
-                                    FixedArrayBase* value,
-                                    WriteBarrierMode mode) {
-  ASSERT(value->HasValidElements());
-#ifdef DEBUG
-  ValidateSmiOnlyElements();
-#endif
-  if (new_map != NULL) {
-    if (mode == UPDATE_WRITE_BARRIER) {
-      set_map(new_map);
-    } else {
-      ASSERT(mode == SKIP_WRITE_BARRIER);
-      set_map_no_write_barrier(new_map);
-    }
-  }
-  ASSERT((map()->has_fast_elements() ||
-          map()->has_fast_smi_only_elements() ||
-          (value == GetHeap()->empty_fixed_array())) ==
-         (value->map() == GetHeap()->fixed_array_map() ||
-          value->map() == GetHeap()->fixed_cow_array_map()));
-  ASSERT((value == GetHeap()->empty_fixed_array()) ||
-         (map()->has_fast_double_elements() == value->IsFixedDoubleArray()));
-  WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
-}
-
 
 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
-  set_map_and_elements(NULL, value, mode);
+  ASSERT(map()->has_fast_elements() ==
+         (value->map() == GetHeap()->fixed_array_map() ||
+          value->map() == GetHeap()->fixed_cow_array_map()));
+  ASSERT(map()->has_fast_double_elements() ==
+         value->IsFixedDoubleArray());
+  ASSERT(value->HasValidElements());
+  WRITE_FIELD(this, kElementsOffset, value);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
 }
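
The CONDITIONAL_WRITE_BARRIER restored here (and throughout this file) takes no `value` argument: the pre-incremental-marking barrier only records the address of a slot that may now hold an old-to-new pointer, and the next scavenge re-reads the slot itself. The removed five-argument form needed the value so the incremental marker could grey it. A minimal standalone sketch of such a slot-recording barrier, using toy types rather than the real macros:

    #include <unordered_set>

    enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };

    // Toy heap: new space is modelled as a set of object pointers.
    struct ToyHeap {
      std::unordered_set<const void*> new_space;
      std::unordered_set<void**> remembered_slots;  // slots that may hold old->new pointers

      bool InNewSpace(const void* p) const { return new_space.count(p) != 0; }

      // Record the slot, not the value: the collector re-reads the slot later,
      // so a purely generational barrier has no use for the written value.
      void ConditionalWriteBarrier(const void* host, void** slot,
                                   WriteBarrierMode mode) {
        if (mode == SKIP_WRITE_BARRIER) return;  // host known to be in new space
        if (!InNewSpace(host) && InNewSpace(*slot)) remembered_slots.insert(slot);
      }
    };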
 
 
@@ -1383,7 +1420,7 @@
 
 
 void JSObject::initialize_elements() {
-  ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
+  ASSERT(map()->has_fast_elements());
   ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
 }
@@ -1391,12 +1428,9 @@
 
 MaybeObject* JSObject::ResetElements() {
   Object* obj;
-  ElementsKind elements_kind = FLAG_smi_only_arrays
-      ? FAST_SMI_ONLY_ELEMENTS
-      : FAST_ELEMENTS;
-  MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
-                                                    elements_kind);
-  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
   set_map(Map::cast(obj));
   initialize_elements();
   return this;
@@ -1408,12 +1442,12 @@
 
 
 byte Oddball::kind() {
-  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
+  return READ_BYTE_FIELD(this, kKindOffset);
 }
 
 
 void Oddball::set_kind(byte value) {
-  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
+  WRITE_BYTE_FIELD(this, kKindOffset, value);
 }
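
Storing Oddball::kind() as a raw byte instead of a Smi works because a Smi is just a tagged integer; the byte form merely skips the tag. A sketch of 32-bit Smi tagging as used in V8 of this era (the shift width of 1 applies to 32-bit targets; treat the constants as illustrative):

    #include <cassert>
    #include <cstdint>

    // A Smi stores a 31-bit integer shifted left by one; tag bit 0 is 0,
    // which distinguishes it from heap object pointers (tagged 1).
    inline intptr_t SmiFromInt(int32_t value) { return static_cast<intptr_t>(value) << 1; }
    inline int32_t  SmiValue(intptr_t smi)    { return static_cast<int32_t>(smi >> 1); }

    int main() {
      assert(SmiValue(SmiFromInt(42)) == 42);
      // The removed setter above writes a whole tagged word; the restored one
      // writes a single byte with WRITE_BYTE_FIELD, skipping the tag entirely.
      return 0;
    }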
 
 
@@ -1446,14 +1480,12 @@
       return JSFunction::kSize;
     case JS_VALUE_TYPE:
       return JSValue::kSize;
-    case JS_DATE_TYPE:
-      return JSDate::kSize;
     case JS_ARRAY_TYPE:
-      return JSArray::kSize;
+      return JSValue::kSize;
     case JS_WEAK_MAP_TYPE:
       return JSWeakMap::kSize;
     case JS_REGEXP_TYPE:
-      return JSRegExp::kSize;
+      return JSValue::kSize;
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
       return JSObject::kHeaderSize;
     case JS_MESSAGE_OBJECT_TYPE:
@@ -1496,17 +1528,7 @@
   // to adjust the index here.
   int offset = GetHeaderSize() + (kPointerSize * index);
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(GetHeap(), this, offset, value);
-}
-
-
-void JSObject::SetInternalField(int index, Smi* value) {
-  ASSERT(index < GetInternalFieldCount() && index >= 0);
-  // Internal objects do follow immediately after the header, whereas in-object
-  // properties are at the end of the object. Therefore there is no need
-  // to adjust the index here.
-  int offset = GetHeaderSize() + (kPointerSize * index);
-  WRITE_FIELD(this, offset, value);
+  WRITE_BARRIER(this, offset);
 }
 
 
@@ -1532,7 +1554,7 @@
   if (index < 0) {
     int offset = map()->instance_size() + (index * kPointerSize);
     WRITE_FIELD(this, offset, value);
-    WRITE_BARRIER(GetHeap(), this, offset, value);
+    WRITE_BARRIER(this, offset);
   } else {
     ASSERT(index < properties()->length());
     properties()->set(index, value);
@@ -1566,32 +1588,16 @@
   ASSERT(index < 0);
   int offset = map()->instance_size() + (index * kPointerSize);
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
   return value;
 }
 
 
 
-void JSObject::InitializeBody(Map* map,
-                              Object* pre_allocated_value,
-                              Object* filler_value) {
-  ASSERT(!filler_value->IsHeapObject() ||
-         !GetHeap()->InNewSpace(filler_value));
-  ASSERT(!pre_allocated_value->IsHeapObject() ||
-         !GetHeap()->InNewSpace(pre_allocated_value));
-  int size = map->instance_size();
-  int offset = kHeaderSize;
-  if (filler_value != pre_allocated_value) {
-    int pre_allocated = map->pre_allocated_property_fields();
-    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
-    for (int i = 0; i < pre_allocated; i++) {
-      WRITE_FIELD(this, offset, pre_allocated_value);
-      offset += kPointerSize;
-    }
-  }
-  while (offset < size) {
-    WRITE_FIELD(this, offset, filler_value);
-    offset += kPointerSize;
+void JSObject::InitializeBody(int object_size, Object* value) {
+  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
+  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
+    WRITE_FIELD(this, offset, value);
   }
 }
 
@@ -1677,7 +1683,7 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(GetHeap(), this, offset, value);
+  WRITE_BARRIER(this, offset);
 }
 
 
@@ -1707,12 +1713,6 @@
   return result;
 }
 
-int64_t FixedDoubleArray::get_representation(int index) {
-  ASSERT(map() != HEAP->fixed_cow_array_map() &&
-         map() != HEAP->fixed_array_map());
-  ASSERT(index >= 0 && index < this->length());
-  return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
-}
 
 MaybeObject* FixedDoubleArray::get(int index) {
   if (is_the_hole(index)) {
@@ -1746,10 +1746,67 @@
 }
 
 
+void FixedDoubleArray::Initialize(FixedDoubleArray* from) {
+  int old_length = from->length();
+  ASSERT(old_length < length());
+  if (old_length * kDoubleSize >= OS::kMinComplexMemCopy) {
+    OS::MemCopy(FIELD_ADDR(this, kHeaderSize),
+                FIELD_ADDR(from, kHeaderSize),
+                old_length * kDoubleSize);
+  } else {
+    for (int i = 0; i < old_length; ++i) {
+      if (from->is_the_hole(i)) {
+        set_the_hole(i);
+      } else {
+        set(i, from->get_scalar(i));
+      }
+    }
+  }
+  int offset = kHeaderSize + old_length * kDoubleSize;
+  for (int current = from->length(); current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+}
+
+
+void FixedDoubleArray::Initialize(FixedArray* from) {
+  int old_length = from->length();
+  ASSERT(old_length < length());
+  for (int i = 0; i < old_length; i++) {
+    Object* hole_or_object = from->get(i);
+    if (hole_or_object->IsTheHole()) {
+      set_the_hole(i);
+    } else {
+      set(i, hole_or_object->Number());
+    }
+  }
+  int offset = kHeaderSize + old_length * kDoubleSize;
+  for (int current = from->length(); current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+}
+
+
+void FixedDoubleArray::Initialize(SeededNumberDictionary* from) {
+  int offset = kHeaderSize;
+  for (int current = 0; current < length(); ++current) {
+    WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
+    offset += kDoubleSize;
+  }
+  for (int i = 0; i < from->Capacity(); i++) {
+    Object* key = from->KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t entry = static_cast<uint32_t>(key->Number());
+      set(entry, from->ValueAt(i)->Number());
+    }
+  }
+}
+
+
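+
+All three Initialize() overloads above backfill the tail of the array with hole_nan_as_double(): FixedDoubleArray marks missing elements with one reserved NaN bit pattern, so hole checks are bit comparisons rather than floating-point comparisons (which any NaN would fail). A standalone sketch of the encoding; the exact bit pattern used here is an assumption for illustration:

    #include <cstdint>
    #include <cstring>

    // One reserved quiet-NaN pattern plays the role of "the hole".
    const uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFULL;

    inline double HoleNan() {
      double d;
      std::memcpy(&d, &kHoleNanBits, sizeof d);  // type-pun via memcpy, not a cast
      return d;
    }

    inline bool IsHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      // Compare bits, not values: NaN != NaN, and ordinary NaNs computed by
      // user code must not be mistaken for holes.
      return bits == kHoleNanBits;
    }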
 WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
-  Heap* heap = GetHeap();
-  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
-  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+  if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
   return UPDATE_WRITE_BARRIER;
 }
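
With the incremental-marking check gone, GetWriteBarrierMode() is again a pure generational test: an object in new space can never be the source of an old-to-new pointer, so all of its writes may skip the barrier. The intended calling pattern, as used later in this file for dictionary entries, is to fetch the mode once while allocation is provably off and reuse it for a batch of writes. A sketch of that shape (elms, values, and count are hypothetical locals; AssertNoAllocation and the three-argument set() are the V8-internal APIs shown in this file):

    AssertNoAllocation no_gc;                        // no GC can run in this scope
    WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
    for (int i = 0; i < count; i++) {
      elms->set(i, values[i], mode);                 // one barrier decision, many writes
    }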
 
@@ -1761,27 +1818,11 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
 }
 
 
-void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
-                                              int index,
-                                              Object* value) {
-  ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
-  ASSERT(index >= 0 && index < array->length());
-  int offset = kHeaderSize + index * kPointerSize;
-  WRITE_FIELD(array, offset, value);
-  Heap* heap = array->GetHeap();
-  if (heap->InNewSpace(value)) {
-    heap->RecordWrite(array->address(), offset);
-  }
-}
-
-
-void FixedArray::NoWriteBarrierSet(FixedArray* array,
-                                   int index,
-                                   Object* value) {
+void FixedArray::fast_set(FixedArray* array, int index, Object* value) {
   ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
   ASSERT(index >= 0 && index < array->length());
   ASSERT(!HEAP->InNewSpace(value));
@@ -1838,7 +1879,7 @@
                                WriteBarrierMode mode) {
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
 }
 
 
@@ -1873,12 +1914,10 @@
 }
 
 
-void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
-                                                    int first,
-                                                    int second) {
+void DescriptorArray::fast_swap(FixedArray* array, int first, int second) {
   Object* tmp = array->get(first);
-  NoIncrementalWriteBarrierSet(array, first, array->get(second));
-  NoIncrementalWriteBarrierSet(array, second, tmp);
+  fast_set(array, first, array->get(second));
+  fast_set(array, second, tmp);
 }
 
 
@@ -1953,38 +1992,19 @@
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
   ASSERT(GetType(descriptor_number) == CALLBACKS);
   Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
+  return reinterpret_cast<AccessorDescriptor*>(p->address());
 }
 
 
 bool DescriptorArray::IsProperty(int descriptor_number) {
-  Entry entry(this, descriptor_number);
-  return IsPropertyDescriptor(&entry);
+  return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
 }
 
 
-bool DescriptorArray::IsTransitionOnly(int descriptor_number) {
-  switch (GetType(descriptor_number)) {
-    case MAP_TRANSITION:
-    case CONSTANT_TRANSITION:
-    case ELEMENTS_TRANSITION:
-      return true;
-    case CALLBACKS: {
-      Object* value = GetValue(descriptor_number);
-      if (!value->IsAccessorPair()) return false;
-      AccessorPair* accessors = AccessorPair::cast(value);
-      return accessors->getter()->IsMap() && accessors->setter()->IsMap();
-    }
-    case NORMAL:
-    case FIELD:
-    case CONSTANT_FUNCTION:
-    case HANDLER:
-    case INTERCEPTOR:
-    case NULL_DESCRIPTOR:
-      return false;
-  }
-  UNREACHABLE();  // Keep the compiler happy.
-  return false;
+bool DescriptorArray::IsTransition(int descriptor_number) {
+  PropertyType t = GetType(descriptor_number);
+  return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
+      t == ELEMENTS_TRANSITION;
 }
 
 
@@ -2005,50 +2025,34 @@
 }
 
 
-void DescriptorArray::Set(int descriptor_number,
-                          Descriptor* desc,
-                          const WhitenessWitness&) {
+void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
   // Range check.
   ASSERT(descriptor_number < number_of_descriptors());
 
-  NoIncrementalWriteBarrierSet(this,
-                               ToKeyIndex(descriptor_number),
-                               desc->GetKey());
+  // Make sure none of the elements in desc are in new space.
+  ASSERT(!HEAP->InNewSpace(desc->GetKey()));
+  ASSERT(!HEAP->InNewSpace(desc->GetValue()));
+
+  fast_set(this, ToKeyIndex(descriptor_number), desc->GetKey());
   FixedArray* content_array = GetContentArray();
-  NoIncrementalWriteBarrierSet(content_array,
-                               ToValueIndex(descriptor_number),
-                               desc->GetValue());
-  NoIncrementalWriteBarrierSet(content_array,
-                               ToDetailsIndex(descriptor_number),
-                               desc->GetDetails().AsSmi());
+  fast_set(content_array, ToValueIndex(descriptor_number), desc->GetValue());
+  fast_set(content_array, ToDetailsIndex(descriptor_number),
+           desc->GetDetails().AsSmi());
 }
 
 
-void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
-    int first, int second) {
-  NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
+  Descriptor desc;
+  src->Get(src_index, &desc);
+  Set(index, &desc);
+}
+
+
+void DescriptorArray::Swap(int first, int second) {
+  fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
   FixedArray* content_array = GetContentArray();
-  NoIncrementalWriteBarrierSwap(content_array,
-                                ToValueIndex(first),
-                                ToValueIndex(second));
-  NoIncrementalWriteBarrierSwap(content_array,
-                                ToDetailsIndex(first),
-                                ToDetailsIndex(second));
-}
-
-
-DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
-    : marking_(array->GetHeap()->incremental_marking()) {
-  marking_->EnterNoMarkingScope();
-  if (array->number_of_descriptors() > 0) {
-    ASSERT(Marking::Color(array) == Marking::WHITE_OBJECT);
-    ASSERT(Marking::Color(array->GetContentArray()) == Marking::WHITE_OBJECT);
-  }
-}
-
-
-DescriptorArray::WhitenessWitness::~WhitenessWitness() {
-  marking_->LeaveNoMarkingScope();
+  fast_swap(content_array, ToValueIndex(first), ToValueIndex(second));
+  fast_swap(content_array, ToDetailsIndex(first), ToDetailsIndex(second));
 }
 
 
@@ -2080,7 +2084,7 @@
     Object* element = KeyAt(entry);
     // Empty entry.
     if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
-    if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
+    if (element != isolate->heap()->raw_unchecked_null_value() &&
         Shape::IsMatch(key, element)) return entry;
     entry = NextProbe(entry, count++, capacity);
   }
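
Two things are visible in this hunk: the revert restores null (rather than the hole) as the deleted-entry sentinel, and the lookup loop shows the open-addressing scheme shared by all these tables. Capacity is a power of two and probing steps by 1, 2, 3, ... slots, so the cumulative (triangular) offsets visit every slot exactly once and the loop must terminate at an empty entry or a match. A standalone sketch of the probe functions:

    #include <cstdint>

    inline uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
      return hash & (capacity - 1);             // capacity is a power of two
    }

    inline uint32_t NextProbe(uint32_t last, uint32_t count, uint32_t capacity) {
      return (last + count) & (capacity - 1);   // count increments on every probe
    }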
@@ -2117,11 +2121,9 @@
 CAST_ACCESSOR(DescriptorArray)
 CAST_ACCESSOR(DeoptimizationInputData)
 CAST_ACCESSOR(DeoptimizationOutputData)
-CAST_ACCESSOR(TypeFeedbackCells)
 CAST_ACCESSOR(SymbolTable)
 CAST_ACCESSOR(JSFunctionResultCache)
 CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(ScopeInfo)
 CAST_ACCESSOR(CompilationCacheTable)
 CAST_ACCESSOR(CodeCacheHashTable)
 CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
@@ -2154,12 +2156,9 @@
 CAST_ACCESSOR(JSRegExp)
 CAST_ACCESSOR(JSProxy)
 CAST_ACCESSOR(JSFunctionProxy)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSMap)
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
 CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2186,7 +2185,6 @@
 
 
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
 
@@ -2343,7 +2341,7 @@
 
 
 void SlicedString::set_parent(String* parent) {
-  ASSERT(parent->IsSeqString() || parent->IsExternalString());
+  ASSERT(parent->IsSeqString());
   WRITE_FIELD(this, kParentOffset, parent);
 }
 
@@ -2363,7 +2361,7 @@
 
 void ConsString::set_first(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kFirstOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
 }
 
 
@@ -2379,83 +2377,29 @@
 
 void ConsString::set_second(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kSecondOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
 }
 
 
-bool ExternalString::is_short() {
-  InstanceType type = map()->instance_type();
-  return (type & kShortExternalStringMask) == kShortExternalStringTag;
-}
-
-
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+ExternalAsciiString::Resource* ExternalAsciiString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
-void ExternalAsciiString::update_data_cache() {
-  if (is_short()) return;
-  const char** data_field =
-      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
-  *data_field = resource()->data();
-}
-
-
 void ExternalAsciiString::set_resource(
-    const ExternalAsciiString::Resource* resource) {
-  *reinterpret_cast<const Resource**>(
-      FIELD_ADDR(this, kResourceOffset)) = resource;
-  if (resource != NULL) update_data_cache();
+    ExternalAsciiString::Resource* resource) {
+  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
 }
 
 
-const char* ExternalAsciiString::GetChars() {
-  return resource()->data();
-}
-
-
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return GetChars()[index];
-}
-
-
-const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
-void ExternalTwoByteString::update_data_cache() {
-  if (is_short()) return;
-  const uint16_t** data_field =
-      reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
-  *data_field = resource()->data();
-}
-
-
 void ExternalTwoByteString::set_resource(
-    const ExternalTwoByteString::Resource* resource) {
-  *reinterpret_cast<const Resource**>(
-      FIELD_ADDR(this, kResourceOffset)) = resource;
-  if (resource != NULL) update_data_cache();
-}
-
-
-const uint16_t* ExternalTwoByteString::GetChars() {
-  return resource()->data();
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return GetChars()[index];
-}
-
-
-const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
-      unsigned start) {
-  return GetChars() + start;
+    ExternalTwoByteString::Resource* resource) {
+  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
 }
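
The revert strips the short-external-string variant and the cached data pointer, so resource() once again hands back exactly the object the embedder registered. For reference, the embedder side of this contract is a resource whose buffer must outlive the string; a sketch against the public API of this era (C++03, hence no override; the class name is chosen here for illustration):

    #include <v8.h>
    #include <cstring>

    class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
     public:
      explicit StaticAsciiResource(const char* s)
          : data_(s), length_(std::strlen(s)) {}
      // V8 reads straight out of this buffer; it must stay valid and
      // immutable for the lifetime of the external string.
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };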
 
 
@@ -2755,9 +2699,6 @@
   if (instance_type == BYTE_ARRAY_TYPE) {
     return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
   }
-  if (instance_type == FREE_SPACE_TYPE) {
-    return reinterpret_cast<FreeSpace*>(this)->size();
-  }
   if (instance_type == STRING_TYPE) {
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2919,6 +2860,12 @@
 }
 
 
+FixedArray* Map::unchecked_prototype_transitions() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
 }
@@ -2990,19 +2937,6 @@
 }
 
 
-bool Code::is_pregenerated() {
-  return kind() == STUB && IsPregeneratedField::decode(flags());
-}
-
-
-void Code::set_is_pregenerated(bool value) {
-  ASSERT(kind() == STUB);
-  Flags f = flags();
-  f = static_cast<Flags>(IsPregeneratedField::update(f, value));
-  set_flags(f);
-}
-
-
 bool Code::optimizable() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -3045,36 +2979,6 @@
 }
 
 
-bool Code::is_compiled_optimizable() {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  return FullCodeFlagsIsCompiledOptimizable::decode(flags);
-}
-
-
-void Code::set_compiled_optimizable(bool value) {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
-  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
-bool Code::has_self_optimization_header() {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  return FullCodeFlagsHasSelfOptimizationHeader::decode(flags);
-}
-
-
-void Code::set_self_optimization_header(bool value) {
-  ASSERT(kind() == FUNCTION);
-  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
-  flags = FullCodeFlagsHasSelfOptimizationHeader::update(flags, value);
-  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
-}
-
-
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
@@ -3198,19 +3102,6 @@
   WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
 }
 
-
-bool Code::has_function_cache() {
-  ASSERT(kind() == STUB);
-  return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
-}
-
-
-void Code::set_has_function_cache(bool flag) {
-  ASSERT(kind() == STUB);
-  WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
-}
-
-
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3296,6 +3187,48 @@
 }
 
 
+Isolate* Map::isolate() {
+  return heap()->isolate();
+}
+
+
+Heap* Map::heap() {
+  // NOTE: the address() helper is deliberately not used, to save one
+  // instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Heap* Code::heap() {
+  // NOTE: the address() helper is deliberately not used, to save one
+  // instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* Code::isolate() {
+  return heap()->isolate();
+}
+
+
+Heap* JSGlobalPropertyCell::heap() {
+  // NOTE: the address() helper is deliberately not used, to save one
+  // instruction.
+  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
+}
+
+
+Isolate* JSGlobalPropertyCell::isolate() {
+  return heap()->isolate();
+}
+
+
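+
+These restored heap()/isolate() helpers lean on page alignment: every heap object lives inside an aligned page whose header stores the owning heap, so masking the low bits of any interior pointer finds the header without consulting the object. A standalone sketch of the trick (the 8KB page size matches V8 of this vintage; treat the constant as illustrative):

    #include <cstdint>

    const uintptr_t kPageSizeBits = 13;  // 8KB pages
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    struct Heap;                    // opaque here
    struct Page { Heap* heap_; };   // header at the start of every page

    inline Page* PageFromAddress(uintptr_t addr) {
      // Any address inside the page maps back to the page header.
      return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
    }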
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3310,14 +3243,53 @@
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
   ASSERT(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
+}
+
+
+MaybeObject* Map::GetFastElementsMap() {
+  if (has_fast_elements()) return this;
+  Object* obj;
+  { MaybeObject* maybe_obj = CopyDropTransitions();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  Map* new_map = Map::cast(obj);
+  new_map->set_elements_kind(FAST_ELEMENTS);
+  isolate()->counters()->map_to_fast_elements()->Increment();
+  return new_map;
+}
+
+
+MaybeObject* Map::GetFastDoubleElementsMap() {
+  if (has_fast_double_elements()) return this;
+  Object* obj;
+  { MaybeObject* maybe_obj = CopyDropTransitions();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  Map* new_map = Map::cast(obj);
+  new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
+  isolate()->counters()->map_to_fast_double_elements()->Increment();
+  return new_map;
+}
+
+
+MaybeObject* Map::GetSlowElementsMap() {
+  if (!has_fast_elements() && !has_fast_double_elements()) return this;
+  Object* obj;
+  { MaybeObject* maybe_obj = CopyDropTransitions();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  Map* new_map = Map::cast(obj);
+  new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+  isolate()->counters()->map_to_slow_elements()->Increment();
+  return new_map;
 }
 
 
 DescriptorArray* Map::instance_descriptors() {
   Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
   if (object->IsSmi()) {
-    return GetHeap()->empty_descriptor_array();
+    return HEAP->empty_descriptor_array();
   } else {
     return DescriptorArray::cast(object);
   }
@@ -3345,8 +3317,7 @@
                                    WriteBarrierMode mode) {
   Object* object = READ_FIELD(this,
                               kInstanceDescriptorsOrBitField3Offset);
-  Heap* heap = GetHeap();
-  if (value == heap->empty_descriptor_array()) {
+  if (value == isolate()->heap()->empty_descriptor_array()) {
     clear_instance_descriptors();
     return;
   } else {
@@ -3359,8 +3330,10 @@
   }
   ASSERT(!is_shared());
   WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
-  CONDITIONAL_WRITE_BARRIER(
-      heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kInstanceDescriptorsOrBitField3Offset,
+                            mode);
 }
 
 
@@ -3389,22 +3362,14 @@
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
-ACCESSORS(JSFunction,
-          next_function_link,
-          Object,
-          kNextFunctionLinkOffset)
+ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
+ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
+                 kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3416,10 +3381,7 @@
 ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
-
-ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
-ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
 
 ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
 ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -3457,7 +3419,7 @@
           kInstanceCallHandlerOffset)
 ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
           kAccessCheckInfoOffset)
-ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
+ACCESSORS(FunctionTemplateInfo, flag, Smi, kFlagOffset)
 
 ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
 ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -3471,18 +3433,17 @@
 ACCESSORS(Script, source, Object, kSourceOffset)
 ACCESSORS(Script, name, Object, kNameOffset)
 ACCESSORS(Script, id, Object, kIdOffset)
-ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
-ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
+ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
+ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
 ACCESSORS(Script, context_data, Object, kContextOffset)
 ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
-ACCESSORS_TO_SMI(Script, type, kTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_type, kCompilationTypeOffset)
-ACCESSORS_TO_SMI(Script, compilation_state, kCompilationStateOffset)
+ACCESSORS(Script, type, Smi, kTypeOffset)
+ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
 ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
-                 kEvalFrominstructionsOffsetOffset)
+ACCESSORS(Script, eval_from_instructions_offset, Smi,
+          kEvalFrominstructionsOffsetOffset)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
@@ -3490,15 +3451,15 @@
 ACCESSORS(DebugInfo, code, Code, kPatchedCodeIndex)
 ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
 
-ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
+ACCESSORS(BreakPointInfo, code_position, Smi, kCodePositionIndex)
+ACCESSORS(BreakPointInfo, source_position, Smi, kSourcePositionIndex)
+ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
 ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
 #endif
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3508,8 +3469,6 @@
 ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
           kThisPropertyAssignmentsOffset)
 
-SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
-
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
                kHiddenPrototypeBit)
 BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
@@ -3556,8 +3515,6 @@
 SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
               kThisPropertyAssignmentsCountOffset)
 SMI_ACCESSORS(SharedFunctionInfo, opt_count, kOptCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-SMI_ACCESSORS(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
 #else
 
 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset)             \
@@ -3608,9 +3565,6 @@
                         this_property_assignments_count,
                         kThisPropertyAssignmentsCountOffset)
 PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, opt_count, kOptCountOffset)
-
-PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, deopt_counter, kDeoptCounterOffset)
 #endif
 
 
@@ -3632,7 +3586,7 @@
 
 
 bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
-  return initial_map() != GetHeap()->undefined_value();
+  return initial_map() != HEAP->undefined_value();
 }
 
 
@@ -3654,49 +3608,14 @@
 }
 
 
-LanguageMode SharedFunctionInfo::language_mode() {
-  int hints = compiler_hints();
-  if (BooleanBit::get(hints, kExtendedModeFunction)) {
-    ASSERT(BooleanBit::get(hints, kStrictModeFunction));
-    return EXTENDED_MODE;
-  }
-  return BooleanBit::get(hints, kStrictModeFunction)
-      ? STRICT_MODE : CLASSIC_MODE;
-}
-
-
-void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
-  // We only allow language mode transitions that go set the same language mode
-  // again or go up in the chain:
-  //   CLASSIC_MODE -> STRICT_MODE -> EXTENDED_MODE.
-  ASSERT(this->language_mode() == CLASSIC_MODE ||
-         this->language_mode() == language_mode ||
-         language_mode == EXTENDED_MODE);
-  int hints = compiler_hints();
-  hints = BooleanBit::set(
-      hints, kStrictModeFunction, language_mode != CLASSIC_MODE);
-  hints = BooleanBit::set(
-      hints, kExtendedModeFunction, language_mode == EXTENDED_MODE);
-  set_compiler_hints(hints);
-}
-
-
-bool SharedFunctionInfo::is_classic_mode() {
-  return !BooleanBit::get(compiler_hints(), kStrictModeFunction);
-}
-
-BOOL_GETTER(SharedFunctionInfo, compiler_hints, is_extended_mode,
-            kExtendedModeFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, strict_mode,
+               kStrictModeFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
                name_should_print_as_anonymous,
                kNameShouldPrintAsAnonymous)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
-               kDontOptimize)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -3746,23 +3665,30 @@
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kCodeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
+  ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
 }
 
 
-ScopeInfo* SharedFunctionInfo::scope_info() {
-  return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
+SerializedScopeInfo* SharedFunctionInfo::scope_info() {
+  return reinterpret_cast<SerializedScopeInfo*>(
+      READ_FIELD(this, kScopeInfoOffset));
 }
 
 
-void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
+void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
                                         WriteBarrierMode mode) {
   WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
-  CONDITIONAL_WRITE_BARRIER(GetHeap(),
-                            this,
-                            kScopeInfoOffset,
-                            reinterpret_cast<Object*>(value),
-                            mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
+}
+
+
+Smi* SharedFunctionInfo::deopt_counter() {
+  return reinterpret_cast<Smi*>(READ_FIELD(this, kDeoptCounterOffset));
+}
+
+
+void SharedFunctionInfo::set_deopt_counter(Smi* value) {
+  WRITE_FIELD(this, kDeoptCounterOffset, value);
 }
 
 
@@ -3800,8 +3726,8 @@
 
 
 void SharedFunctionInfo::set_code_age(int code_age) {
-  int hints = compiler_hints() & ~(kCodeAgeMask << kCodeAgeShift);
-  set_compiler_hints(hints | ((code_age & kCodeAgeMask) << kCodeAgeShift));
+  set_compiler_hints(compiler_hints() |
+                     ((code_age & kCodeAgeMask) << kCodeAgeShift));
 }
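
Note that the restored setter only ORs the new age into compiler_hints(), so bits from a previously larger age can survive; the version being reverted away masked the field first. The clear-then-set shape, for comparison (taken from the removed lines above):

    int hints = compiler_hints();
    hints &= ~(kCodeAgeMask << kCodeAgeShift);            // clear the old age bits
    hints |= (code_age & kCodeAgeMask) << kCodeAgeShift;  // install the new age
    set_compiler_hints(hints);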
 
 
@@ -3849,13 +3775,10 @@
 
 
 void JSFunction::set_code(Code* value) {
+  // Skip the write barrier because code is never in new space.
   ASSERT(!HEAP->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
-  GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
-      this,
-      HeapObject::RawField(this, kCodeEntryOffset),
-      value);
 }
 
 
@@ -3895,7 +3818,7 @@
 void JSFunction::set_context(Object* value) {
   ASSERT(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
-  WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
+  WRITE_BARRIER(this, kContextOffset);
 }
 
 ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3912,40 +3835,6 @@
 }
 
 
-MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
-    Map* initial_map) {
-  Context* global_context = context()->global_context();
-  Object* array_function =
-      global_context->get(Context::ARRAY_FUNCTION_INDEX);
-  if (array_function->IsJSFunction() &&
-      this == JSFunction::cast(array_function)) {
-    ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-
-    MaybeObject* maybe_map = initial_map->CopyDropTransitions();
-    Map* new_double_map = NULL;
-    if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
-    new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-    maybe_map = initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS,
-                                                   new_double_map);
-    if (maybe_map->IsFailure()) return maybe_map;
-
-    maybe_map = new_double_map->CopyDropTransitions();
-    Map* new_object_map = NULL;
-    if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
-    new_object_map->set_elements_kind(FAST_ELEMENTS);
-    maybe_map = new_double_map->AddElementsTransition(FAST_ELEMENTS,
-                                                      new_object_map);
-    if (maybe_map->IsFailure()) return maybe_map;
-
-    global_context->set_smi_js_array_map(initial_map);
-    global_context->set_double_js_array_map(new_double_map);
-    global_context->set_object_js_array_map(new_object_map);
-  }
-  set_initial_map(initial_map);
-  return this;
-}
-
-
 bool JSFunction::has_initial_map() {
   return prototype_or_initial_map()->IsMap();
 }
@@ -3988,36 +3877,7 @@
 }
 
 
-FixedArray* JSFunction::literals() {
-  ASSERT(!shared()->bound());
-  return literals_or_bindings();
-}
-
-
-void JSFunction::set_literals(FixedArray* literals) {
-  ASSERT(!shared()->bound());
-  set_literals_or_bindings(literals);
-}
-
-
-FixedArray* JSFunction::function_bindings() {
-  ASSERT(shared()->bound());
-  return literals_or_bindings();
-}
-
-
-void JSFunction::set_function_bindings(FixedArray* bindings) {
-  ASSERT(shared()->bound());
-  // Bound function literal may be initialized to the empty fixed array
-  // before the bindings are set.
-  ASSERT(bindings == GetHeap()->empty_fixed_array() ||
-         bindings->map() == GetHeap()->fixed_cow_array_map());
-  set_literals_or_bindings(bindings);
-}
-
-
 int JSFunction::NumberOfLiterals() {
-  ASSERT(!shared()->bound());
   return literals()->length();
 }
 
@@ -4032,7 +3892,7 @@
                                               Object* value) {
   ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
-  WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
+  WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
 }
 
 
@@ -4051,7 +3911,6 @@
 
 
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
-ACCESSORS(JSProxy, hash, Object, kHashOffset)
 ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
 ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
 
@@ -4064,19 +3923,22 @@
 }
 
 
-ACCESSORS(JSSet, table, Object, kTableOffset)
-ACCESSORS(JSMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, table, Object, kTableOffset)
-ACCESSORS(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
+ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
 
 
-Address Foreign::foreign_address() {
-  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kForeignAddressOffset));
+ObjectHashTable* JSWeakMap::unchecked_table() {
+  return reinterpret_cast<ObjectHashTable*>(READ_FIELD(this, kTableOffset));
 }
 
 
-void Foreign::set_foreign_address(Address value) {
-  WRITE_INTPTR_FIELD(this, kForeignAddressOffset, OffsetFrom(value));
+Address Foreign::address() {
+  return AddressFrom<Address>(READ_INTPTR_FIELD(this, kAddressOffset));
+}
+
+
+void Foreign::set_address(Address value) {
+  WRITE_INTPTR_FIELD(this, kAddressOffset, OffsetFrom(value));
 }
 
 
@@ -4090,24 +3952,6 @@
 }
 
 
-ACCESSORS(JSDate, value, Object, kValueOffset)
-ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
-ACCESSORS(JSDate, year, Object, kYearOffset)
-ACCESSORS(JSDate, month, Object, kMonthOffset)
-ACCESSORS(JSDate, day, Object, kDayOffset)
-ACCESSORS(JSDate, weekday, Object, kWeekdayOffset)
-ACCESSORS(JSDate, hour, Object, kHourOffset)
-ACCESSORS(JSDate, min, Object, kMinOffset)
-ACCESSORS(JSDate, sec, Object, kSecOffset)
-
-
-JSDate* JSDate::cast(Object* obj) {
-  ASSERT(obj->IsJSDate());
-  ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
-  return reinterpret_cast<JSDate*>(obj);
-}
-
-
 ACCESSORS(JSMessageObject, type, String, kTypeOffset)
 ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
 ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
@@ -4126,11 +3970,10 @@
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
-ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
-INT_ACCESSORS(Code, ic_age, kICAgeOffset)
+ACCESSORS(Code, next_code_flushing_candidate,
+          Object, kNextCodeFlushingCandidateOffset)
+
 
 byte* Code::instruction_start() {
   return FIELD_ADDR(this, kHeaderSize);
@@ -4173,8 +4016,9 @@
 }
 
 
-bool Code::contains(byte* inner_pointer) {
-  return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
+bool Code::contains(byte* pc) {
+  return (instruction_start() <= pc) &&
+      (pc <= instruction_start() + instruction_size());
 }
 
 
@@ -4253,7 +4097,6 @@
   if (value->IsSmi()) {
     fa->set_unchecked(index, Smi::cast(value));
   } else {
-    // We only do this during GC, so we don't need to notify the write barrier.
     fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
   }
 }
@@ -4261,23 +4104,15 @@
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-#if DEBUG
-  FixedArrayBase* fixed_array =
-      reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
-  Map* map = fixed_array->map();
-    ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
-            (map == GetHeap()->fixed_array_map() ||
-             map == GetHeap()->fixed_cow_array_map())) ||
-           (kind == FAST_DOUBLE_ELEMENTS &&
-            (fixed_array->IsFixedDoubleArray() ||
-            fixed_array == GetHeap()->empty_fixed_array())) ||
-           (kind == DICTIONARY_ELEMENTS &&
-            fixed_array->IsFixedArray() &&
-            fixed_array->IsDictionary()) ||
-           (kind > DICTIONARY_ELEMENTS));
-    ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) ||
-           (elements()->IsFixedArray() && elements()->length() >= 2));
-#endif
+  ASSERT((kind == FAST_ELEMENTS &&
+          (elements()->map() == GetHeap()->fixed_array_map() ||
+           elements()->map() == GetHeap()->fixed_cow_array_map())) ||
+         (kind == FAST_DOUBLE_ELEMENTS &&
+          elements()->IsFixedDoubleArray()) ||
+         (kind == DICTIONARY_ELEMENTS &&
+          elements()->IsFixedArray() &&
+          elements()->IsDictionary()) ||
+         (kind > DICTIONARY_ELEMENTS));
   return kind;
 }
 
@@ -4292,18 +4127,6 @@
 }
 
 
-bool JSObject::HasFastSmiOnlyElements() {
-  return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
-}
-
-
-bool JSObject::HasFastTypeElements() {
-  ElementsKind elements_kind = GetElementsKind();
-  return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-      elements_kind == FAST_ELEMENTS;
-}
-
-
 bool JSObject::HasFastDoubleElements() {
   return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
 }
@@ -4314,11 +4137,6 @@
 }
 
 
-bool JSObject::HasNonStrictArgumentsElements() {
-  return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
-}
-
-
 bool JSObject::HasExternalArrayElements() {
   HeapObject* array = elements();
   ASSERT(array != NULL);
@@ -4361,8 +4179,16 @@
 }
 
 
+bool JSObject::AllowsSetElementsLength() {
+  bool result = elements()->IsFixedArray() ||
+      elements()->IsFixedDoubleArray();
+  ASSERT(result == !HasExternalArrayElements());
+  return result;
+}
+
+
 MaybeObject* JSObject::EnsureWritableFastElements() {
-  ASSERT(HasFastTypeElements());
+  ASSERT(HasFastElements());
   FixedArray* elems = FixedArray::cast(elements());
   Isolate* isolate = GetIsolate();
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4426,11 +4252,7 @@
 }
 
 
-void StringHasher::AddCharacter(uint32_t c) {
-  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-    AddSurrogatePair(c);  // Not inlined.
-    return;
-  }
+void StringHasher::AddCharacter(uc32 c) {
   // Use the Jenkins one-at-a-time hash function to update the hash
   // for the given character.
   raw_running_hash_ += c;
@@ -4459,12 +4281,8 @@
 }
 
 
-void StringHasher::AddCharacterNoIndex(uint32_t c) {
+void StringHasher::AddCharacterNoIndex(uc32 c) {
   ASSERT(!is_array_index());
-  if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-    AddSurrogatePairNoIndex(c);  // Not inlined.
-    return;
-  }
   raw_running_hash_ += c;
   raw_running_hash_ += (raw_running_hash_ << 10);
   raw_running_hash_ ^= (raw_running_hash_ >> 6);
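+
+Both AddCharacter() and AddCharacterNoIndex() now accept a full uc32 again (the surrogate-pair split-off is gone) and feed the same mixing step. That step is the per-character half of the Jenkins one-at-a-time hash; a standalone reference version, including the final avalanche that lives in the hasher's finishing code rather than here:

    #include <cstddef>
    #include <cstdint>

    uint32_t JenkinsOneAtATime(const char* key, size_t len) {
      uint32_t hash = 0;
      for (size_t i = 0; i < len; i++) {
        hash += static_cast<unsigned char>(key[i]);
        hash += hash << 10;
        hash ^= hash >> 6;
      }
      hash += hash << 3;   // finalization spreads the last inputs over all bits
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }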
@@ -4548,18 +4366,44 @@
 }
 
 
-MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
-  return IsJSProxy()
-      ? JSProxy::cast(this)->GetIdentityHash(flag)
-      : JSObject::cast(this)->GetIdentityHash(flag);
+bool JSObject::HasHiddenPropertiesObject() {
+  ASSERT(!IsJSGlobalProxy());
+  return GetPropertyAttributePostInterceptor(this,
+                                             GetHeap()->hidden_symbol(),
+                                             false) != ABSENT;
 }
 
 
-bool JSReceiver::HasElement(uint32_t index) {
-  if (IsJSProxy()) {
-    return JSProxy::cast(this)->HasElementWithHandler(index);
-  }
-  return JSObject::cast(this)->HasElementWithReceiver(this, index);
+Object* JSObject::GetHiddenPropertiesObject() {
+  ASSERT(!IsJSGlobalProxy());
+  PropertyAttributes attributes;
+  // You can't install a getter on a property indexed by the hidden symbol,
+  // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+  // object.
+  Object* result =
+      GetLocalPropertyPostInterceptor(this,
+                                      GetHeap()->hidden_symbol(),
+                                      &attributes)->ToObjectUnchecked();
+  return result;
+}
+
+
+MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
+  ASSERT(!IsJSGlobalProxy());
+  return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                    hidden_obj,
+                                    DONT_ENUM,
+                                    kNonStrictMode);
+}
+
+
+bool JSObject::HasHiddenProperties() {
+  return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
+}
+
+
+bool JSObject::HasElement(uint32_t index) {
+  return HasElementWithReceiver(this, index);
 }
 
 
@@ -4622,7 +4466,7 @@
   WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
   FixedArray::set(index, key, mode);
   FixedArray::set(index+1, value, mode);
-  FixedArray::set(index+2, details.AsSmi());
+  FixedArray::fast_set(this, index+2, details.AsSmi());
 }
 
 
@@ -4682,45 +4526,47 @@
 }
 
 
-template <int entrysize>
-bool ObjectHashTableShape<entrysize>::IsMatch(Object* key, Object* other) {
-  return key->SameValue(other);
+bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
+  return key == JSObject::cast(other);
 }
 
 
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::Hash(Object* key) {
-  MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
-  return Smi::cast(maybe_hash->ToObjectChecked())->value();
+uint32_t ObjectHashTableShape::Hash(JSObject* key) {
+  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+  ASSERT(!maybe_hash->IsFailure());
+  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
 }
 
 
-template <int entrysize>
-uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key,
-                                                        Object* other) {
-  MaybeObject* maybe_hash = other->GetHash(OMIT_CREATION);
-  return Smi::cast(maybe_hash->ToObjectChecked())->value();
+uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
+  MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
+      JSObject::OMIT_CREATION);
+  ASSERT(!maybe_hash->IsFailure());
+  return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
 }
 
 
-template <int entrysize>
-MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) {
+MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
   return key;
 }
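
The restored shape keys the table on JSObject identity: IsMatch is pointer equality, and the hash is the object's lazily created identity hash, so hashing stays stable even though the GC moves objects. A toy model of that scheme:

    #include <cstdint>

    // Each object gets an immutable hash the first time it is used as a key;
    // afterwards the hash never depends on the object's (movable) address.
    struct ToyObject { uint32_t identity_hash; };  // 0 means "not yet assigned"

    inline uint32_t GetOrCreateIdentityHash(ToyObject* obj, uint32_t* next_hash) {
      if (obj->identity_hash == 0) obj->identity_hash = ++*next_hash;  // CREATE
      return obj->identity_hash;
    }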
 
 
+void ObjectHashTable::RemoveEntry(int entry) {
+  RemoveEntry(entry, GetHeap());
+}
+
+
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
   //  - MarkCompactCollector::MarkUnmarkedObject
-  //  - IncrementalMarking::Step
   ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
   WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
 }
 
 
 void JSArray::EnsureSize(int required_size) {
-  ASSERT(HasFastTypeElements());
+  ASSERT(HasFastElements());
   FixedArray* elts = FixedArray::cast(elements());
   const int kArraySizeThatFitsComfortablyInNewSpace = 128;
   if (elts->length() < required_size) {
@@ -4738,31 +4584,13 @@
 
 
 void JSArray::set_length(Smi* length) {
-  // Don't need a write barrier for a Smi.
   set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
 }
 
 
-bool JSArray::AllowsSetElementsLength() {
-  bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
-  ASSERT(result == !HasExternalArrayElements());
-  return result;
-}
-
-
-MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
-  MaybeObject* maybe_result = EnsureCanContainElements(
-      storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
-  if (maybe_result->IsFailure()) return maybe_result;
-  ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
-          GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
-         ((storage->map() != GetHeap()->fixed_double_array_map()) &&
-          ((GetElementsKind() == FAST_ELEMENTS) ||
-           (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
-            FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
-  set_elements(storage);
+void JSArray::SetContent(FixedArray* storage) {
   set_length(Smi::FromInt(storage->length()));
-  return this;
+  set_elements(storage);
 }
 
 
@@ -4772,57 +4600,6 @@
 }
 
 
-MaybeObject* FixedDoubleArray::Copy() {
-  if (length() == 0) return this;
-  return GetHeap()->CopyFixedDoubleArray(this);
-}
-
-
-void TypeFeedbackCells::SetAstId(int index, Smi* id) {
-  set(1 + index * 2, id);
-}
-
-
-Smi* TypeFeedbackCells::AstId(int index) {
-  return Smi::cast(get(1 + index * 2));
-}
-
-
-void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
-  set(index * 2, cell);
-}
-
-
-JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
-  return JSGlobalPropertyCell::cast(get(index * 2));
-}
-
-
-Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
-  return isolate->factory()->the_hole_value();
-}
-
-
-Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
-  return isolate->factory()->undefined_value();
-}
-
-
-Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
-  return heap->raw_unchecked_the_hole_value();
-}
-
-
-SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
-              kIcWithTypeinfoCountOffset)
-ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
-          kTypeFeedbackCellsOffset)
-
-
-SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
-
-
 Relocatable::Relocatable(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
@@ -4845,14 +4622,14 @@
 
 void Foreign::ForeignIterateBody(ObjectVisitor* v) {
   v->VisitExternalReference(
-      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
 template<typename StaticVisitor>
 void Foreign::ForeignIterateBody() {
   StaticVisitor::VisitExternalReference(
-      reinterpret_cast<Address*>(FIELD_ADDR(this, kForeignAddressOffset)));
+      reinterpret_cast<Address*>(FIELD_ADDR(this, kAddressOffset)));
 }
 
 
@@ -4905,27 +4682,22 @@
 
 #undef SLOT_ADDR
 
-#undef TYPE_CHECKER
+
 #undef CAST_ACCESSOR
 #undef INT_ACCESSORS
-#undef ACCESSORS
-#undef ACCESSORS_TO_SMI
 #undef SMI_ACCESSORS
-#undef BOOL_GETTER
-#undef BOOL_ACCESSORS
+#undef ACCESSORS
 #undef FIELD_ADDR
 #undef READ_FIELD
 #undef WRITE_FIELD
 #undef WRITE_BARRIER
 #undef CONDITIONAL_WRITE_BARRIER
+#undef READ_MEMADDR_FIELD
+#undef WRITE_MEMADDR_FIELD
 #undef READ_DOUBLE_FIELD
 #undef WRITE_DOUBLE_FIELD
 #undef READ_INT_FIELD
 #undef WRITE_INT_FIELD
-#undef READ_INTPTR_FIELD
-#undef WRITE_INTPTR_FIELD
-#undef READ_UINT32_FIELD
-#undef WRITE_UINT32_FIELD
 #undef READ_SHORT_FIELD
 #undef WRITE_SHORT_FIELD
 #undef READ_BYTE_FIELD
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 38e6138..0398572 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -82,18 +82,12 @@
     case HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberPrint(out);
       break;
-    case FIXED_DOUBLE_ARRAY_TYPE:
-      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
-      break;
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayPrint(out);
       break;
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayPrint(out);
       break;
-    case FREE_SPACE_TYPE:
-      FreeSpace::cast(this)->FreeSpacePrint(out);
-      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
       break;
@@ -151,9 +145,6 @@
       PrintF(out, "Value wrapper around:");
       JSValue::cast(this)->value()->Print(out);
       break;
-    case JS_DATE_TYPE:
-      JSDate::cast(this)->value()->Print(out);
-      break;
     case CODE_TYPE:
       Code::cast(this)->CodePrint(out);
       break;
@@ -198,11 +189,6 @@
 }
 
 
-void FreeSpace::FreeSpacePrint(FILE* out) {
-  PrintF(out, "free space, size %d", Size());
-}
-
-
 void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
   PrintF(out, "external pixel array");
 }
@@ -270,37 +256,16 @@
           descs->GetCallbacksObject(i)->ShortPrint(out);
           PrintF(out, " (callback)\n");
           break;
-        case ELEMENTS_TRANSITION: {
-          PrintF(out, "(elements transition to ");
-          Object* descriptor_contents = descs->GetValue(i);
-          if (descriptor_contents->IsMap()) {
-            Map* map = Map::cast(descriptor_contents);
-            PrintElementsKind(out, map->elements_kind());
-          } else {
-            FixedArray* map_array = FixedArray::cast(descriptor_contents);
-            for (int i = 0; i < map_array->length(); ++i) {
-              Map* map = Map::cast(map_array->get(i));
-              if (i != 0) {
-                PrintF(out, ", ");
-              }
-              PrintElementsKind(out, map->elements_kind());
-            }
-          }
-          PrintF(out, ")\n");
-          break;
-        }
         case MAP_TRANSITION:
-          PrintF(out, "(map transition)\n");
+          PrintF(out, " (map transition)\n");
           break;
         case CONSTANT_TRANSITION:
-          PrintF(out, "(constant transition)\n");
+          PrintF(out, " (constant transition)\n");
           break;
         case NULL_DESCRIPTOR:
-          PrintF(out, "(null descriptor)\n");
+          PrintF(out, " (null descriptor)\n");
           break;
-        case NORMAL:  // only in slow mode
-        case HANDLER:  // only in lookup results, not in descriptors
-        case INTERCEPTOR:  // only in lookup results, not in descriptors
+        default:
           UNREACHABLE();
           break;
       }
@@ -312,10 +277,7 @@
 
 
 void JSObject::PrintElements(FILE* out) {
-  // Don't call GetElementsKind, its validation code can cause the printer to
-  // fail when debugging.
-  switch (map()->elements_kind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
+  switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
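
The comment removed above records why the newer printer read map()->elements_kind() directly: GetElementsKind() runs validation that can assert while inspecting a half-transitioned object under a debugger. A minimal standalone sketch of that checked-accessor vs. raw-read split (all types here are hypothetical):

#include <cassert>
#include <cstdio>

class HeapThing {
 public:
  enum Kind { FAST_ELEMENTS, DICTIONARY_ELEMENTS };
  explicit HeapThing(Kind k) : kind_(k), consistent_(false) {}

  Kind GetElementsKind() {
    // Checked accessor: would abort a debugging session if the object is
    // observed mid-transition.
    assert(consistent_ && "object not in a consistent state");
    return kind_;
  }

  Kind raw_kind() const { return kind_; }  // raw read, no validation

  void DebugPrint(FILE* out) const {
    fprintf(out, "elements kind = %d\n", static_cast<int>(raw_kind()));
  }

 private:
  Kind kind_;
  bool consistent_;
};

int main() {
  HeapThing t(HeapThing::FAST_ELEMENTS);
  t.DebugPrint(stdout);  // safe: bypasses the asserting accessor
  return 0;
}
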
@@ -423,13 +385,8 @@
 
 void JSObject::JSObjectPrint(FILE* out) {
   PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
-  // Don't call GetElementsKind, its validation code can cause the printer to
-  // fail when debugging.
-  PrintElementsKind(out, this->map()->elements_kind());
-  PrintF(out,
-         "]\n - prototype = %p\n",
-         reinterpret_cast<void*>(GetPrototype()));
+  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
   PrintF(out, " {\n");
   PrintProperties(out);
   PrintElements(out);
@@ -449,9 +406,6 @@
     case EXTERNAL_ASCII_SYMBOL_TYPE:
     case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
-    case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
-    case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
-    case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
     case ASCII_STRING_TYPE: return "ASCII_STRING";
     case STRING_TYPE: return "TWO_BYTE_STRING";
     case CONS_STRING_TYPE:
@@ -459,12 +413,8 @@
     case EXTERNAL_ASCII_STRING_TYPE:
     case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
     case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
-    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
-    case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
-    case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
-    case FREE_SPACE_TYPE: return "FREE_SPACE";
     case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
     case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -508,9 +458,7 @@
   PrintF(out, " - type: %s\n", TypeToString(instance_type()));
   PrintF(out, " - instance size: %d\n", instance_size());
   PrintF(out, " - inobject properties: %d\n", inobject_properties());
-  PrintF(out, " - elements kind: ");
-  PrintElementsKind(out, elements_kind());
-  PrintF(out, "\n - pre-allocated property fields: %d\n",
+  PrintF(out, " - pre-allocated property fields: %d\n",
       pre_allocated_property_fields());
   PrintF(out, " - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
@@ -557,21 +505,6 @@
 }
 
 
-void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "TypeFeedbackInfo");
-  PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
-         ic_total_count(), ic_with_typeinfo_count());
-  PrintF(out, "\n - type_feedback_cells: ");
-  type_feedback_cells()->FixedArrayPrint(out);
-}
-
-
-void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
-  PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
-}
-
-
 void FixedArray::FixedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "FixedArray");
   PrintF(out, " - length: %d", length());
@@ -583,20 +516,6 @@
 }
 
 
-void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "FixedDoubleArray");
-  PrintF(out, " - length: %d", length());
-  for (int i = 0; i < length(); i++) {
-    if (is_the_hole(i)) {
-      PrintF(out, "\n  [%d]: <the hole>", i);
-    } else {
-      PrintF(out, "\n  [%d]: %g", i, get_scalar(i));
-    }
-  }
-  PrintF(out, "\n");
-}
-
-
 void JSValue::JSValuePrint(FILE* out) {
   HeapObject::PrintHeader(out, "ValueObject");
   value()->Print(out);
@@ -649,7 +568,7 @@
 
 
 // This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ASCII characters
+// Since the string can also be in two-byte encoding, non-ascii characters
 // will be ignored in the output.
 char* String::ToAsciiArray() {
   // Static so that subsequent calls frees previously allocated space.
@@ -663,37 +582,11 @@
 }
 
 
-static const char* const weekdays[] = {
-  "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
-};
-
-void JSDate::JSDatePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSDate");
-  PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - value = ");
-  value()->Print(out);
-  if (!year()->IsSmi()) {
-    PrintF(out, " - time = NaN\n");
-  } else {
-    PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
-           weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
-           year()->IsSmi() ? Smi::cast(year())->value() : -1,
-           month()->IsSmi() ? Smi::cast(month())->value() : -1,
-           day()->IsSmi() ? Smi::cast(day())->value() : -1,
-           hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
-           min()->IsSmi() ? Smi::cast(min())->value() : -1,
-           sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
-  }
-}
-
-
 void JSProxy::JSProxyPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSProxy");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - handler = ");
   handler()->Print(out);
-  PrintF(out, " - hash = ");
-  hash()->Print(out);
   PrintF(out, "\n");
 }
 
@@ -714,6 +607,7 @@
 void JSWeakMap::JSWeakMapPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSWeakMap");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+  PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
   PrintF(out, " - table = ");
   table()->ShortPrint(out);
   PrintF(out, "\n");
@@ -813,7 +707,7 @@
 
 
 void Foreign::ForeignPrint(FILE* out) {
-  PrintF(out, "foreign address : %p", foreign_address());
+  PrintF(out, "foreign address : %p", address());
 }
 
 
@@ -832,15 +726,6 @@
 }
 
 
-void AccessorPair::AccessorPairPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AccessorPair");
-  PrintF(out, "\n - getter: ");
-  getter()->ShortPrint(out);
-  PrintF(out, "\n - setter: ");
-  setter()->ShortPrint(out);
-}
-
-
 void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "AccessCheckInfo");
   PrintF(out, "\n - named_callback: ");
@@ -917,15 +802,10 @@
 
 void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "ObjectTemplateInfo");
-  PrintF(out, " - tag: ");
-  tag()->ShortPrint(out);
-  PrintF(out, "\n - property_list: ");
-  property_list()->ShortPrint(out);
   PrintF(out, "\n - constructor: ");
   constructor()->ShortPrint(out);
   PrintF(out, "\n - internal_field_count: ");
   internal_field_count()->ShortPrint(out);
-  PrintF(out, "\n");
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
deleted file mode 100644
index 627d1bc..0000000
--- a/src/objects-visiting-inl.h
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
-
-
-namespace v8 {
-namespace internal {
-
-template<typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
-  table_.Register(kVisitShortcutCandidate,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitConsString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitSlicedString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  SlicedString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitFixedArray,
-                  &FlexibleBodyVisitor<StaticVisitor,
-                  FixedArray::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
-  table_.Register(kVisitGlobalContext,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Context::ScavengeBodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitByteArray, &VisitByteArray);
-
-  table_.Register(kVisitSharedFunctionInfo,
-                  &FixedBodyVisitor<StaticVisitor,
-                  SharedFunctionInfo::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
-  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
-  table_.Register(kVisitJSFunction,
-                  &JSObjectVisitor::
-                      template VisitSpecialized<JSFunction::kSize>);
-
-  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
-  table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
-
-  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
-  table_.template RegisterSpecializations<DataObjectVisitor,
-                                          kVisitDataObject,
-                                          kVisitDataObjectGeneric>();
-
-  table_.template RegisterSpecializations<JSObjectVisitor,
-                                          kVisitJSObject,
-                                          kVisitJSObjectGeneric>();
-  table_.template RegisterSpecializations<StructVisitor,
-                                          kVisitStruct,
-                                          kVisitStructGeneric>();
-}
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the
-  // templated CodeIterateBody (below).  They should be kept in sync.
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kHandlerTableOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-  IteratePointer(v, kTypeFeedbackInfoOffset);
-
-  RelocIterator it(this, mode_mask);
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(v);
-  }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the
-  // non-templated CodeIterateBody (above).  They should be kept in sync.
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
-
-  RelocIterator it(this, mode_mask);
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_OBJECTS_VISITING_INL_H_
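
The deleted header had moved this Initialize() body (and the two CodeIterateBody definitions) out of objects-visiting.h; the revert folds them back in below. The core pattern is a table of callbacks indexed by visitor id. A self-contained sketch of that table, with invented ids and callbacks:

#include <cstdio>

enum VisitorId { kVisitA, kVisitB, kVisitorIdCount };

class VisitorTable {
 public:
  typedef int (*Callback)(void* object);  // returns visited size, say
  void Register(VisitorId id, Callback cb) { callbacks_[id] = cb; }
  Callback GetVisitor(VisitorId id) const { return callbacks_[id]; }
 private:
  Callback callbacks_[kVisitorIdCount];
};

static int VisitA(void*) { return 8; }
static int VisitB(void*) { return 16; }

int main() {
  VisitorTable table;
  table.Register(kVisitA, &VisitA);
  table.Register(kVisitB, &VisitB);
  std::printf("%d\n", table.GetVisitor(kVisitB)(nullptr));  // prints 16
  return 0;
}
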
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index c7c8a87..0aa21dd 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -64,7 +64,7 @@
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject,
                                    kVisitDataObjectGeneric,
-                                   instance_size);
+                                   ExternalString::kSize);
     }
     UNREACHABLE();
   }
@@ -73,9 +73,6 @@
     case BYTE_ARRAY_TYPE:
       return kVisitByteArray;
 
-    case FREE_SPACE_TYPE:
-      return kVisitFreeSpace;
-
     case FIXED_ARRAY_TYPE:
       return kVisitFixedArray;
 
@@ -94,16 +91,6 @@
     case JS_GLOBAL_PROPERTY_CELL_TYPE:
       return kVisitPropertyCell;
 
-    case JS_SET_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSSet::kSize);
-
-    case JS_MAP_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSMap::kSize);
-
     case JS_WEAK_MAP_TYPE:
       return kVisitJSWeakMap;
 
@@ -134,7 +121,6 @@
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
-    case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
     case JS_GLOBAL_OBJECT_TYPE:
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 26e79ae..4ce1bd0 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -30,6 +30,22 @@
 
 #include "allocation.h"
 
+#if V8_TARGET_ARCH_IA32
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/assembler-mips.h"
+#include "mips/assembler-mips-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
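
The includes restored above select exactly one assembler backend at preprocessing time and hard-fail otherwise. The same select-one-backend pattern in miniature (macro names invented; normally the build system defines the target macro):

#include <cstdio>

#define MY_TARGET_X64 1  // stand-in for a build-system define

#if defined(MY_TARGET_IA32)
static const char* kArch = "ia32";
#elif defined(MY_TARGET_X64)
static const char* kArch = "x64";
#elif defined(MY_TARGET_ARM)
static const char* kArch = "arm";
#elif defined(MY_TARGET_MIPS)
static const char* kArch = "mips";
#else
#error Unsupported target architecture.
#endif

int main() { std::printf("target: %s\n", kArch); return 0; }
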
@@ -51,7 +67,6 @@
     kVisitSeqTwoByteString,
     kVisitShortcutCandidate,
     kVisitByteArray,
-    kVisitFreeSpace,
     kVisitFixedArray,
     kVisitFixedDoubleArray,
     kVisitGlobalContext,
@@ -135,7 +150,7 @@
            (base == kVisitJSObject));
     ASSERT(IsAligned(object_size, kPointerSize));
     ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
+    ASSERT(object_size < Page::kMaxHeapObjectSize);
 
     const VisitorId specialization = static_cast<VisitorId>(
         base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
@@ -157,10 +172,6 @@
     }
   }
 
-  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
-    return reinterpret_cast<Callback>(callbacks_[id]);
-  }
-
   inline Callback GetVisitor(Map* map) {
     return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
   }
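
GetVisitorIdForSize (defined earlier in this header) maps a small object size onto a size-specialized visitor slot: specialization = base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords, falling back to a generic id above the cap. A standalone sketch with illustrative constants (kPointerSizeLog2 = 3 assumes 8-byte pointers):

#include <cstdio>

static const int kPointerSizeLog2 = 3;       // illustrative
static const int kMinObjectSizeInWords = 2;  // illustrative

static int GetVisitorIdForSize(int base, int generic, int object_size,
                               int max_specialized_size) {
  if (object_size > max_specialized_size) return generic;
  // One specialized slot per word of object size, starting at `base`.
  return base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords;
}

int main() {
  std::printf("%d\n", GetVisitorIdForSize(10, 99, 16, 64));   // 10
  std::printf("%d\n", GetVisitorIdForSize(10, 99, 24, 64));   // 11
  std::printf("%d\n", GetVisitorIdForSize(10, 99, 128, 64));  // 99 (generic)
  return 0;
}
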
@@ -225,7 +236,7 @@
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     int object_size = BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
+        map->heap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -236,7 +247,7 @@
   static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
     ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
+        map->heap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -250,7 +261,7 @@
  public:
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
+        map->heap(),
         object,
         BodyDescriptor::kStartOffset,
         BodyDescriptor::kEndOffset);
@@ -278,7 +289,63 @@
 template<typename StaticVisitor>
 class StaticNewSpaceVisitor : public StaticVisitorBase {
  public:
-  static void Initialize();
+  static void Initialize() {
+    table_.Register(kVisitShortcutCandidate,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      ConsString::BodyDescriptor,
+                                      int>::Visit);
+
+    table_.Register(kVisitConsString,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      ConsString::BodyDescriptor,
+                                      int>::Visit);
+
+    table_.Register(kVisitSlicedString,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      SlicedString::BodyDescriptor,
+                                      int>::Visit);
+
+    table_.Register(kVisitFixedArray,
+                    &FlexibleBodyVisitor<StaticVisitor,
+                                         FixedArray::BodyDescriptor,
+                                         int>::Visit);
+
+    table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+    table_.Register(kVisitGlobalContext,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      Context::ScavengeBodyDescriptor,
+                                      int>::Visit);
+
+    table_.Register(kVisitByteArray, &VisitByteArray);
+
+    table_.Register(kVisitSharedFunctionInfo,
+                    &FixedBodyVisitor<StaticVisitor,
+                                      SharedFunctionInfo::BodyDescriptor,
+                                      int>::Visit);
+
+    table_.Register(kVisitJSWeakMap, &VisitJSObject);
+
+    table_.Register(kVisitJSRegExp, &VisitJSObject);
+
+    table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+    table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+    table_.Register(kVisitJSFunction,
+                    &JSObjectVisitor::
+                        template VisitSpecialized<JSFunction::kSize>);
+
+    table_.RegisterSpecializations<DataObjectVisitor,
+                                   kVisitDataObject,
+                                   kVisitDataObjectGeneric>();
+    table_.RegisterSpecializations<JSObjectVisitor,
+                                   kVisitJSObject,
+                                   kVisitJSObjectGeneric>();
+    table_.RegisterSpecializations<StructVisitor,
+                                   kVisitStruct,
+                                   kVisitStructGeneric>();
+  }
 
   static inline int IterateBody(Map* map, HeapObject* obj) {
     return table_.GetVisitor(map)(map, obj);
@@ -312,10 +379,6 @@
         SeqTwoByteStringSize(map->instance_type());
   }
 
-  static inline int VisitFreeSpace(Map* map, HeapObject* object) {
-    return FreeSpace::cast(object)->Size();
-  }
-
   class DataObjectVisitor {
    public:
     template<int object_size>
@@ -347,6 +410,55 @@
   StaticNewSpaceVisitor<StaticVisitor>::table_;
 
 
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // Use the relocation info pointer before it is visited by
+  // the heap compaction in the next statement.
+  RelocIterator it(this, mode_mask);
+
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(v);
+  }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // Use the relocation info pointer before it is visited by
+  // the heap compaction in the next statement.
+  RelocIterator it(this, mode_mask);
+
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_OBJECTS_VISITING_H_
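
CodeIterateBody above filters relocation entries with a bitmask: each relocation mode owns one bit, ModeMask(mode) sets it, and the iterator skips entries whose bit is clear. The masking scheme in isolation (enum values invented):

#include <cstdio>

enum Mode { EMBEDDED_OBJECT, EXTERNAL_REFERENCE, JS_RETURN, RUNTIME_ENTRY,
            kModeCount };

static int ModeMask(Mode m) { return 1 << m; }

int main() {
  int mask = ModeMask(EMBEDDED_OBJECT) | ModeMask(RUNTIME_ENTRY);
  for (int m = 0; m < kModeCount; ++m) {
    bool visit = (mask & ModeMask(static_cast<Mode>(m))) != 0;
    std::printf("mode %d: %s\n", m, visit ? "visit" : "skip");
  }
  return 0;
}
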
diff --git a/src/objects.cc b/src/objects.cc
index 64d85a0..88ebbf4 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,16 +33,13 @@
 #include "codegen.h"
 #include "debug.h"
 #include "deoptimizer.h"
-#include "date.h"
 #include "elements.h"
 #include "execution.h"
 #include "full-codegen.h"
 #include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
-#include "objects-visiting-inl.h"
 #include "macro-assembler.h"
-#include "mark-compact.h"
 #include "safepoint-table.h"
 #include "string-stream.h"
 #include "utils.h"
@@ -56,11 +53,10 @@
 namespace v8 {
 namespace internal {
 
-void PrintElementsKind(FILE* out, ElementsKind kind) {
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
-  PrintF(out, "%s", accessor->name());
-}
-
+// Getters and setters are stored in a fixed array property.  These are
+// constants for their indices.
+const int kGetterIndex = 0;
+const int kSetterIndex = 1;
 
 MUST_USE_RESULT static MaybeObject* CreateJSValue(JSFunction* constructor,
                                                   Object* value) {
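
The kGetterIndex/kSetterIndex constants reintroduced above reflect the reverted storage scheme: a getter/setter pair lives at fixed slots of a plain two-element FixedArray rather than in a dedicated AccessorPair object. Sketched standalone, with function pointers standing in for heap objects:

#include <cstdio>

static const int kGetterIndex = 0;
static const int kSetterIndex = 1;

typedef const char* (*Accessor)();
static const char* MyGetter() { return "getter called"; }
static const char* MySetter() { return "setter called"; }

int main() {
  // Two-slot "fixed array" holding the callbacks.
  Accessor pair[2];
  pair[kGetterIndex] = &MyGetter;
  pair[kSetterIndex] = &MySetter;
  std::printf("%s / %s\n", pair[kGetterIndex](), pair[kSetterIndex]());
  return 0;
}
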
@@ -136,27 +132,34 @@
 
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
-  if (IsJSReceiver()) {
-    holder = this;
-  } else {
+  if (IsSmi()) {
     Context* global_context = Isolate::Current()->context()->global_context();
-    if (IsNumber()) {
-      holder = global_context->number_function()->instance_prototype();
-    } else if (IsString()) {
+    holder = global_context->number_function()->instance_prototype();
+  } else {
+    HeapObject* heap_object = HeapObject::cast(this);
+    if (heap_object->IsJSObject()) {
+      return JSObject::cast(this)->Lookup(name, result);
+    } else if (heap_object->IsJSProxy()) {
+      return result->HandlerResult();
+    }
+    Context* global_context = Isolate::Current()->context()->global_context();
+    if (heap_object->IsString()) {
       holder = global_context->string_function()->instance_prototype();
-    } else if (IsBoolean()) {
+    } else if (heap_object->IsHeapNumber()) {
+      holder = global_context->number_function()->instance_prototype();
+    } else if (heap_object->IsBoolean()) {
       holder = global_context->boolean_function()->instance_prototype();
     }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
-  JSReceiver::cast(holder)->Lookup(name, result);
+  JSObject::cast(holder)->Lookup(name, result);
 }
 
 
 MaybeObject* Object::GetPropertyWithReceiver(Object* receiver,
                                              String* name,
                                              PropertyAttributes* attributes) {
-  LookupResult result(name->GetIsolate());
+  LookupResult result;
   Lookup(name, &result);
   MaybeObject* value = GetProperty(receiver, &result, name, attributes);
   ASSERT(*attributes <= ABSENT);
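
Object::Lookup above picks a lookup holder for primitive receivers: smis, heap numbers, strings, and booleans have no own properties, so the search starts at the matching wrapper function's instance prototype. The routing in schematic form (types and names are stand-ins):

#include <cstdio>

enum Type { SMI, HEAP_NUMBER, STRING, BOOLEAN, JS_OBJECT };

static const char* LookupHolderFor(Type t) {
  switch (t) {
    case SMI:
    case HEAP_NUMBER: return "Number.prototype";
    case STRING:      return "String.prototype";
    case BOOLEAN:     return "Boolean.prototype";
    case JS_OBJECT:   return "the receiver itself";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", LookupHolderFor(SMI));     // Number.prototype
  std::printf("%s\n", LookupHolderFor(STRING));  // String.prototype
  return 0;
}
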
@@ -164,9 +167,10 @@
 }
 
 
-MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
-                                               Object* structure,
-                                               String* name) {
+MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
+                                             Object* structure,
+                                             String* name,
+                                             Object* holder) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
   // data structure used to store the callbacks.  Eventually foreign
@@ -174,7 +178,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->foreign_address());
+            Foreign::cast(structure)->address());
     MaybeObject* value = (callback->getter)(receiver, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     return value;
@@ -187,9 +191,10 @@
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
     JSObject* self = JSObject::cast(receiver);
+    JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
     LOG(isolate, ApiNamedPropertyAccess("load", self, name));
-    CustomArguments args(isolate, data->data(), self, this);
+    CustomArguments args(isolate, data->data(), self, holder_handle);
     v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
@@ -205,11 +210,11 @@
   }
 
   // __defineGetter__ callback
-  if (structure->IsAccessorPair()) {
-    Object* getter = AccessorPair::cast(structure)->getter();
-    if (getter->IsSpecFunction()) {
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      return Object::GetPropertyWithDefinedGetter(receiver,
+                                                  JSFunction::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -220,75 +225,50 @@
 }
 
 
-MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
-                                             String* name_raw) {
-  Isolate* isolate = GetIsolate();
+MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
+                                            String* name_raw,
+                                            Object* handler_raw) {
+  Isolate* isolate = name_raw->GetIsolate();
   HandleScope scope(isolate);
   Handle<Object> receiver(receiver_raw);
   Handle<Object> name(name_raw);
+  Handle<Object> handler(handler_raw);
 
-  Handle<Object> args[] = { receiver, name };
-  Handle<Object> result = CallTrap(
-    "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    // Get the derived `get' property.
+    trap = isolate->derived_get_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = { receiver.location(), name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
 
   return *result;
 }
 
 
-Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
-  Isolate* isolate = object->IsHeapObject()
-      ? Handle<HeapObject>::cast(object)->GetIsolate()
-      : Isolate::Current();
-  CALL_HEAP_FUNCTION(isolate, object->GetElement(index), Object);
-}
-
-
-MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
-                                            uint32_t index) {
-  String* name;
-  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
-  if (!maybe->To<String>(&name)) return maybe;
-  return GetPropertyWithHandler(receiver, name);
-}
-
-
-MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
-                                            Object* value,
-                                            StrictModeFlag strict_mode) {
-  String* name;
-  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
-  if (!maybe->To<String>(&name)) return maybe;
-  return SetPropertyWithHandler(name, value, NONE, strict_mode);
-}
-
-
-bool JSProxy::HasElementWithHandler(uint32_t index) {
-  String* name;
-  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
-  if (!maybe->To<String>(&name)) return maybe;
-  return HasPropertyWithHandler(name);
-}
-
-
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
-                                                  JSReceiver* getter) {
+                                                  JSFunction* getter) {
   HandleScope scope;
-  Handle<JSReceiver> fun(getter);
+  Handle<JSFunction> fun(JSFunction::cast(getter));
   Handle<Object> self(receiver);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = fun->GetHeap()->isolate()->debug();
   // Handle stepping into a getter if step into is active.
-  // TODO(rossberg): should this apply to getters that are function proxies?
-  if (debug->StepInActive() && fun->IsJSFunction()) {
-    debug->HandleStepIn(
-        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+  if (debug->StepInActive()) {
+    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
   }
 #endif
-
   bool has_pending_exception;
   Handle<Object> result =
-      Execution::Call(fun, self, 0, NULL, &has_pending_exception, true);
+      Execution::Call(fun, self, 0, NULL, &has_pending_exception);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *result;
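
GetPropertyWithHandler above fetches the handler's own "get" trap and, when it is undefined, substitutes the isolate's derived get trap before calling it. The same dispatch-with-fallback shape sketched with std::function (Handler and DerivedGet are stand-ins, not V8 API; requires C++11):

#include <cstdio>
#include <functional>
#include <map>
#include <string>

typedef std::function<std::string(const std::string&)> Trap;

struct Handler {
  std::map<std::string, Trap> traps;  // the proxy handler's own traps
};

static std::string DerivedGet(const std::string& name) {
  return "derived get of " + name;
}

static std::string GetWithHandler(const Handler& h, const std::string& name) {
  std::map<std::string, Trap>::const_iterator it = h.traps.find("get");
  // Fall back to the derived trap when the handler defines none.
  Trap trap = (it == h.traps.end()) ? Trap(&DerivedGet) : it->second;
  return trap(name);
}

int main() {
  Handler h;
  std::printf("%s\n", GetWithHandler(h, "x").c_str());  // derived get of x
  h.traps["get"] = [](const std::string& n) { return "trapped " + n; };
  std::printf("%s\n", GetWithHandler(h, "x").c_str());  // trapped x
  return 0;
}
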
@@ -310,8 +290,10 @@
           AccessorInfo* info = AccessorInfo::cast(obj);
           if (info->all_can_read()) {
             *attributes = result->GetAttributes();
-            return result->holder()->GetPropertyWithCallback(
-                receiver, result->GetCallbackObject(), name);
+            return GetPropertyWithCallback(receiver,
+                                           result->GetCallbackObject(),
+                                           name,
+                                           result->holder());
           }
         }
         break;
@@ -320,7 +302,7 @@
       case FIELD:
       case CONSTANT_FUNCTION: {
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r(GetIsolate());
+        LookupResult r;
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -333,7 +315,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r(GetIsolate());
+        LookupResult r;
         result->holder()->LookupRealNamedProperty(name, &r);
         if (r.IsProperty()) {
           return GetPropertyWithFailedAccessCheck(receiver,
@@ -380,7 +362,7 @@
       case CONSTANT_FUNCTION: {
         if (!continue_search) break;
         // Search ALL_CAN_READ accessors in prototype chain.
-        LookupResult r(GetIsolate());
+        LookupResult r;
         result->holder()->LookupRealNamedPropertyInPrototypes(name, &r);
         if (r.IsProperty()) {
           return GetPropertyAttributeWithFailedAccessCheck(receiver,
@@ -394,7 +376,7 @@
       case INTERCEPTOR: {
         // If the object has an interceptor, try real named properties.
         // No access check in GetPropertyAttributeWithInterceptor.
-        LookupResult r(GetIsolate());
+        LookupResult r;
         if (continue_search) {
           result->holder()->LookupRealNamedProperty(name, &r);
         } else {
@@ -414,7 +396,7 @@
     }
   }
 
-  GetIsolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+  GetHeap()->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
   return ABSENT;
 }
 
@@ -444,16 +426,6 @@
 }
 
 
-Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object,
-                                               Handle<String> key,
-                                               Handle<Object> value,
-                                               PropertyDetails details) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetNormalizedProperty(*key, *value, details),
-                     Object);
-}
-
-
 MaybeObject* JSObject::SetNormalizedProperty(String* name,
                                              Object* value,
                                              PropertyDetails details) {
@@ -514,7 +486,7 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(cell->GetHeap()->the_hole_value());
+      cell->set_value(cell->heap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -548,21 +520,6 @@
 }
 
 
-Handle<Object> Object::GetProperty(Handle<Object> object,
-                                   Handle<Object> receiver,
-                                   LookupResult* result,
-                                   Handle<String> key,
-                                   PropertyAttributes* attributes) {
-  Isolate* isolate = object->IsHeapObject()
-      ? Handle<HeapObject>::cast(object)->GetIsolate()
-      : Isolate::Current();
-  CALL_HEAP_FUNCTION(
-      isolate,
-      object->GetProperty(*receiver, result, *key, attributes),
-      Object);
-}
-
-
 MaybeObject* Object::GetProperty(Object* receiver,
                                  LookupResult* result,
                                  String* name,
@@ -580,9 +537,7 @@
   // holder in the prototype chain.
   // Proxy handlers do not use the proxy's prototype, so we can skip this.
   if (!result->IsHandler()) {
-    Object* last = result->IsProperty()
-        ? result->holder()
-        : Object::cast(heap->null_value());
+    Object* last = result->IsProperty() ? result->holder() : heap->null_value();
     ASSERT(this != this->GetPrototype());
     for (Object* current = this; true; current = current->GetPrototype()) {
       if (current->IsAccessCheckNeeded()) {
@@ -611,26 +566,30 @@
   }
   *attributes = result->GetAttributes();
   Object* value;
+  JSObject* holder = result->holder();
   switch (result->type()) {
     case NORMAL:
-      value = result->holder()->GetNormalizedProperty(result);
+      value = holder->GetNormalizedProperty(result);
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case FIELD:
-      value = result->holder()->FastPropertyAt(result->GetFieldIndex());
+      value = holder->FastPropertyAt(result->GetFieldIndex());
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case CONSTANT_FUNCTION:
       return result->GetConstantFunction();
     case CALLBACKS:
-      return result->holder()->GetPropertyWithCallback(
-          receiver, result->GetCallbackObject(), name);
-    case HANDLER:
-      return result->proxy()->GetPropertyWithHandler(receiver, name);
+      return GetPropertyWithCallback(receiver,
+                                     result->GetCallbackObject(),
+                                     name,
+                                     holder);
+    case HANDLER: {
+      JSProxy* proxy = JSProxy::cast(this);
+      return GetPropertyWithHandler(receiver, name, proxy->handler());
+    }
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
-      return result->holder()->GetPropertyWithInterceptor(
-          recvr, name, attributes);
+      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
     }
     case MAP_TRANSITION:
     case ELEMENTS_TRANSITION:
@@ -654,21 +613,28 @@
   for (holder = this;
        holder != heap->null_value();
        holder = holder->GetPrototype()) {
-    if (!holder->IsJSObject()) {
-      Isolate* isolate = heap->isolate();
-      Context* global_context = isolate->context()->global_context();
-      if (holder->IsNumber()) {
-        holder = global_context->number_function()->instance_prototype();
-      } else if (holder->IsString()) {
-        holder = global_context->string_function()->instance_prototype();
-      } else if (holder->IsBoolean()) {
-        holder = global_context->boolean_function()->instance_prototype();
-      } else if (holder->IsJSProxy()) {
-        return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
-      } else {
-        // Undefined and null have no indexed properties.
-        ASSERT(holder->IsUndefined() || holder->IsNull());
-        return heap->undefined_value();
+    if (holder->IsSmi()) {
+      Context* global_context = Isolate::Current()->context()->global_context();
+      holder = global_context->number_function()->instance_prototype();
+    } else {
+      HeapObject* heap_object = HeapObject::cast(holder);
+      if (!heap_object->IsJSObject()) {
+        Isolate* isolate = heap->isolate();
+        Context* global_context = isolate->context()->global_context();
+        if (heap_object->IsString()) {
+          holder = global_context->string_function()->instance_prototype();
+        } else if (heap_object->IsHeapNumber()) {
+          holder = global_context->number_function()->instance_prototype();
+        } else if (heap_object->IsBoolean()) {
+          holder = global_context->boolean_function()->instance_prototype();
+        } else if (heap_object->IsJSProxy()) {
+          // TODO(rossberg): do something
+          return heap->undefined_value();  // For now...
+        } else {
+          // Undefined and null have no indexed properties.
+          ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
+          return heap->undefined_value();
+        }
       }
     }
 
@@ -692,7 +658,10 @@
 
     if (js_object->elements() != heap->empty_fixed_array()) {
       MaybeObject* result = js_object->GetElementsAccessor()->Get(
-          receiver, js_object, index);
+          js_object->elements(),
+          index,
+          js_object,
+          receiver);
       if (result != heap->the_hole_value()) return result;
     }
   }
@@ -732,49 +701,6 @@
 }
 
 
-MaybeObject* Object::GetHash(CreationFlag flag) {
-  // The object is either a number, a string, an odd-ball,
-  // a real JS object, or a Harmony proxy.
-  if (IsNumber()) {
-    uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
-    return Smi::FromInt(hash & Smi::kMaxValue);
-  }
-  if (IsString()) {
-    uint32_t hash = String::cast(this)->Hash();
-    return Smi::FromInt(hash);
-  }
-  if (IsOddball()) {
-    uint32_t hash = Oddball::cast(this)->to_string()->Hash();
-    return Smi::FromInt(hash);
-  }
-  if (IsJSReceiver()) {
-    return JSReceiver::cast(this)->GetIdentityHash(flag);
-  }
-
-  UNREACHABLE();
-  return Smi::FromInt(0);
-}
-
-
-bool Object::SameValue(Object* other) {
-  if (other == this) return true;
-  if (!IsHeapObject() || !other->IsHeapObject()) return false;
-
-  // The object is either a number, a string, an odd-ball,
-  // a real JS object, or a Harmony proxy.
-  if (IsNumber() && other->IsNumber()) {
-    double this_value = Number();
-    double other_value = other->Number();
-    return (this_value == other_value) ||
-        (isnan(this_value) && isnan(other_value));
-  }
-  if (IsString() && other->IsString()) {
-    return String::cast(this)->Equals(String::cast(other));
-  }
-  return false;
-}
-
-
 void Object::ShortPrint(FILE* out) {
   HeapStringAllocator allocator;
   StringStream accumulator(&allocator);
@@ -892,7 +818,7 @@
                     len - first_length);
       }
       cs->set_first(result);
-      cs->set_second(heap->empty_string(), SKIP_WRITE_BARRIER);
+      cs->set_second(heap->empty_string());
       return result;
     }
     default:
@@ -918,40 +844,39 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kShortSize) {
+  if (size < ExternalString::kSize) {
+    // The string is too small to fit an external String in its place. This can
+    // only happen for zero length strings.
     return false;
   }
+  ASSERT(size >= ExternalString::kSize);
   bool is_ascii = this->IsAsciiRepresentation();
   bool is_symbol = this->IsSymbol();
+  int length = this->length();
+  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
   // reinitializing the fields.
-  if (size >= ExternalString::kSize) {
-    this->set_map_no_write_barrier(
-        is_symbol
-            ? (is_ascii ?  heap->external_symbol_with_ascii_data_map()
-                        :  heap->external_symbol_map())
-            : (is_ascii ?  heap->external_string_with_ascii_data_map()
-                        :  heap->external_string_map()));
-  } else {
-    this->set_map_no_write_barrier(
-        is_symbol
-            ? (is_ascii ?  heap->short_external_symbol_with_ascii_data_map()
-                        :  heap->short_external_symbol_map())
-            : (is_ascii ?  heap->short_external_string_with_ascii_data_map()
-                        :  heap->short_external_string_map()));
-  }
+  this->set_map(is_ascii ?
+                heap->external_string_with_ascii_data_map() :
+                heap->external_string_map());
   ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
+  self->set_length(length);
+  self->set_hash_field(hash_field);
   self->set_resource(resource);
-  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
+  // Additionally make the object into an external symbol if the original string
+  // was a symbol to start with.
+  if (is_symbol) {
+    self->Hash();  // Force regeneration of the hash value.
+    // Now morph this external string into an external symbol.
+    this->set_map(is_ascii ?
+                  heap->external_symbol_with_ascii_data_map() :
+                  heap->external_symbol_map());
+  }
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
-  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
-                                               new_size - size);
-  }
   return true;
 }
 
@@ -970,33 +895,34 @@
 #endif  // DEBUG
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kShortSize) {
+  if (size < ExternalString::kSize) {
+    // The string is too small to fit an external String in its place. This can
+    // only happen for zero length strings.
     return false;
   }
+  ASSERT(size >= ExternalString::kSize);
   bool is_symbol = this->IsSymbol();
+  int length = this->length();
+  int hash_field = this->hash_field();
 
   // Morph the object to an external string by adjusting the map and
-  // reinitializing the fields.  Use short version if space is limited.
-  if (size >= ExternalString::kSize) {
-    this->set_map_no_write_barrier(
-        is_symbol ? heap->external_ascii_symbol_map()
-                  : heap->external_ascii_string_map());
-  } else {
-    this->set_map_no_write_barrier(
-        is_symbol ? heap->short_external_ascii_symbol_map()
-                  : heap->short_external_ascii_string_map());
-  }
+  // reinitializing the fields.
+  this->set_map(heap->external_ascii_string_map());
   ExternalAsciiString* self = ExternalAsciiString::cast(this);
+  self->set_length(length);
+  self->set_hash_field(hash_field);
   self->set_resource(resource);
-  if (is_symbol) self->Hash();  // Force regeneration of the hash value.
+  // Additionally make the object into an external symbol if the original string
+  // was a symbol to start with.
+  if (is_symbol) {
+    self->Hash();  // Force regeneration of the hash value.
+    // Now morph this external string into an external symbol.
+    this->set_map(heap->external_ascii_symbol_map());
+  }
 
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
-  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
-                                               new_size - size);
-  }
   return true;
 }
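
Both MakeExternal variants reverted above morph a sequential string in place: the object keeps its address, its map is swapped to the external representation, and the unused tail is overwritten with filler so linear heap walks stay valid. A toy model of that in-place morph (the Header layout is entirely invented):

#include <cassert>
#include <cstdio>

struct Header { int type; int size; };  // lives at the object's address

static const int SEQ_STRING = 0;
static const int EXTERNAL_STRING = 1;
static const int FILLER = 2;

static bool MakeExternal(char* object, int old_size, int external_size) {
  if (old_size < external_size) return false;  // cannot morph in place
  Header* h = reinterpret_cast<Header*>(object);
  h->type = EXTERNAL_STRING;  // the "map swap": same address, new type
  h->size = external_size;
  if (old_size > external_size) {
    // Stamp the tail with dead wood so a linear heap walk stays valid.
    Header* filler = reinterpret_cast<Header*>(object + external_size);
    filler->type = FILLER;
    filler->size = old_size - external_size;
  }
  return true;
}

int main() {
  alignas(Header) char heap[64] = {};
  Header* h = reinterpret_cast<Header*>(heap);
  h->type = SEQ_STRING;
  h->size = 64;
  bool ok = MakeExternal(heap, 64, 16);
  assert(ok);
  (void)ok;
  std::printf("type=%d size=%d\n", h->type, h->size);  // type=1 size=16
  return 0;
}
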
 
@@ -1068,11 +994,12 @@
   switch (map()->instance_type()) {
     case JS_ARRAY_TYPE: {
       double length = JSArray::cast(this)->length()->Number();
-      accumulator->Add("<JS Array[%u]>", static_cast<uint32_t>(length));
+      accumulator->Add("<JS array[%u]>", static_cast<uint32_t>(length));
       break;
     }
     case JS_WEAK_MAP_TYPE: {
-      accumulator->Add("<JS WeakMap>");
+      int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
+      accumulator->Add("<JS WeakMap[%d]>", elements);
       break;
     }
     case JS_REGEXP_TYPE: {
@@ -1100,7 +1027,7 @@
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
       Map* map_of_this = map();
-      Heap* heap = GetHeap();
+      Heap* heap = map_of_this->heap();
       Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
@@ -1122,6 +1049,7 @@
                        global_object ? "Global Object: " : "",
                        vowel ? "n" : "");
                 accumulator->Put(str);
+                accumulator->Put('>');
                 printed = true;
               }
             }
@@ -1142,28 +1070,8 @@
 }
 
 
-void JSObject::PrintElementsTransition(
-    FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
-    ElementsKind to_kind, FixedArrayBase* to_elements) {
-  if (from_kind != to_kind) {
-    PrintF(file, "elements transition [");
-    PrintElementsKind(file, from_kind);
-    PrintF(file, " -> ");
-    PrintElementsKind(file, to_kind);
-    PrintF(file, "] in ");
-    JavaScriptFrame::PrintTop(file, false, true);
-    PrintF(file, " for ");
-    ShortPrint(file);
-    PrintF(file, " from ");
-    from_elements->ShortPrint(file);
-    PrintF(file, " to ");
-    to_elements->ShortPrint(file);
-    PrintF(file, "\n");
-  }
-}
-
-
 void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
+  // if (!HEAP->InNewSpace(this)) PrintF("*", this);
   Heap* heap = GetHeap();
   if (!heap->Contains(this)) {
     accumulator->Add("!!!INVALID POINTER!!!");
@@ -1186,21 +1094,14 @@
   }
   switch (map()->instance_type()) {
     case MAP_TYPE:
-      accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
+      accumulator->Add("<Map>");
       break;
     case FIXED_ARRAY_TYPE:
       accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
       break;
-    case FIXED_DOUBLE_ARRAY_TYPE:
-      accumulator->Add("<FixedDoubleArray[%u]>",
-                       FixedDoubleArray::cast(this)->length());
-      break;
     case BYTE_ARRAY_TYPE:
       accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
       break;
-    case FREE_SPACE_TYPE:
-      accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
-      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       accumulator->Add("<ExternalPixelArray[%u]>",
                        ExternalPixelArray::cast(this)->length());
@@ -1339,10 +1240,7 @@
     case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_VALUE_TYPE:
-    case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
-    case JS_SET_TYPE:
-    case JS_MAP_TYPE:
     case JS_WEAK_MAP_TYPE:
     case JS_REGEXP_TYPE:
     case JS_GLOBAL_PROXY_TYPE:
@@ -1379,7 +1277,6 @@
     case HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
-    case FREE_SPACE_TYPE:
     case EXTERNAL_PIXEL_ARRAY_TYPE:
     case EXTERNAL_BYTE_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1611,6 +1508,8 @@
     String* name,
     JSFunction* function,
     PropertyAttributes attributes) {
+  ASSERT(!GetHeap()->InNewSpace(function));
+
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes);
   Object* new_descriptors;
@@ -1634,7 +1533,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
-  Heap* heap = GetHeap();
+  Heap* heap = old_map->heap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1710,10 +1609,10 @@
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
   Map* map_of_this = map();
-  Heap* heap = GetHeap();
+  Heap* heap = map_of_this->heap();
   if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
-      return value;
+      return heap->undefined_value();
     } else {
       Handle<Object> args[1] = {Handle<String>(name)};
       return heap->isolate()->Throw(
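
The reverted line above makes a sloppy-mode store to a non-extensible object evaluate to undefined instead of the assigned value; strict mode still throws. The two behaviors side by side in a standalone sketch (Obj and SetProperty are stand-ins):

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

struct Obj {
  bool extensible;
  std::map<std::string, int> props;
};

static void SetProperty(Obj* o, const std::string& name, int value,
                        bool strict_mode) {
  if (!o->extensible && o->props.find(name) == o->props.end()) {
    if (strict_mode) throw std::runtime_error("object is not extensible");
    return;  // sloppy mode: the write is silently dropped
  }
  o->props[name] = value;
}

int main() {
  Obj o;
  o.extensible = false;
  SetProperty(&o, "x", 1, false);        // ignored
  std::printf("%zu\n", o.props.size());  // 0
  try {
    SetProperty(&o, "x", 1, true);       // throws
  } catch (const std::exception& e) {
    std::puts(e.what());
  }
  return 0;
}
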
@@ -1725,7 +1624,7 @@
     // Ensure the descriptor array does not get too big.
     if (map_of_this->instance_descriptors()->number_of_descriptors() <
         DescriptorArray::kMaxNumberOfDescriptors) {
-      if (value->IsJSFunction()) {
+      if (value->IsJSFunction() && !heap->InNewSpace(value)) {
         return AddConstantFunctionProperty(name,
                                            JSFunction::cast(value),
                                            attributes);
@@ -1752,21 +1651,13 @@
     PropertyAttributes attributes,
     StrictModeFlag strict_mode) {
   // Check local property, ignore interceptor.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsFound()) {
     // An existing property, a map transition or a null descriptor was
     // found.  Use set property to handle all these cases.
     return SetProperty(&result, name, value, attributes, strict_mode);
   }
-  bool found = false;
-  MaybeObject* result_object;
-  result_object = SetPropertyWithCallbackSetterInPrototypes(name,
-                                                            value,
-                                                            attributes,
-                                                            &found,
-                                                            strict_mode);
-  if (found) return result_object;
   // Add a new real property.
   return AddProperty(name, value, attributes, strict_mode);
 }
@@ -1780,7 +1671,7 @@
   int new_enumeration_index = 0;  // 0 means "Use the next available index."
   if (old_index != -1) {
     // All calls to ReplaceSlowProperty have had all transitions removed.
-    ASSERT(!dictionary->ContainsTransition(old_index));
+    ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
     new_enumeration_index = dictionary->DetailsAt(old_index).index();
   }
 
@@ -1805,7 +1696,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == GetIsolate()->context()->global_context()->
+  if (map() == old_map->heap()->isolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1930,22 +1821,11 @@
 }
 
 
-Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
-                                       Handle<String> key,
-                                       Handle<Object> value,
-                                       PropertyAttributes attributes,
-                                       StrictModeFlag strict_mode) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetProperty(*key, *value, attributes, strict_mode),
-                     Object);
-}
-
-
 MaybeObject* JSReceiver::SetProperty(String* name,
                                      Object* value,
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookup(name, &result);
   return SetProperty(&result, name, value, attributes, strict_mode);
 }
@@ -1970,7 +1850,7 @@
   if (structure->IsForeign()) {
     AccessorDescriptor* callback =
         reinterpret_cast<AccessorDescriptor*>(
-            Foreign::cast(structure)->foreign_address());
+            Foreign::cast(structure)->address());
     MaybeObject* obj = (callback->setter)(this,  value, callback->data);
     RETURN_IF_SCHEDULED_EXCEPTION(isolate);
     if (obj->IsFailure()) return obj;
@@ -1998,11 +1878,10 @@
     return *value_handle;
   }
 
-  if (structure->IsAccessorPair()) {
-    Object* setter = AccessorPair::cast(structure)->setter();
-    if (setter->IsSpecFunction()) {
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-     return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
+  if (structure->IsFixedArray()) {
+    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    if (setter->IsJSFunction()) {
+     return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -2021,24 +1900,22 @@
 }
 
 
-MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
-                                                      Object* value) {
+MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
+                                                    Object* value) {
   Isolate* isolate = GetIsolate();
   Handle<Object> value_handle(value, isolate);
-  Handle<JSReceiver> fun(setter, isolate);
-  Handle<JSReceiver> self(this, isolate);
+  Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
+  Handle<JSObject> self(this, isolate);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = isolate->debug();
   // Handle stepping into a setter if step into is active.
-  // TODO(rossberg): should this apply to getters that are function proxies?
-  if (debug->StepInActive() && fun->IsJSFunction()) {
-    debug->HandleStepIn(
-        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
+  if (debug->StepInActive()) {
+    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
   }
 #endif
   bool has_pending_exception;
-  Handle<Object> argv[] = { value_handle };
-  Execution::Call(fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception);
+  Object** argv[] = { value_handle.location() };
+  Execution::Call(fun, self, 1, argv, &has_pending_exception);
   // Check for pending exception and return the result.
   if (has_pending_exception) return Failure::Exception();
   return *value_handle;
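
The hunk above swaps the newer Handle<Object> argv[] calling convention back to the 3.6-era Object** argv[] built from value_handle.location(). Both conventions work for the same reason: a handle is a pointer to a GC-visible slot, so passing either the handle or its location keeps the argument valid if a moving collection relocates the object. A toy model of that indirection (deliberately simplified; V8's real Handle machinery is more involved):

#include <cstdio>

template <typename T>
struct Handle {
  T** slot;  // points at a handle-scope slot the GC may rewrite
  explicit Handle(T** s) : slot(s) {}
  T** location() const { return slot; }
  T* operator*() const { return *slot; }
};

struct Obj { int value; };

int main() {
  Obj before = {1}, after = {2};
  Obj* slot = &before;  // the handle-scope slot
  Handle<Obj> h(&slot);

  Obj** argv_raw[] = { h.location() };  // 3.6-style Object** argv[]
  Handle<Obj> argv_handles[] = { h };   // newer Handle<Object> argv[]

  slot = &after;  // a "GC" moves the object and updates the slot

  // Both views observe the relocated object.
  printf("raw=%d handle=%d\n",
         (*argv_raw[0])->value, (*argv_handles[0])->value);  // raw=2 handle=2
}
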
@@ -2051,9 +1928,6 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
-    if (pt->IsJSProxy()) {
-      return result->HandlerResult(JSProxy::cast(pt));
-    }
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
     if (result->IsProperty()) {
       if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -2074,16 +1948,6 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
-    if (pt->IsJSProxy()) {
-      String* name;
-      MaybeObject* maybe = GetHeap()->Uint32ToString(index);
-      if (!maybe->To<String>(&name)) {
-        *found = true;  // Force abort
-        return maybe;
-      }
-      return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
-          name, value, NONE, strict_mode, found);
-    }
     if (!JSObject::cast(pt)->HasDictionaryElements()) {
       continue;
     }
@@ -2106,48 +1970,6 @@
   return heap->the_hole_value();
 }
 
-MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
-    String* name,
-    Object* value,
-    PropertyAttributes attributes,
-    bool* found,
-    StrictModeFlag strict_mode) {
-  Heap* heap = GetHeap();
-  // We could not find a local property so let's check whether there is an
-  // accessor that wants to handle the property.
-  LookupResult accessor_result(heap->isolate());
-  LookupCallbackSetterInPrototypes(name, &accessor_result);
-  if (accessor_result.IsFound()) {
-    *found = true;
-    if (accessor_result.type() == CALLBACKS) {
-      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
-                                     name,
-                                     value,
-                                     accessor_result.holder(),
-                                     strict_mode);
-    } else if (accessor_result.type() == HANDLER) {
-      // There is a proxy in the prototype chain. Invoke its
-      // getPropertyDescriptor trap.
-      bool found = false;
-      // SetPropertyWithHandlerIfDefiningSetter can cause GC,
-      // make sure to use the handlified references after calling
-      // the function.
-      Handle<JSObject> self(this);
-      Handle<String> hname(name);
-      Handle<Object> hvalue(value);
-      MaybeObject* result =
-          accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
-              name, value, attributes, strict_mode, &found);
-      if (found) return result;
-      // The proxy does not define the property as an accessor.
-      // Consequently, it has no effect on setting the receiver.
-      return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
-    }
-  }
-  *found = false;
-  return heap->the_hole_value();
-}
-
 
 void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
   DescriptorArray* descriptors = map()->instance_descriptors();
@@ -2164,8 +1986,7 @@
                               String* name,
                               LookupResult* result) {
   DescriptorArray* descriptors = instance_descriptors();
-  DescriptorLookupCache* cache =
-      GetHeap()->isolate()->descriptor_lookup_cache();
+  DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
   int number = cache->Lookup(descriptors, name);
   if (number == DescriptorLookupCache::kAbsent) {
     number = descriptors->Search(name);
@@ -2179,295 +2000,75 @@
 }
 
 
-static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
-  ASSERT(!map.is_null());
-  for (int i = 0; i < maps->length(); ++i) {
-    if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
-  }
-  return false;
-}
-
-
-template <class T>
-static Handle<T> MaybeNull(T* p) {
-  if (p == NULL) return Handle<T>::null();
-  return Handle<T>(p);
-}
-
-
-Handle<Map> Map::FindTransitionedMap(MapHandleList* candidates) {
-  ElementsKind elms_kind = elements_kind();
-  if (elms_kind == FAST_DOUBLE_ELEMENTS) {
-    bool dummy = true;
-    Handle<Map> fast_map =
-        MaybeNull(LookupElementsTransitionMap(FAST_ELEMENTS, &dummy));
-    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
-      return fast_map;
-    }
-    return Handle<Map>::null();
-  }
-  if (elms_kind == FAST_SMI_ONLY_ELEMENTS) {
-    bool dummy = true;
-    Handle<Map> double_map =
-        MaybeNull(LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, &dummy));
-    // In the current implementation, if the DOUBLE map doesn't exist, the
-    // FAST map can't exist either.
-    if (double_map.is_null()) return Handle<Map>::null();
-    Handle<Map> fast_map =
-        MaybeNull(double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
-                                                          &dummy));
-    if (!fast_map.is_null() && ContainsMap(candidates, fast_map)) {
-      return fast_map;
-    }
-    if (ContainsMap(candidates, double_map)) return double_map;
-  }
-  return Handle<Map>::null();
-}
-
-static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
-                                                   ElementsKind elements_kind) {
-  if (descriptor_contents->IsMap()) {
-    Map* map = Map::cast(descriptor_contents);
-    if (map->elements_kind() == elements_kind) {
-      return map;
-    }
-    return NULL;
-  }
-
-  FixedArray* map_array = FixedArray::cast(descriptor_contents);
-  for (int i = 0; i < map_array->length(); ++i) {
-    Object* current = map_array->get(i);
-    // Skip undefined slots; they are sentinels for reclaimed maps.
-    if (!current->IsUndefined()) {
-      Map* current_map = Map::cast(map_array->get(i));
-      if (current_map->elements_kind() == elements_kind) {
-        return current_map;
-      }
-    }
-  }
-
-  return NULL;
-}
-
-
-static MaybeObject* AddElementsTransitionMapToDescriptor(
-    Object* descriptor_contents,
-    Map* new_map) {
-  // Nothing was in the descriptor for an ELEMENTS_TRANSITION;
-  // simply add the map.
-  if (descriptor_contents == NULL) {
-    return new_map;
-  }
-
-  // There was already a map in the descriptor; create a 2-element FixedArray
-  // to contain the existing map plus the new one.
-  FixedArray* new_array;
-  Heap* heap = new_map->GetHeap();
-  if (descriptor_contents->IsMap()) {
-    // Must tenure, DescriptorArray expects no new-space objects.
-    MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
-    if (!maybe_new_array->To<FixedArray>(&new_array)) {
-      return maybe_new_array;
-    }
-    new_array->set(0, descriptor_contents);
-    new_array->set(1, new_map);
-    return new_array;
-  }
-
-  // The descriptor already contained a list of maps for different
-  // ElementsKinds of ELEMENTS_TRANSITION. First check the existing array for
-  // an undefined slot; if none is available, create a FixedArray to hold the
-  // existing maps plus the new one and fill it in.
-  FixedArray* array = FixedArray::cast(descriptor_contents);
-  for (int i = 0; i < array->length(); ++i) {
-    if (array->get(i)->IsUndefined()) {
-      array->set(i, new_map);
-      return array;
-    }
-  }
-
-  // Must tenure, DescriptorArray expects no new-space objects.
-  MaybeObject* maybe_new_array =
-      heap->AllocateFixedArray(array->length() + 1, TENURED);
-  if (!maybe_new_array->To<FixedArray>(&new_array)) {
-    return maybe_new_array;
-  }
-  int i = 0;
-  while (i < array->length()) {
-    new_array->set(i, array->get(i));
-    ++i;
-  }
-  new_array->set(i, new_map);
-  return new_array;
-}
-
-
-String* Map::elements_transition_sentinel_name() {
-  return GetHeap()->empty_symbol();
-}
-
-
-Object* Map::GetDescriptorContents(String* sentinel_name,
-                                   bool* safe_to_add_transition) {
-  // Get the cached index for the descriptors lookup, or find and cache it.
+MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
+                                           bool safe_to_add_transition) {
+  Heap* current_heap = heap();
   DescriptorArray* descriptors = instance_descriptors();
-  DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
-  int index = cache->Lookup(descriptors, sentinel_name);
-  if (index == DescriptorLookupCache::kAbsent) {
-    index = descriptors->Search(sentinel_name);
-    cache->Update(descriptors, sentinel_name, index);
-  }
-  // If the transition already exists, return its descriptor.
-  if (index != DescriptorArray::kNotFound) {
-    PropertyDetails details(descriptors->GetDetails(index));
-    if (details.type() == ELEMENTS_TRANSITION) {
-      return descriptors->GetValue(index);
-    } else {
-      if (safe_to_add_transition != NULL) {
-        *safe_to_add_transition = false;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-Map* Map::LookupElementsTransitionMap(ElementsKind elements_kind,
-                                      bool* safe_to_add_transition) {
-  // Special case: indirect SMI->FAST transition (cf. comment in
-  // AddElementsTransition()).
-  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      elements_kind == FAST_ELEMENTS) {
-    Map* double_map = this->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS,
-                                                        safe_to_add_transition);
-    if (double_map == NULL) return double_map;
-    return double_map->LookupElementsTransitionMap(FAST_ELEMENTS,
-                                                   safe_to_add_transition);
-  }
-  Object* descriptor_contents = GetDescriptorContents(
-      elements_transition_sentinel_name(), safe_to_add_transition);
-  if (descriptor_contents != NULL) {
-    Map* maybe_transition_map =
-        GetElementsTransitionMapFromDescriptor(descriptor_contents,
-                                               elements_kind);
-    ASSERT(maybe_transition_map == NULL || maybe_transition_map->IsMap());
-    return maybe_transition_map;
-  }
-  return NULL;
-}
-
-
-MaybeObject* Map::AddElementsTransition(ElementsKind elements_kind,
-                                        Map* transitioned_map) {
-  // The map transition graph should be a tree, therefore the transition
-  // from SMI to FAST elements is not done directly, but by going through
-  // DOUBLE elements first.
-  if (this->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      elements_kind == FAST_ELEMENTS) {
-    bool safe_to_add = true;
-    Map* double_map = this->LookupElementsTransitionMap(
-        FAST_DOUBLE_ELEMENTS, &safe_to_add);
-    // This method is only called when safe_to_add_transition has been found
-    // to be true earlier.
-    ASSERT(safe_to_add);
-
-    if (double_map == NULL) {
-      MaybeObject* maybe_map = this->CopyDropTransitions();
-      if (!maybe_map->To(&double_map)) return maybe_map;
-      double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-      MaybeObject* maybe_double_transition = this->AddElementsTransition(
-          FAST_DOUBLE_ELEMENTS, double_map);
-      if (maybe_double_transition->IsFailure()) return maybe_double_transition;
-    }
-    return double_map->AddElementsTransition(FAST_ELEMENTS, transitioned_map);
-  }
-
-  bool safe_to_add_transition = true;
-  Object* descriptor_contents = GetDescriptorContents(
-      elements_transition_sentinel_name(), &safe_to_add_transition);
-  // This method is only called when safe_to_add_transition has been found
-  // to be true earlier.
-  ASSERT(safe_to_add_transition);
-  MaybeObject* maybe_new_contents =
-      AddElementsTransitionMapToDescriptor(descriptor_contents,
-                                           transitioned_map);
-  Object* new_contents;
-  if (!maybe_new_contents->ToObject(&new_contents)) {
-    return maybe_new_contents;
-  }
-
-  ElementsTransitionDescriptor desc(elements_transition_sentinel_name(),
-                                    new_contents);
-  Object* new_descriptors;
-  MaybeObject* maybe_new_descriptors =
-      instance_descriptors()->CopyInsert(&desc, KEEP_TRANSITIONS);
-  if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
-    return maybe_new_descriptors;
-  }
-  set_instance_descriptors(DescriptorArray::cast(new_descriptors));
-  return this;
-}
-
-
-Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
-                                               ElementsKind to_kind) {
-  Isolate* isolate = object->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     object->GetElementsTransitionMap(isolate, to_kind),
-                     Map);
-}
-
-
-MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
-  Map* current_map = map();
-  ElementsKind from_kind = current_map->elements_kind();
-
-  if (from_kind == to_kind) return current_map;
-
-  // Only objects with FastProperties can have DescriptorArrays and can track
-  // element-related maps. Also don't add descriptors to maps that are shared.
-  bool safe_to_add_transition = HasFastProperties() &&
-      !current_map->IsUndefined() &&
-      !current_map->is_shared();
-
-  // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
-  // with elements that switch back and forth between dictionary and fast
-  // element mode.
-  if (from_kind == DICTIONARY_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    safe_to_add_transition = false;
-  }
+  String* elements_transition_sentinel_name = current_heap->empty_symbol();
 
   if (safe_to_add_transition) {
     // It's only safe to manipulate the descriptor array if it would be
     // safe to add a transition.
-    Map* maybe_transition_map = current_map->LookupElementsTransitionMap(
-        to_kind, &safe_to_add_transition);
-    if (maybe_transition_map != NULL) {
-      return maybe_transition_map;
+
+    ASSERT(!is_shared());  // No transitions can be added to shared maps.
+    // Check if the elements transition already exists.
+    DescriptorLookupCache* cache =
+        current_heap->isolate()->descriptor_lookup_cache();
+    int index = cache->Lookup(descriptors, elements_transition_sentinel_name);
+    if (index == DescriptorLookupCache::kAbsent) {
+      index = descriptors->Search(elements_transition_sentinel_name);
+      cache->Update(descriptors,
+                    elements_transition_sentinel_name,
+                    index);
+    }
+
+    // If the transition already exists, check the type. If there is a match,
+    // return it.
+    if (index != DescriptorArray::kNotFound) {
+      PropertyDetails details(descriptors->GetDetails(index));
+      if (details.type() == ELEMENTS_TRANSITION &&
+          details.elements_kind() == elements_kind) {
+        return descriptors->GetValue(index);
+      } else {
+        safe_to_add_transition = false;
+      }
     }
   }
 
-  Map* new_map = NULL;
-
   // No transition to an existing map for the given ElementsKind. Make a new
   // one.
-  { MaybeObject* maybe_map = current_map->CopyDropTransitions();
-    if (!maybe_map->To(&new_map)) return maybe_map;
+  Object* obj;
+  { MaybeObject* maybe_map = CopyDropTransitions();
+    if (!maybe_map->ToObject(&obj)) return maybe_map;
   }
+  Map* new_map = Map::cast(obj);
 
-  new_map->set_elements_kind(to_kind);
+  new_map->set_elements_kind(elements_kind);
+  GetIsolate()->counters()->map_to_external_array_elements()->Increment();
 
   // Only remember the map transition if the object's map is NOT equal to the
   // global object_function's map and there is not an already existing
   // non-matching element transition.
-  Context* global_context = GetIsolate()->context()->global_context();
-  bool allow_map_transition = safe_to_add_transition &&
-      (global_context->object_function()->map() != map());
+  bool allow_map_transition =
+      safe_to_add_transition &&
+      (GetIsolate()->context()->global_context()->object_function()->map() !=
+       map());
   if (allow_map_transition) {
-    MaybeObject* maybe_transition =
-        current_map->AddElementsTransition(to_kind, new_map);
-    if (maybe_transition->IsFailure()) return maybe_transition;
+    // Allocate new instance descriptors for the old map with map transition.
+    ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
+                                      Map::cast(new_map),
+                                      elements_kind);
+    Object* new_descriptors;
+    MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
+        &desc,
+        KEEP_TRANSITIONS);
+    if (!maybe_new_descriptors->ToObject(&new_descriptors)) {
+      return maybe_new_descriptors;
+    }
+    descriptors = DescriptorArray::cast(new_descriptors);
+    set_instance_descriptors(descriptors);
   }
+
   return new_map;
 }
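
The restored GetElementsTransitionMap finds an existing transition by probing the descriptor array under a sentinel name (the empty symbol), consulting the DescriptorLookupCache first and memoizing the result of the slow Search. A self-contained sketch of that Lookup/Update protocol; the single-entry cache and kAbsent sentinel are illustrative stand-ins.

#include <cstdio>
#include <cstring>

static const int kAbsent = -2;
static const int kNotFound = -1;

struct DescriptorArray {
  const char* keys[4];
  int length;
  int Search(const char* name) const {  // the slow path
    for (int i = 0; i < length; ++i)
      if (strcmp(keys[i], name) == 0) return i;
    return kNotFound;
  }
};

struct DescriptorLookupCache {  // single entry, for brevity
  const DescriptorArray* array = nullptr;
  const char* name = nullptr;
  int result = kAbsent;
  int Lookup(const DescriptorArray* a, const char* n) const {
    return (a == array && n == name) ? result : kAbsent;
  }
  void Update(const DescriptorArray* a, const char* n, int r) {
    array = a; name = n; result = r;
  }
};

int main() {
  DescriptorArray descs = {{"x", "y", "length"}, 3};
  DescriptorLookupCache cache;
  const char* key = "length";
  int index = cache.Lookup(&descs, key);
  if (index == kAbsent) {  // miss: search, then memoize
    index = descs.Search(key);
    cache.Update(&descs, key, index);
  }
  printf("index=%d cached=%d\n", index, cache.Lookup(&descs, key));
}
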
 
@@ -2478,7 +2079,6 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    // A GlobalProxy's prototype should always be a proper JSObject.
     return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
   }
 
@@ -2573,7 +2173,7 @@
         case INTERCEPTOR: {
          // Try to look up real named properties. Note that the only
          // properties that can be set this way are callbacks marked as
          // ALL_CAN_WRITE on the prototype chain.
-          LookupResult r(GetIsolate());
+          LookupResult r;
           LookupRealNamedProperty(name, &r);
           if (r.IsProperty()) {
             return SetPropertyWithFailedAccessCheck(&r,
@@ -2591,10 +2191,10 @@
     }
   }
 
-  Isolate* isolate = GetIsolate();
-  HandleScope scope(isolate);
+  Heap* heap = GetHeap();
+  HandleScope scope(heap->isolate());
   Handle<Object> value_handle(value);
-  isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+  heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
   return *value_handle;
 }
 
@@ -2605,7 +2205,7 @@
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
   if (result->IsFound() && result->type() == HANDLER) {
-    return result->proxy()->SetPropertyWithHandler(
+    return JSProxy::cast(this)->SetPropertyWithHandler(
         key, value, attributes, strict_mode);
   } else {
     return JSObject::cast(this)->SetPropertyForResult(
@@ -2619,11 +2219,22 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
 
-  Handle<Object> args[] = { name };
-  Handle<Object> result = CallTrap(
-    "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    trap = isolate->derived_has_trap();
+  }
+
+  // Call trap function.
+  Object** args[] = { name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
 
   return result->ToBoolean()->IsTrue();
 }
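
Each proxy hunk in this range repeats one dispatch pattern, which the CallTrap helper deleted further down had factored out: fetch the trap from the handler object by name, fall back to a derived (default) trap when the handler does not define one, and raise a handler_trap_missing TypeError when there is no fallback either. A sketch of that pattern, with std::map standing in for the handler object and all names illustrative:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

using Trap = std::function<bool(const std::string&)>;
using Handler = std::map<std::string, Trap>;

bool CallTrap(const Handler& handler, const std::string& name,
              Trap derived, const std::string& property) {
  auto it = handler.find(name);
  Trap trap = (it != handler.end()) ? it->second : derived;
  if (!trap) {  // no own trap and no derived fallback: TypeError
    fprintf(stderr, "TypeError: handler trap '%s' missing\n", name.c_str());
    return false;
  }
  return trap(property);
}

int main() {
  Handler handler;
  handler["has"] = [](const std::string& p) { return p == "x"; };
  Trap derived_has = [](const std::string&) { return false; };

  printf("%d\n", CallTrap(handler, "has", derived_has, "x") ? 1 : 0);  // own trap
  printf("%d\n", CallTrap(handler, "set", derived_has, "y") ? 1 : 0);  // derived
  CallTrap(handler, "delete", nullptr, "x");  // missing: reports TypeError
}
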
@@ -2639,85 +2250,24 @@
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
   Handle<Object> value(value_raw);
+  Handle<Object> handler(this->handler());
 
-  Handle<Object> args[] = { receiver, name, value };
-  CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return Failure::Exception();
-
-  return *value;
-}
-
-
-MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
-    String* name_raw,
-    Object* value_raw,
-    PropertyAttributes attributes,
-    StrictModeFlag strict_mode,
-    bool* found) {
-  *found = true;  // except where defined otherwise...
-  Isolate* isolate = GetHeap()->isolate();
-  Handle<JSProxy> proxy(this);
-  Handle<Object> handler(this->handler());  // Trap might morph proxy.
-  Handle<String> name(name_raw);
-  Handle<Object> value(value_raw);
-  Handle<Object> args[] = { name };
-  Handle<Object> result = proxy->CallTrap(
-      "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
-  if (isolate->has_pending_exception()) return Failure::Exception();
-
-  if (!result->IsUndefined()) {
-    // The proxy handler cares about this property.
-    // Check whether it is virtualized as an accessor.
-    // Emulate [[GetProperty]] semantics for proxies.
-    bool has_pending_exception;
-    Handle<Object> argv[] = { result };
-    Handle<Object> desc =
-        Execution::Call(isolate->to_complete_property_descriptor(), result,
-                        ARRAY_SIZE(argv), argv, &has_pending_exception);
-    if (has_pending_exception) return Failure::Exception();
-
-    Handle<String> conf_name =
-        isolate->factory()->LookupAsciiSymbol("configurable_");
-    Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
-    ASSERT(!isolate->has_pending_exception());
-    if (configurable->IsFalse()) {
-      Handle<String> trap =
-          isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
-      Handle<Object> args[] = { handler, trap, name };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-          "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
-      return isolate->Throw(*error);
-    }
-    ASSERT(configurable->IsTrue());
-
-    // Check for AccessorDescriptor.
-    Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
-    Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
-    ASSERT(!isolate->has_pending_exception());
-    if (!setter->IsUndefined()) {
-      // We have a setter -- invoke it.
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return proxy->SetPropertyWithDefinedSetter(
-          JSReceiver::cast(*setter), *value);
-    } else {
-      Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
-      Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
-      ASSERT(!isolate->has_pending_exception());
-      if (!getter->IsUndefined()) {
-        // We have a getter but no setter -- the property may not be
-        // written. In strict mode, throw an error.
-        if (strict_mode == kNonStrictMode) return *value;
-        Handle<Object> args[] = { name, proxy };
-        Handle<Object> error = isolate->factory()->NewTypeError(
-            "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
-        return isolate->Throw(*error);
-      }
-    }
-    // Fall-through.
+  if (trap->IsUndefined()) {
+    trap = isolate->derived_set_trap();
   }
 
-  // The proxy does not define the property as an accessor.
-  *found = false;
+  // Call trap function.
+  Object** args[] = {
+      receiver.location(), name.location(), value.location()
+  };
+  bool has_exception;
+  Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
+
   return *value;
 }
 
@@ -2728,16 +2278,31 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
 
-  Handle<Object> args[] = { name };
-  Handle<Object> result = CallTrap(
-    "delete", Handle<Object>(), ARRAY_SIZE(args), args);
+  // Extract trap function.
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return Failure::Exception();
+  if (trap->IsUndefined()) {
+    Handle<Object> args[] = { handler, trap_name };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+    isolate->Throw(*error);
+    return Failure::Exception();
+  }
+
+  // Call trap function.
+  Object** args[] = { name.location() };
+  bool has_exception;
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
+  if (has_exception) return Failure::Exception();
 
   Object* bool_result = result->ToBoolean();
-  if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
-    Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
-    Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
+  if (mode == STRICT_DELETION &&
+      bool_result == isolate->heap()->false_value()) {
+    Handle<Object> args[] = { handler, trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
@@ -2747,76 +2312,39 @@
 }
 
 
-MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
-    uint32_t index,
-    DeleteMode mode) {
-  Isolate* isolate = GetIsolate();
-  HandleScope scope(isolate);
-  Handle<String> name = isolate->factory()->Uint32ToString(index);
-  return JSProxy::DeletePropertyWithHandler(*name, mode);
-}
-
-
 MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
     JSReceiver* receiver_raw,
-    String* name_raw) {
+    String* name_raw,
+    bool* has_exception) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
-  Handle<JSProxy> proxy(this);
-  Handle<Object> handler(this->handler());  // Trap might morph proxy.
   Handle<JSReceiver> receiver(receiver_raw);
   Handle<Object> name(name_raw);
+  Handle<Object> handler(this->handler());
 
-  Handle<Object> args[] = { name };
-  Handle<Object> result = CallTrap(
-    "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+  // Extract trap function.
+  Handle<String> trap_name =
+      isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
   if (isolate->has_pending_exception()) return NONE;
-
-  if (result->IsUndefined()) return ABSENT;
-
-  bool has_pending_exception;
-  Handle<Object> argv[] = { result };
-  Handle<Object> desc =
-      Execution::Call(isolate->to_complete_property_descriptor(), result,
-                      ARRAY_SIZE(argv), argv, &has_pending_exception);
-  if (has_pending_exception) return NONE;
-
-  // Convert result to PropertyAttributes.
-  Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
-  Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
-  if (isolate->has_pending_exception()) return NONE;
-  Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
-  Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
-  if (isolate->has_pending_exception()) return NONE;
-  Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
-  Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
-  if (isolate->has_pending_exception()) return NONE;
-
-  if (configurable->IsFalse()) {
-    Handle<String> trap =
-        isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
-    Handle<Object> args[] = { handler, trap, name };
+  if (trap->IsUndefined()) {
+    Handle<Object> args[] = { handler, trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
-        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
+    *has_exception = true;
     return NONE;
   }
 
-  int attributes = NONE;
-  if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
-  if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
-  if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
-  return static_cast<PropertyAttributes>(attributes);
-}
+  // Call trap function.
+  Object** args[] = { name.location() };
+  Handle<Object> result =
+      Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
+  if (*has_exception) return NONE;
 
-
-MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
-    JSReceiver* receiver,
-    uint32_t index) {
-  Isolate* isolate = GetIsolate();
-  HandleScope scope(isolate);
-  Handle<String> name = isolate->factory()->Uint32ToString(index);
-  return GetPropertyAttributeWithHandler(receiver, *name);
+  // TODO(rossberg): convert result to PropertyAttributes
+  USE(result);
+  return NONE;
 }
 
 
@@ -2825,9 +2353,6 @@
   HandleScope scope(isolate);
   Handle<JSProxy> self(this);
 
-  // Save identity hash.
-  MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
-
   if (IsJSFunctionProxy()) {
     isolate->factory()->BecomeJSFunction(self);
     // Code will be set on the JavaScript side.
@@ -2835,42 +2360,9 @@
     isolate->factory()->BecomeJSObject(self);
   }
   ASSERT(self->IsJSObject());
-
-  // Inherit identity, if it was present.
-  Object* hash;
-  if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
-    Handle<JSObject> new_self(JSObject::cast(*self));
-    isolate->factory()->SetIdentityHash(new_self, hash);
-  }
 }
 
 
-MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
-                                                 Handle<Object> derived,
-                                                 int argc,
-                                                 Handle<Object> argv[]) {
-  Isolate* isolate = GetIsolate();
-  Handle<Object> handler(this->handler());
-
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
-  if (isolate->has_pending_exception()) return trap;
-
-  if (trap->IsUndefined()) {
-    if (derived.is_null()) {
-      Handle<Object> args[] = { handler, trap_name };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
-      isolate->Throw(*error);
-      return Handle<Object>();
-    }
-    trap = Handle<Object>(derived);
-  }
-
-  bool threw;
-  return Execution::Call(trap, handler, argc, argv, &threw);
-}
-
 
 MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
                                             String* name,
@@ -2895,46 +2387,48 @@
   }
 
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()) {
-    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-      return SetPropertyWithFailedAccessCheck(
-          result, name, value, true, strict_mode);
-    }
+  if (IsAccessCheckNeeded()
+      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    return SetPropertyWithFailedAccessCheck(result,
+                                            name,
+                                            value,
+                                            true,
+                                            strict_mode);
   }
 
   if (IsJSGlobalProxy()) {
     Object* proto = GetPrototype();
     if (proto->IsNull()) return value;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->SetPropertyForResult(
+    return JSObject::cast(proto)->SetProperty(
         result, name, value, attributes, strict_mode);
   }
 
   if (!result->IsProperty() && !IsJSContextExtensionObject()) {
-    bool found = false;
-    MaybeObject* result_object;
-    result_object = SetPropertyWithCallbackSetterInPrototypes(name,
-                                                              value,
-                                                              attributes,
-                                                              &found,
-                                                              strict_mode);
-    if (found) return result_object;
+    // We could not find a local property so let's check whether there is an
+    // accessor that wants to handle the property.
+    LookupResult accessor_result;
+    LookupCallbackSetterInPrototypes(name, &accessor_result);
+    if (accessor_result.IsProperty()) {
+      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                     name,
+                                     value,
+                                     accessor_result.holder(),
+                                     strict_mode);
+    }
   }
-
-  // At this point, no GC should have happened, as this would invalidate
-  // 'result', which we cannot handlify!
-
   if (!result->IsFound()) {
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes, strict_mode);
   }
   if (result->IsReadOnly() && result->IsProperty()) {
     if (strict_mode == kStrictMode) {
-      Handle<JSObject> self(this);
-      Handle<String> hname(name);
-      Handle<Object> args[] = { hname, self };
+      HandleScope scope(heap->isolate());
+      Handle<String> key(name);
+      Handle<Object> holder(this);
+      Handle<Object> args[2] = { key, holder };
       return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
-          "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
+          "strict_read_only_property", HandleVector(args, 2)));
     } else {
       return value;
     }
@@ -2978,6 +2472,7 @@
       ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
       JSFunction* function =
           JSFunction::cast(target_descriptors->GetValue(number));
+      ASSERT(!HEAP->InNewSpace(function));
       if (value == function) {
         set_map(target_map);
         return value;
@@ -2989,11 +2484,10 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    case HANDLER:
+    default:
       UNREACHABLE();
-      return value;
   }
-  UNREACHABLE();  // keep the compiler happy
+  UNREACHABLE();
   return value;
 }
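
The switch rewrites above (and the matching one further down) trade compiler help for brevity: spelling out case HANDLER: UNREACHABLE() keeps the switch exhaustive, so -Wswitch can flag any PropertyType added later, while the restored default: UNREACHABLE() silences that check for every type the function never expects. A small illustration with a hypothetical enum:

enum PropertyKind { NORMAL_KIND, FIELD_KIND, CALLBACKS_KIND };

int Describe(PropertyKind kind) {
  switch (kind) {
    case NORMAL_KIND:    return 0;
    case FIELD_KIND:     return 1;
    case CALLBACKS_KIND: return 2;
    // No default arm: adding a fourth PropertyKind makes -Wswitch warn
    // here, whereas a `default:` would compile silently.
  }
  return -1;  // "keep the compiler happy", as the original comment put it
}

int main() { return Describe(FIELD_KIND) == 1 ? 0 : 1; }
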
 
@@ -3007,18 +2501,6 @@
 // Note that this method cannot be used to set the prototype of a function
 // because ConvertDescriptorToField() which is called in "case CALLBACKS:"
 // doesn't handle function prototypes correctly.
-Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
-    Handle<JSObject> object,
-    Handle<String> key,
-    Handle<Object> value,
-    PropertyAttributes attributes) {
-  CALL_HEAP_FUNCTION(
-    object->GetIsolate(),
-    object->SetLocalPropertyIgnoreAttributes(*key, *value, attributes),
-    Object);
-}
-
-
 MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
     String* name,
     Object* value,
@@ -3027,12 +2509,12 @@
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
   AssertNoContextChange ncc;
-  Isolate* isolate = GetIsolate();
-  LookupResult result(isolate);
+  LookupResult result;
   LocalLookup(name, &result);
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
       return SetPropertyWithFailedAccessCheck(&result,
                                               name,
                                               value,
@@ -3090,11 +2572,10 @@
     case NULL_DESCRIPTOR:
     case ELEMENTS_TRANSITION:
       return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
-    case HANDLER:
+    default:
       UNREACHABLE();
-      return value;
   }
-  UNREACHABLE();  // keep the compiler happy
+  UNREACHABLE();
   return value;
 }
 
@@ -3104,7 +2585,7 @@
       String* name,
       bool continue_search) {
   // Check local property, ignore interceptor.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) return result.GetAttributes();
 
@@ -3176,11 +2657,12 @@
       String* key) {
   uint32_t index = 0;
   if (IsJSObject() && key->AsArrayIndex(&index)) {
-    return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
-        ? NONE : ABSENT;
+    if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
+      return NONE;
+    return ABSENT;
   }
   // Named property.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   Lookup(key, &result);
   return GetPropertyAttribute(receiver, &result, key, true);
 }
@@ -3207,8 +2689,10 @@
       case CALLBACKS:
         return result->GetAttributes();
       case HANDLER: {
-        return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
-            receiver, name);
+        // TODO(rossberg): propagate exceptions properly.
+        bool has_exception = false;
+        return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
+            receiver, name, &has_exception);
       }
       case INTERCEPTOR:
         return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -3229,7 +2713,7 @@
     return ABSENT;
   }
   // Named property.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookup(name, &result);
   return GetPropertyAttribute(this, &result, name, false);
 }
@@ -3244,9 +2728,7 @@
   if (result->IsMap() &&
       Map::cast(result)->EquivalentToForNormalization(fast, mode)) {
 #ifdef DEBUG
-    if (FLAG_verify_heap) {
-      Map::cast(result)->SharedMapVerify();
-    }
+    Map::cast(result)->SharedMapVerify();
     if (FLAG_enable_slow_asserts) {
       // The cached map should match newly created normalized map bit-by-bit.
       Object* fresh;
@@ -3282,15 +2764,6 @@
 }
 
 
-void JSObject::UpdateMapCodeCache(Handle<JSObject> object,
-                                  Handle<String> name,
-                                  Handle<Code> code) {
-  Isolate* isolate = object->GetIsolate();
-  CALL_HEAP_FUNCTION_VOID(isolate,
-                          object->UpdateMapCodeCache(*name, *code));
-}
-
-
 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) {
   if (map()->is_shared()) {
     // Fast case maps are never marked as shared.
@@ -3309,15 +2782,6 @@
 }
 
 
-void JSObject::NormalizeProperties(Handle<JSObject> object,
-                                   PropertyNormalizationMode mode,
-                                   int expected_additional_properties) {
-  CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
-                          object->NormalizeProperties(
-                              mode, expected_additional_properties));
-}
-
-
 MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
                                            int expected_additional_properties) {
   if (!HasFastProperties()) return this;
@@ -3336,10 +2800,12 @@
   } else {
     property_count += 2;  // Make space for two more properties.
   }
-  StringDictionary* dictionary;
-  { MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count);
-    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+  Object* obj;
+  { MaybeObject* maybe_obj =
+        StringDictionary::Allocate(property_count);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
+  StringDictionary* dictionary = StringDictionary::cast(obj);
 
   DescriptorArray* descs = map_of_this->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -3349,31 +2815,36 @@
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
         Object* value = descs->GetConstantFunction(i);
-        MaybeObject* maybe_dictionary =
-            dictionary->Add(descs->GetKey(i), value, d);
-        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+        Object* result;
+        { MaybeObject* maybe_result =
+              dictionary->Add(descs->GetKey(i), value, d);
+          if (!maybe_result->ToObject(&result)) return maybe_result;
+        }
+        dictionary = StringDictionary::cast(result);
         break;
       }
       case FIELD: {
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
         Object* value = FastPropertyAt(descs->GetFieldIndex(i));
-        MaybeObject* maybe_dictionary =
-            dictionary->Add(descs->GetKey(i), value, d);
-        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+        Object* result;
+        { MaybeObject* maybe_result =
+              dictionary->Add(descs->GetKey(i), value, d);
+          if (!maybe_result->ToObject(&result)) return maybe_result;
+        }
+        dictionary = StringDictionary::cast(result);
         break;
       }
       case CALLBACKS: {
-        if (!descs->IsProperty(i)) break;
+        PropertyDetails d =
+            PropertyDetails(details.attributes(), CALLBACKS, details.index());
         Object* value = descs->GetCallbacksObject(i);
-        if (value->IsAccessorPair()) {
-          MaybeObject* maybe_copy =
-              AccessorPair::cast(value)->CopyWithoutTransitions();
-          if (!maybe_copy->To(&value)) return maybe_copy;
+        Object* result;
+        { MaybeObject* maybe_result =
+              dictionary->Add(descs->GetKey(i), value, d);
+          if (!maybe_result->ToObject(&result)) return maybe_result;
         }
-        MaybeObject* maybe_dictionary =
-            dictionary->Add(descs->GetKey(i), value, details);
-        if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+        dictionary = StringDictionary::cast(result);
         break;
       }
       case MAP_TRANSITION:
@@ -3382,25 +2853,23 @@
       case INTERCEPTOR:
       case ELEMENTS_TRANSITION:
         break;
-      case HANDLER:
-      case NORMAL:
+      default:
         UNREACHABLE();
-        break;
     }
   }
 
-  Heap* current_heap = GetHeap();
+  Heap* current_heap = map_of_this->heap();
 
   // Copy the next enumeration index from instance descriptor.
   int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
   dictionary->SetNextEnumerationIndex(index);
 
-  Map* new_map;
-  { MaybeObject* maybe_map =
+  { MaybeObject* maybe_obj =
         current_heap->isolate()->context()->global_context()->
         normalized_map_cache()->Get(this, mode);
-    if (!maybe_map->To(&new_map)) return maybe_map;
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
+  Map* new_map = Map::cast(obj);
 
   // We have now successfully allocated all the necessary objects.
   // Changes can now be made with the guarantee that all of them take effect.
@@ -3411,11 +2880,6 @@
   ASSERT(instance_size_delta >= 0);
   current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
                                      instance_size_delta);
-  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
-    MemoryChunk::IncrementLiveBytesFromMutator(this->address(),
-                                               -instance_size_delta);
-  }
-
 
   set_map(new_map);
   new_map->clear_instance_descriptors();
@@ -3434,14 +2898,6 @@
 }
 
 
-void JSObject::TransformToFastProperties(Handle<JSObject> object,
-                                         int unused_property_fields) {
-  CALL_HEAP_FUNCTION_VOID(
-      object->GetIsolate(),
-      object->TransformToFastProperties(unused_property_fields));
-}
-
-
 MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) {
   if (HasFastProperties()) return this;
   ASSERT(!IsGlobalObject());
@@ -3450,14 +2906,6 @@
 }
 
 
-Handle<SeededNumberDictionary> JSObject::NormalizeElements(
-    Handle<JSObject> object) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->NormalizeElements(),
-                     SeededNumberDictionary);
-}
-
-
 MaybeObject* JSObject::NormalizeElements() {
   ASSERT(!HasExternalArrayElements());
 
@@ -3465,14 +2913,13 @@
   FixedArrayBase* array = FixedArrayBase::cast(elements());
   Map* old_map = array->map();
   bool is_arguments =
-      (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
+      (old_map == old_map->heap()->non_strict_arguments_elements_map());
   if (is_arguments) {
     array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
   }
   if (array->IsDictionary()) return array;
 
   ASSERT(HasFastElements() ||
-         HasFastSmiOnlyElements() ||
          HasFastDoubleElements() ||
          HasFastArgumentsElements());
   // Compute the effective length and allocate a new backing store.
@@ -3507,8 +2954,7 @@
         if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
       }
     } else {
-      ASSERT(old_map->has_fast_elements() ||
-             old_map->has_fast_smi_only_elements());
+      ASSERT(old_map->has_fast_elements());
       value = FixedArray::cast(array)->get(i);
     }
     PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -3528,15 +2974,13 @@
     // Set the new map first to satisfy the elements type assert in
     // set_elements().
     Object* new_map;
-    MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
-                                                  DICTIONARY_ELEMENTS);
+    MaybeObject* maybe = map()->GetSlowElementsMap();
     if (!maybe->ToObject(&new_map)) return maybe;
     set_map(Map::cast(new_map));
     set_elements(dictionary);
   }
 
-  old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
-      Increment();
+  old_map->isolate()->counters()->elements_to_dictionary()->Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -3550,252 +2994,102 @@
 }
 
 
-Smi* JSReceiver::GenerateIdentityHash() {
+MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
   Isolate* isolate = GetIsolate();
+  Heap* heap = isolate->heap();
+  Object* holder = BypassGlobalProxy();
+  if (holder->IsUndefined()) return heap->undefined_value();
+  JSObject* obj = JSObject::cast(holder);
+  if (obj->HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = obj->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == heap->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
+    }
+  }
+
+  // Only attempt to find the hidden properties in the local object and not
+  // in the prototype chain.
+  if (!obj->HasHiddenPropertiesObject()) {
+    // Hidden properties object not found. Allocate a new hidden properties
+    // object if requested. Otherwise return the undefined value.
+    if (flag == ALLOW_CREATION) {
+      Object* hidden_obj;
+      { MaybeObject* maybe_obj = heap->AllocateJSObject(
+            isolate->context()->global_context()->object_function());
+        if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
+      }
+      // Don't allow leakage of the hidden object through accessors
+      // on Object.prototype.
+      {
+        MaybeObject* maybe_obj =
+            JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
+        if (maybe_obj->IsFailure()) return maybe_obj;
+      }
+      return obj->SetHiddenPropertiesObject(hidden_obj);
+    } else {
+      return heap->undefined_value();
+    }
+  }
+  return obj->GetHiddenPropertiesObject();
+}
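
GetHiddenProperties above can get away with inspecting only descriptor slot 0 because descriptor entries are sorted by string hash and the hidden symbol is the unique string whose hash is zero: if it is present at all, it sorts first. A sketch of that invariant, with made-up hash values:

#include <algorithm>
#include <cstdio>
#include <vector>

struct Descriptor { unsigned hash; const char* key; };

int main() {
  std::vector<Descriptor> descs = {
      {961u, "x"}, {0u, "<hidden>"}, {381u, "y"}};
  std::sort(descs.begin(), descs.end(),
            [](const Descriptor& a, const Descriptor& b) {
              return a.hash < b.hash;
            });
  // One O(1) probe of slot 0 replaces a full search.
  printf("first=%s hidden_present=%d\n",
         descs[0].key, descs[0].hash == 0u ? 1 : 0);
}
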
+
+
+MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
+  Isolate* isolate = GetIsolate();
+  Object* hidden_props_obj;
+  { MaybeObject* maybe_obj = GetHiddenProperties(flag);
+    if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
+  }
+  if (!hidden_props_obj->IsJSObject()) {
+    // We failed to create hidden properties.  That's a detached
+    // global proxy.
+    ASSERT(hidden_props_obj->IsUndefined());
+    return Smi::FromInt(0);
+  }
+  JSObject* hidden_props = JSObject::cast(hidden_props_obj);
+  String* hash_symbol = isolate->heap()->identity_hash_symbol();
+  {
+    // Note that HasLocalProperty() can cause a GC in the general case in the
+    // presence of interceptors.
+    AssertNoAllocation no_alloc;
+    if (hidden_props->HasLocalProperty(hash_symbol)) {
+      MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
+      return Smi::cast(hash->ToObjectChecked());
+    }
+  }
 
   int hash_value;
   int attempts = 0;
   do {
     // Generate a random 32-bit hash value but limit range to fit
     // within a smi.
-    hash_value = V8::RandomPrivate(isolate) & Smi::kMaxValue;
+    hash_value = V8::Random(isolate) & Smi::kMaxValue;
     attempts++;
   } while (hash_value == 0 && attempts < 30);
   hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
 
-  return Smi::FromInt(hash_value);
-}
-
-
-MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
-  MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
-                                         hash);
-  if (maybe->IsFailure()) return maybe;
-  return this;
-}
-
-
-int JSObject::GetIdentityHash(Handle<JSObject> obj) {
-  CALL_AND_RETRY(obj->GetIsolate(),
-                 obj->GetIdentityHash(ALLOW_CREATION),
-                 return Smi::cast(__object__)->value(),
-                 return 0);
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
-  Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
-  if (stored_value->IsSmi()) return stored_value;
-
-  // Do not generate permanent identity hash code if not requested.
-  if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
-
-  Smi* hash = GenerateIdentityHash();
-  MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
-                                          hash);
-  if (result->IsFailure()) return result;
-  if (result->ToObjectUnchecked()->IsUndefined()) {
-    // Trying to get hash of detached proxy.
-    return Smi::FromInt(0);
+  Smi* hash = Smi::FromInt(hash_value);
+  { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
+        hash_symbol,
+        hash,
+        static_cast<PropertyAttributes>(None));
+    if (result->IsFailure()) return result;
   }
   return hash;
 }
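
The generation loop restored in GetIdentityHash masks a random 32-bit value into Smi range and retries a bounded number of times to avoid 0 (which would read as "no hash"), forcing 1 as a last resort. A standalone sketch; kSmiMaxValue assumes a 31-bit Smi payload and rand() stands in for V8::Random(isolate).

#include <cstdio>
#include <cstdlib>

static const int kSmiMaxValue = (1 << 30) - 1;  // 31-bit Smi assumption

int GenerateIdentityHash() {
  int hash_value;
  int attempts = 0;
  do {
    // Random 32-bit value, limited to the Smi range by the mask.
    hash_value = rand() & kSmiMaxValue;
    attempts++;
  } while (hash_value == 0 && attempts < 30);
  return hash_value != 0 ? hash_value : 1;  // never return 0
}

int main() { printf("hash=%d\n", GenerateIdentityHash()); }
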
 
 
-MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
-  Object* hash = this->hash();
-  if (!hash->IsSmi() && flag == ALLOW_CREATION) {
-    hash = GenerateIdentityHash();
-    set_hash(hash);
-  }
-  return hash;
-}
-
-
-Object* JSObject::GetHiddenProperty(String* key) {
-  if (IsJSGlobalProxy()) {
-    // For a proxy, use the prototype as target object.
-    Object* proxy_parent = GetPrototype();
-    // If the proxy is detached, return undefined.
-    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
-    ASSERT(proxy_parent->IsJSGlobalObject());
-    return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
-  }
-  ASSERT(!IsJSGlobalProxy());
-  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
-  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
-  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
-    return GetHeap()->undefined_value();
-  }
-  StringDictionary* dictionary =
-      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
-  int entry = dictionary->FindEntry(key);
-  if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
-  return dictionary->ValueAt(entry);
-}
-
-
-Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
-                                 Handle<String> key,
-                                 Handle<Object> value) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->SetHiddenProperty(*key, *value),
-                     Object);
-}
-
-
-MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
-  if (IsJSGlobalProxy()) {
-    // For a proxy, use the prototype as target object.
-    Object* proxy_parent = GetPrototype();
-    // If the proxy is detached, return undefined.
-    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
-    ASSERT(proxy_parent->IsJSGlobalObject());
-    return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
-  }
-  ASSERT(!IsJSGlobalProxy());
-  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
-  StringDictionary* dictionary;
-  if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
-
-  // If it was found, check if the key is already in the dictionary.
-  int entry = dictionary->FindEntry(key);
-  if (entry != StringDictionary::kNotFound) {
-    // If key was found, just update the value.
-    dictionary->ValueAtPut(entry, value);
-    return this;
-  }
-  // Key was not already in the dictionary, so add the entry.
-  MaybeObject* insert_result = dictionary->Add(key,
-                                               value,
-                                               PropertyDetails(NONE, NORMAL));
-  StringDictionary* new_dict;
-  if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
-  if (new_dict != dictionary) {
-    // If adding the key expanded the dictionary (i.e., Add returned a new
-    // dictionary), store it back to the object.
-    MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
-    if (store_result->IsFailure()) return store_result;
-  }
-  // Return this to mark success.
-  return this;
-}
-
-
-void JSObject::DeleteHiddenProperty(String* key) {
-  if (IsJSGlobalProxy()) {
-    // For a proxy, use the prototype as target object.
-    Object* proxy_parent = GetPrototype();
-    // If the proxy is detached, return immediately.
-    if (proxy_parent->IsNull()) return;
-    ASSERT(proxy_parent->IsJSGlobalObject());
-    JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
-    return;
-  }
-  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
-  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
-  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
-  StringDictionary* dictionary =
-      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
-  int entry = dictionary->FindEntry(key);
-  if (entry == StringDictionary::kNotFound) {
-    // Key wasn't in dictionary. Deletion is a success.
-    return;
-  }
-  // Key was in the dictionary. Remove it.
-  dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
-}
-
-
-bool JSObject::HasHiddenProperties() {
-  return GetPropertyAttributePostInterceptor(this,
-                                             GetHeap()->hidden_symbol(),
-                                             false) != ABSENT;
-}
-
-
-MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
-  ASSERT(!IsJSGlobalProxy());
-  if (HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbol's hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = this->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
-      if (descriptors->GetType(0) == FIELD) {
-        Object* hidden_store =
-            this->FastPropertyAt(descriptors->GetFieldIndex(0));
-        return StringDictionary::cast(hidden_store);
-      } else {
-        ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
-               descriptors->GetType(0) == MAP_TRANSITION);
-      }
-    }
-  } else {
-    PropertyAttributes attributes;
-    // You can't install a getter on a property indexed by the hidden symbol,
-    // so we can be sure that GetLocalPropertyPostInterceptor returns a real
-    // object.
-    Object* lookup =
-        GetLocalPropertyPostInterceptor(this,
-                                        GetHeap()->hidden_symbol(),
-                                        &attributes)->ToObjectUnchecked();
-    if (!lookup->IsUndefined()) {
-      return StringDictionary::cast(lookup);
-    }
-  }
-  if (!create_if_absent) return GetHeap()->undefined_value();
-  const int kInitialSize = 5;
-  MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
-  StringDictionary* dictionary;
-  if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
-  MaybeObject* store_result =
-      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
-                                 dictionary,
-                                 DONT_ENUM,
-                                 kNonStrictMode);
-  if (store_result->IsFailure()) return store_result;
-  return dictionary;
-}
-
-
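The lookup just removed leans on a sorting invariant: descriptor arrays are
ordered by string hash, and the hidden symbol is the only string whose hash is
zero, so when present it always sorts into slot 0. A minimal sketch of that
invariant, using an illustrative Entry type rather than V8's descriptor layout:

    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for a hash-sorted descriptor array.
    struct Entry { uint32_t hash; const char* key; };

    // Only slot 0 can hold the hash-zero hidden key, so one comparison
    // suffices instead of a full search.
    static const Entry* FindHiddenEntry(const std::vector<Entry>& sorted) {
      if (!sorted.empty() && sorted[0].hash == 0) return &sorted[0];
      return nullptr;
    }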
-MaybeObject* JSObject::SetHiddenPropertiesDictionary(
-    StringDictionary* dictionary) {
-  ASSERT(!IsJSGlobalProxy());
-  ASSERT(HasHiddenProperties());
-  if (HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbol's hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = this->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == GetHeap()->hidden_symbol())) {
-      if (descriptors->GetType(0) == FIELD) {
-        this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
-        return this;
-      } else {
-        ASSERT(descriptors->GetType(0) == NULL_DESCRIPTOR ||
-               descriptors->GetType(0) == MAP_TRANSITION);
-      }
-    }
-  }
-  MaybeObject* store_result =
-      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
-                                 dictionary,
-                                 DONT_ENUM,
-                                 kNonStrictMode);
-  if (store_result->IsFailure()) return store_result;
-  return this;
-}
-
-
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (!result.IsProperty()) return GetHeap()->true_value();
 
@@ -3877,14 +3171,6 @@
 }
 
 
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
-                                       uint32_t index) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->DeleteElement(index, JSObject::NORMAL_DELETION),
-                     Object);
-}
-
-
 MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
@@ -3913,11 +3199,12 @@
 }
 
 
-Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj,
-                              Handle<String> prop) {
-  CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION),
-                     Object);
+MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
+  } else {
+    return JSObject::cast(this)->DeleteProperty(name, mode);
+  }
 }
 
 
@@ -3944,7 +3231,7 @@
   if (name->AsArrayIndex(&index)) {
     return DeleteElement(index, mode);
   } else {
-    LookupResult result(isolate);
+    LookupResult result;
     LocalLookup(name, &result);
     if (!result.IsProperty()) return isolate->heap()->true_value();
     // Ignore attributes if forcing a deletion.
@@ -3978,27 +3265,10 @@
 }
 
 
-MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
-  if (IsJSProxy()) {
-    return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
-  }
-  return JSObject::cast(this)->DeleteElement(index, mode);
-}
-
-
-MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
-  if (IsJSProxy()) {
-    return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
-  }
-  return JSObject::cast(this)->DeleteProperty(name, mode);
-}
-
-
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  ASSERT(kind == FAST_ELEMENTS ||
-         kind == DICTIONARY_ELEMENTS);
+  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
   if (kind == FAST_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
@@ -4019,7 +3289,7 @@
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
   Map* map_of_this = map();
-  Heap* heap = GetHeap();
+  Heap* heap = map_of_this->heap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
@@ -4054,8 +3324,6 @@
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
-    case FAST_SMI_ONLY_ELEMENTS:
-      break;
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(this->elements());
@@ -4121,11 +3389,6 @@
 }
 
 
-Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object);
-}
-
-
 MaybeObject* JSObject::PreventExtensions() {
   Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded() &&
@@ -4177,16 +3440,15 @@
 
 
 // Tests for the fast common case for property enumeration:
-// - This object and all prototypes have an enum cache (which means that
-//   it is not a proxy, has no interceptors and needs no access checks).
+// - This object and all prototypes have an enum cache (which means that it has
+//   no interceptors and needs no access checks).
 // - This object has no elements.
 // - No prototype has enumerable properties/elements.
-bool JSReceiver::IsSimpleEnum() {
+bool JSObject::IsSimpleEnum() {
   Heap* heap = GetHeap();
   for (Object* o = this;
        o != heap->null_value();
        o = JSObject::cast(o)->GetPrototype()) {
-    if (!o->IsJSObject()) return false;
     JSObject* curr = JSObject::cast(o);
     if (!curr->map()->instance_descriptors()->HasEnumCache()) return false;
     ASSERT(!curr->HasNamedInterceptor());
@@ -4203,14 +3465,11 @@
 }
 
 
-int Map::NumberOfDescribedProperties(PropertyAttributes filter) {
+int Map::NumberOfDescribedProperties() {
   int result = 0;
   DescriptorArray* descs = instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    PropertyDetails details(descs->GetDetails(i));
-    if (descs->IsProperty(i) && (details.attributes() & filter) == 0) {
-      result++;
-    }
+    if (descs->IsProperty(i)) result++;
   }
   return result;
 }
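The parameter dropped above filtered descriptors by attribute bits: 3.7's
version counted only entries whose attributes shared no bit with the filter
mask. A minimal sketch of that bitmask test; the attribute values below follow
the usual v8.h convention and are assumptions for illustration, not taken from
this diff:

    // Hypothetical stand-ins for V8's property attribute bits.
    enum PropertyAttributes {
      NONE        = 0,
      READ_ONLY   = 1 << 0,
      DONT_ENUM   = 1 << 1,
      DONT_DELETE = 1 << 2
    };

    // Mirrors the removed `(details.attributes() & filter) == 0` test:
    // an entry is counted only if none of the filtered bits are set.
    static int CountFiltered(const PropertyAttributes* attrs, int n,
                             int filter) {
      int result = 0;
      for (int i = 0; i < n; i++) {
        if ((attrs[i] & filter) == 0) result++;
      }
      return result;
    }

Passing DONT_ENUM as the filter, for example, counts only enumerable entries.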
@@ -4252,6 +3511,15 @@
 
 
 void JSReceiver::LocalLookup(String* name, LookupResult* result) {
+  if (IsJSProxy()) {
+    result->HandlerResult();
+  } else {
+    JSObject::cast(this)->LocalLookup(name, result);
+  }
+}
+
+
+void JSObject::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
   Heap* heap = GetHeap();
@@ -4260,36 +3528,28 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    return JSReceiver::cast(proto)->LocalLookup(name, result);
-  }
-
-  if (IsJSProxy()) {
-    result->HandlerResult(JSProxy::cast(this));
-    return;
+    return JSObject::cast(proto)->LocalLookup(name, result);
   }
 
   // Do not use inline caching if the object is a non-global object
   // that requires access checks.
-  if (IsAccessCheckNeeded()) {
+  if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
     result->DisallowCaching();
   }
 
-  JSObject* js_object = JSObject::cast(this);
-
   // Check __proto__ before interceptor.
   if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
-    result->ConstantResult(js_object);
+    result->ConstantResult(this);
     return;
   }
 
   // Check for lookup interceptor except when bootstrapping.
-  if (js_object->HasNamedInterceptor() &&
-      !heap->isolate()->bootstrapper()->IsActive()) {
-    result->InterceptorResult(js_object);
+  if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
+    result->InterceptorResult(this);
     return;
   }
 
-  js_object->LocalLookupRealNamedProperty(name, result);
+  LocalLookupRealNamedProperty(name, result);
 }
 
 
@@ -4299,7 +3559,7 @@
   for (Object* current = this;
        current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
-    JSReceiver::cast(current)->LocalLookup(name, result);
+    JSObject::cast(current)->LocalLookup(name, result);
     if (result->IsProperty()) return;
   }
   result->NotFound();
@@ -4310,144 +3570,128 @@
 void JSObject::LookupCallback(String* name, LookupResult* result) {
   Heap* heap = GetHeap();
   for (Object* current = this;
-       current != heap->null_value() && current->IsJSObject();
+       current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
     JSObject::cast(current)->LocalLookupRealNamedProperty(name, result);
-    if (result->IsFound() && result->type() == CALLBACKS) return;
+    if (result->IsProperty() && result->type() == CALLBACKS) return;
   }
   result->NotFound();
 }
 
 
-// Try to update an accessor in an elements dictionary. Return true if the
-// update succeeded, and false otherwise.
-static bool UpdateGetterSetterInDictionary(
-    SeededNumberDictionary* dictionary,
-    uint32_t index,
-    Object* getter,
-    Object* setter,
-    PropertyAttributes attributes) {
+// Search for a getter or setter in an elements dictionary.  Returns either
+// undefined if the element is read-only, or the getter/setter pair (fixed
+// array) if there is an existing one, or the hole value if the element does
+// not exist or is a normal non-getter/setter data element.
+static Object* FindGetterSetterInDictionary(SeededNumberDictionary* dictionary,
+                                            uint32_t index,
+                                            Heap* heap) {
   int entry = dictionary->FindEntry(index);
   if (entry != SeededNumberDictionary::kNotFound) {
     Object* result = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    if (details.type() == CALLBACKS && result->IsAccessorPair()) {
-      ASSERT(!details.IsDontDelete());
-      if (details.attributes() != attributes) {
-        dictionary->DetailsAtPut(entry,
-                                 PropertyDetails(attributes, CALLBACKS, index));
-      }
-      AccessorPair::cast(result)->SetComponents(getter, setter);
-      return true;
-    }
+    if (details.IsReadOnly()) return heap->undefined_value();
+    if (details.type() == CALLBACKS && result->IsFixedArray()) return result;
   }
-  return false;
+  return heap->the_hole_value();
 }
 
 
-MaybeObject* JSObject::DefineElementAccessor(uint32_t index,
-                                             Object* getter,
-                                             Object* setter,
-                                             PropertyAttributes attributes) {
-  switch (GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
-    case FAST_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      // Ignore getters and setters on pixel and external array elements.
-      return GetHeap()->undefined_value();
-    case DICTIONARY_ELEMENTS:
-      if (UpdateGetterSetterInDictionary(element_dictionary(),
-                                         index,
-                                         getter,
-                                         setter,
-                                         attributes)) {
-        return GetHeap()->undefined_value();
+MaybeObject* JSObject::DefineGetterSetter(String* name,
+                                          PropertyAttributes attributes) {
+  Heap* heap = GetHeap();
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Try to flatten before operating on the string.
+  name->TryFlatten();
+
+  if (!CanSetCallback(name)) {
+    return heap->undefined_value();
+  }
+
+  uint32_t index = 0;
+  bool is_element = name->AsArrayIndex(&index);
+
+  if (is_element) {
+    switch (GetElementsKind()) {
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+        break;
+      case EXTERNAL_PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+        // Ignore getters and setters on pixel and external array
+        // elements.
+        return heap->undefined_value();
+      case DICTIONARY_ELEMENTS: {
+        Object* probe =
+            FindGetterSetterInDictionary(element_dictionary(), index, heap);
+        if (!probe->IsTheHole()) return probe;
+        // Otherwise allow overriding it.
+        break;
       }
-      break;
-    case NON_STRICT_ARGUMENTS_ELEMENTS: {
-      // Ascertain whether we have read-only properties or an existing
-      // getter/setter pair in an arguments elements dictionary backing
-      // store.
-      FixedArray* parameter_map = FixedArray::cast(elements());
-      uint32_t length = parameter_map->length();
-      Object* probe =
-          index < (length - 2) ? parameter_map->get(index + 2) : NULL;
-      if (probe == NULL || probe->IsTheHole()) {
-        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-        if (arguments->IsDictionary()) {
-          SeededNumberDictionary* dictionary =
-              SeededNumberDictionary::cast(arguments);
-          if (UpdateGetterSetterInDictionary(dictionary,
-                                             index,
-                                             getter,
-                                             setter,
-                                             attributes)) {
-            return GetHeap()->undefined_value();
+      case NON_STRICT_ARGUMENTS_ELEMENTS: {
+        // Ascertain whether we have read-only properties or an existing
+        // getter/setter pair in an arguments elements dictionary backing
+        // store.
+        FixedArray* parameter_map = FixedArray::cast(elements());
+        uint32_t length = parameter_map->length();
+        Object* probe =
+            index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+        if (probe == NULL || probe->IsTheHole()) {
+          FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+          if (arguments->IsDictionary()) {
+            SeededNumberDictionary* dictionary =
+                SeededNumberDictionary::cast(arguments);
+            probe = FindGetterSetterInDictionary(dictionary, index, heap);
+            if (!probe->IsTheHole()) return probe;
           }
         }
+        break;
       }
-      break;
     }
-  }
-
-  AccessorPair* accessors;
-  { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
-    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
-  }
-  accessors->SetComponents(getter, setter);
-
-  return SetElementCallback(index, accessors, attributes);
-}
-
-
-MaybeObject* JSObject::DefinePropertyAccessor(String* name,
-                                              Object* getter,
-                                              Object* setter,
-                                              PropertyAttributes attributes) {
-  // Lookup the name.
-  LookupResult result(GetHeap()->isolate());
-  LocalLookupRealNamedProperty(name, &result);
-  if (result.IsFound()) {
-    if (result.type() == CALLBACKS) {
-      ASSERT(!result.IsDontDelete());
-      Object* obj = result.GetCallbackObject();
-      // Need to preserve old getters/setters.
-      if (obj->IsAccessorPair()) {
-        AccessorPair* copy;
-        { MaybeObject* maybe_copy =
-              AccessorPair::cast(obj)->CopyWithoutTransitions();
-          if (!maybe_copy->To(&copy)) return maybe_copy;
+  } else {
+    // Lookup the name.
+    LookupResult result;
+    LocalLookup(name, &result);
+    if (result.IsProperty()) {
+      if (result.IsReadOnly()) return heap->undefined_value();
+      if (result.type() == CALLBACKS) {
+        Object* obj = result.GetCallbackObject();
+        // Need to preserve old getters/setters.
+        if (obj->IsFixedArray()) {
+          // Use set to update attributes.
+          return SetPropertyCallback(name, obj, attributes);
         }
-        copy->SetComponents(getter, setter);
-        // Use set to update attributes.
-        return SetPropertyCallback(name, copy, attributes);
       }
     }
   }
 
-  AccessorPair* accessors;
-  { MaybeObject* maybe_accessors = GetHeap()->AllocateAccessorPair();
-    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
+  // Allocate the fixed array to hold getter and setter.
+  Object* structure;
+  { MaybeObject* maybe_structure = heap->AllocateFixedArray(2, TENURED);
+    if (!maybe_structure->ToObject(&structure)) return maybe_structure;
   }
-  accessors->SetComponents(getter, setter);
 
-  return SetPropertyCallback(name, accessors, attributes);
+  if (is_element) {
+    return SetElementCallback(index, structure, attributes);
+  } else {
+    return SetPropertyCallback(name, structure, attributes);
+  }
 }
 
 
 bool JSObject::CanSetCallback(String* name) {
-  ASSERT(!IsAccessCheckNeeded() ||
-         GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET));
+  ASSERT(!IsAccessCheckNeeded()
+         || Isolate::Current()->MayNamedAccess(this, name, v8::ACCESS_SET));
 
   // Check if there is an API defined callback object which prohibits
   // callback overwriting in this object or its prototype chain.
@@ -4455,7 +3699,7 @@
   // certain accessors such as window.location should not be allowed
   // to be overwritten because allowing overwriting could potentially
   // cause security problems.
-  LookupResult callback_result(GetIsolate());
+  LookupResult callback_result;
   LookupCallback(name, &callback_result);
   if (callback_result.IsProperty()) {
     Object* obj = callback_result.GetCallbackObject();
@@ -4475,15 +3719,19 @@
   PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   // Normalize elements to make this operation simple.
-  SeededNumberDictionary* dictionary;
-  { MaybeObject* maybe_dictionary = NormalizeElements();
-    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+  SeededNumberDictionary* dictionary = NULL;
+  { Object* result;
+    MaybeObject* maybe = NormalizeElements();
+    if (!maybe->ToObject(&result)) return maybe;
+    dictionary = SeededNumberDictionary::cast(result);
   }
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
 
   // Update the dictionary with the new CALLBACKS property.
-  { MaybeObject* maybe_dictionary = dictionary->Set(index, structure, details);
-    if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
+  { Object* result;
+    MaybeObject* maybe = dictionary->Set(index, structure, details);
+    if (!maybe->ToObject(&result)) return maybe;
+    dictionary = SeededNumberDictionary::cast(result);
   }
 
   dictionary->set_requires_slow_elements();
@@ -4495,7 +3743,8 @@
     // switch to a direct backing store without the parameter map.  This
     // would allow GC of the context.
     FixedArray* parameter_map = FixedArray::cast(elements());
-    if (index < static_cast<uint32_t>(parameter_map->length()) - 2) {
+    uint32_t length = parameter_map->length();
+    if (index < length - 2) {
       parameter_map->set(index + 2, GetHeap()->the_hole_value());
     }
     parameter_map->set(1, dictionary);
@@ -4503,26 +3752,33 @@
     set_elements(dictionary);
   }
 
-  return GetHeap()->undefined_value();
+  return structure;
 }
 
 
 MaybeObject* JSObject::SetPropertyCallback(String* name,
                                            Object* structure,
                                            PropertyAttributes attributes) {
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  bool convert_back_to_fast = HasFastProperties() &&
+      (map()->instance_descriptors()->number_of_descriptors()
+          < DescriptorArray::kMaxNumberOfDescriptors);
+
   // Normalize object to make this operation simple.
+  Object* ok;
   { MaybeObject* maybe_ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-    if (maybe_ok->IsFailure()) return maybe_ok;
+    if (!maybe_ok->ToObject(&ok)) return maybe_ok;
   }
 
   // For the global object allocate a new map to invalidate the global inline
   // caches which have a global property cell reference directly in the code.
   if (IsGlobalObject()) {
-    Map* new_map;
+    Object* new_map;
     { MaybeObject* maybe_new_map = map()->CopyDropDescriptors();
-      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
     }
-    set_map(new_map);
+    set_map(Map::cast(new_map));
     // When running crankshaft, changing the map is not enough. We
     // need to deoptimize all functions that rely on this global
     // object.
@@ -4530,29 +3786,24 @@
   }
 
   // Update the dictionary with the new CALLBACKS property.
-  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-  { MaybeObject* maybe_ok = SetNormalizedProperty(name, structure, details);
-    if (maybe_ok->IsFailure()) return maybe_ok;
+  Object* result;
+  { MaybeObject* maybe_result = SetNormalizedProperty(name, structure, details);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  return GetHeap()->undefined_value();
-}
-
-
-void JSObject::DefineAccessor(Handle<JSObject> object,
-                              Handle<String> name,
-                              Handle<Object> getter,
-                              Handle<Object> setter,
-                              PropertyAttributes attributes) {
-  CALL_HEAP_FUNCTION_VOID(
-      object->GetIsolate(),
-      object->DefineAccessor(*name, *getter, *setter, attributes));
+  if (convert_back_to_fast) {
+    { MaybeObject* maybe_ok = TransformToFastProperties(0);
+      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+    }
+  }
+  return result;
 }
 
 MaybeObject* JSObject::DefineAccessor(String* name,
-                                      Object* getter,
-                                      Object* setter,
+                                      bool is_getter,
+                                      Object* fun,
                                       PropertyAttributes attributes) {
+  ASSERT(fun->IsJSFunction() || fun->IsUndefined());
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
@@ -4565,23 +3816,17 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return this;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->DefineAccessor(
-        name, getter, setter, attributes);
+    return JSObject::cast(proto)->DefineAccessor(name, is_getter,
+                                                 fun, attributes);
   }
 
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc;
-
-  // Try to flatten before operating on the string.
-  name->TryFlatten();
-
-  if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
-
-  uint32_t index = 0;
-  return name->AsArrayIndex(&index) ?
-      DefineElementAccessor(index, getter, setter, attributes) :
-      DefinePropertyAccessor(name, getter, setter, attributes);
+  Object* array;
+  { MaybeObject* maybe_array = DefineGetterSetter(name, attributes);
+    if (!maybe_array->ToObject(&array)) return maybe_array;
+  }
+  if (array->IsUndefined()) return array;
+  FixedArray::cast(array)->set(is_getter ? 0 : 1, fun);
+  return this;
 }
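After this revert an accessor is stored as a plain two-element FixedArray
rather than an AccessorPair object: the getter lives in slot 0 and the setter
in slot 1, which is what the `is_getter ? 0 : 1` above and the
kGetterIndex/kSetterIndex constants below encode. A minimal sketch of that
representation, with an illustrative struct standing in for FixedArray:

    // Illustrative two-slot holder: slots[0] = getter, slots[1] = setter.
    struct AccessorSlots {
      void* slots[2];
    };

    // Install one component and leave the other untouched, as DefineAccessor
    // does once DefineGetterSetter has returned the (possibly new) pair.
    static void SetComponent(AccessorSlots* pair, bool is_getter, void* fun) {
      pair->slots[is_getter ? 0 : 1] = fun;
    }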
 
 
@@ -4621,7 +3866,6 @@
 
     // Accessors overwrite previous callbacks (cf. with getters/setters).
     switch (GetElementsKind()) {
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -4644,22 +3888,24 @@
         break;
     }
 
+    Object* ok;
     { MaybeObject* maybe_ok =
           SetElementCallback(index, info, info->property_attributes());
-      if (maybe_ok->IsFailure()) return maybe_ok;
+      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
     }
   } else {
     // Lookup the name.
-    LookupResult result(isolate);
+    LookupResult result;
     LocalLookup(name, &result);
     // ES5 forbids turning a property into an accessor if it's not
     // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
     if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
       return isolate->heap()->undefined_value();
     }
+    Object* ok;
     { MaybeObject* maybe_ok =
           SetPropertyCallback(name, info, info->property_attributes());
-      if (maybe_ok->IsFailure()) return maybe_ok;
+      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
     }
   }
 
@@ -4667,7 +3913,7 @@
 }
 
 
-Object* JSObject::LookupAccessor(String* name, AccessorComponent component) {
+Object* JSObject::LookupAccessor(String* name, bool is_getter) {
   Heap* heap = GetHeap();
 
   // Make sure that the top context does not change when doing callbacks or
@@ -4682,6 +3928,7 @@
   }
 
   // Make the lookup and include prototypes.
+  int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
     for (Object* obj = this;
@@ -4693,9 +3940,11 @@
         int entry = dictionary->FindEntry(index);
         if (entry != SeededNumberDictionary::kNotFound) {
           Object* element = dictionary->ValueAt(entry);
-          if (dictionary->DetailsAt(entry).type() == CALLBACKS &&
-              element->IsAccessorPair()) {
-            return AccessorPair::cast(element)->GetComponent(component);
+          PropertyDetails details = dictionary->DetailsAt(entry);
+          if (details.type() == CALLBACKS) {
+            if (element->IsFixedArray()) {
+              return FixedArray::cast(element)->get(accessor_index);
+            }
           }
         }
       }
@@ -4704,14 +3953,14 @@
     for (Object* obj = this;
          obj != heap->null_value();
          obj = JSObject::cast(obj)->GetPrototype()) {
-      LookupResult result(heap->isolate());
+      LookupResult result;
       JSObject::cast(obj)->LocalLookup(name, &result);
       if (result.IsProperty()) {
         if (result.IsReadOnly()) return heap->undefined_value();
         if (result.type() == CALLBACKS) {
           Object* obj = result.GetCallbackObject();
-          if (obj->IsAccessorPair()) {
-            return AccessorPair::cast(obj)->GetComponent(component);
+          if (obj->IsFixedArray()) {
+            return FixedArray::cast(obj)->get(accessor_index);
           }
         }
       }
@@ -4812,7 +4061,7 @@
   Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
 
 #ifdef DEBUG
-  if (FLAG_verify_heap && Map::cast(result)->is_shared()) {
+  if (Map::cast(result)->is_shared()) {
     Map::cast(result)->SharedMapVerify();
   }
 #endif
@@ -4835,19 +4084,12 @@
   return new_map;
 }
 
-void Map::UpdateCodeCache(Handle<Map> map,
-                          Handle<String> name,
-                          Handle<Code> code) {
-  Isolate* isolate = map->GetIsolate();
-  CALL_HEAP_FUNCTION_VOID(isolate,
-                          map->UpdateCodeCache(*name, *code));
-}
 
 MaybeObject* Map::UpdateCodeCache(String* name, Code* code) {
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -4885,219 +4127,75 @@
 }
 
 
-// An iterator over all map transitions in a descriptor array, reusing the map
-// field of the contents array while it is running.
-class IntrusiveMapTransitionIterator {
- public:
-  explicit IntrusiveMapTransitionIterator(DescriptorArray* descriptor_array)
-      : descriptor_array_(descriptor_array) { }
-
-  void Start() {
-    ASSERT(!IsIterating());
-    if (HasContentArray()) *ContentHeader() = Smi::FromInt(0);
-  }
-
-  bool IsIterating() {
-    return HasContentArray() && (*ContentHeader())->IsSmi();
-  }
-
-  Map* Next() {
-    ASSERT(IsIterating());
-    FixedArray* contents = ContentArray();
-    // Attention, tricky index manipulation ahead: Every entry in the contents
-    // array consists of a value/details pair, so the index is typically even.
-    // An exception is made for CALLBACKS entries: An even index means we look
-    // at its getter, and an odd index means we look at its setter.
-    int index = Smi::cast(*ContentHeader())->value();
-    while (index < contents->length()) {
-      PropertyDetails details(Smi::cast(contents->get(index | 1)));
-      switch (details.type()) {
-        case MAP_TRANSITION:
-        case CONSTANT_TRANSITION:
-        case ELEMENTS_TRANSITION:
-          // We definitely have a map transition.
-          *ContentHeader() = Smi::FromInt(index + 2);
-          return static_cast<Map*>(contents->get(index));
-        case CALLBACKS: {
-          // We might have a map transition in a getter or in a setter.
-          AccessorPair* accessors =
-              static_cast<AccessorPair*>(contents->get(index & ~1));
-          Object* accessor =
-              ((index & 1) == 0) ? accessors->getter() : accessors->setter();
-          index++;
-          if (accessor->IsMap()) {
-            *ContentHeader() = Smi::FromInt(index);
-            return static_cast<Map*>(accessor);
-          }
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+  // Traverse the transition tree without using a stack.  We do this by
+  // reversing the pointers in the maps and descriptor arrays.
+  Map* current = this;
+  Map* meta_map = heap()->meta_map();
+  Object** map_or_index_field = NULL;
+  while (current != meta_map) {
+    DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+        *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset));
+    if (!d->IsEmpty()) {
+      FixedArray* contents = reinterpret_cast<FixedArray*>(
+          d->get(DescriptorArray::kContentArrayIndex));
+      map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+      Object* map_or_index = *map_or_index_field;
+      bool map_done = true;  // Controls a nested continue statement.
+      for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+           i < contents->length();
+           i += 2) {
+        PropertyDetails details(Smi::cast(contents->get(i + 1)));
+        if (details.IsTransition()) {
+          // Found a map in the transition array.  We record our progress in
+          // the transition array by storing the current map in the map field
+          // of the next map and storing the index in the transition array in
+          // the map field of the array.
+          Map* next = Map::cast(contents->get(i));
+          next->set_map(current);
+          *map_or_index_field = Smi::FromInt(i + 2);
+          current = next;
+          map_done = false;
           break;
         }
-        case NORMAL:
-        case FIELD:
-        case CONSTANT_FUNCTION:
-        case HANDLER:
-        case INTERCEPTOR:
-        case NULL_DESCRIPTOR:
-          // We definitely have no map transition.
-          index += 2;
-          break;
+      }
+      if (!map_done) continue;
+    } else {
+      map_or_index_field = NULL;
+    }
+    // Those were the regular transitions; now for the prototype transitions.
+    FixedArray* prototype_transitions =
+        current->unchecked_prototype_transitions();
+    Object** proto_map_or_index_field =
+        RawField(prototype_transitions, HeapObject::kMapOffset);
+    Object* map_or_index = *proto_map_or_index_field;
+    const int start = kProtoTransitionHeaderSize + kProtoTransitionMapOffset;
+    int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : start;
+    if (i < prototype_transitions->length()) {
+      // Found a map in the prototype transition array.  Record progress in
+      // an analogous way to the regular transitions array above.
+      Object* perhaps_map = prototype_transitions->get(i);
+      if (perhaps_map->IsMap()) {
+        Map* next = Map::cast(perhaps_map);
+        next->set_map(current);
+        *proto_map_or_index_field =
+            Smi::FromInt(i + kProtoTransitionElementsPerEntry);
+        current = next;
+        continue;
       }
     }
-    *ContentHeader() = descriptor_array_->GetHeap()->fixed_array_map();
-    return NULL;
-  }
-
- private:
-  bool HasContentArray() {
-    return descriptor_array_->length() > DescriptorArray::kContentArrayIndex;
-  }
-
-  FixedArray* ContentArray() {
-    Object* array = descriptor_array_->get(DescriptorArray::kContentArrayIndex);
-    return static_cast<FixedArray*>(array);
-  }
-
-  Object** ContentHeader() {
-    return HeapObject::RawField(ContentArray(), DescriptorArray::kMapOffset);
-  }
-
-  DescriptorArray* descriptor_array_;
-};
-
-
-// An iterator over all prototype transitions, reusing the map field of the
-// underlying array while it is running.
-class IntrusivePrototypeTransitionIterator {
- public:
-  explicit IntrusivePrototypeTransitionIterator(FixedArray* proto_trans)
-      : proto_trans_(proto_trans) { }
-
-  void Start() {
-    ASSERT(!IsIterating());
-    if (HasTransitions()) *Header() = Smi::FromInt(0);
-  }
-
-  bool IsIterating() {
-    return HasTransitions() && (*Header())->IsSmi();
-  }
-
-  Map* Next() {
-    ASSERT(IsIterating());
-    int transitionNumber = Smi::cast(*Header())->value();
-    if (transitionNumber < NumberOfTransitions()) {
-      *Header() = Smi::FromInt(transitionNumber + 1);
-      return GetTransition(transitionNumber);
+    *proto_map_or_index_field = heap()->fixed_array_map();
+    if (map_or_index_field != NULL) {
+      *map_or_index_field = heap()->fixed_array_map();
     }
-    *Header() = proto_trans_->GetHeap()->fixed_array_map();
-    return NULL;
-  }
 
- private:
-  bool HasTransitions() {
-    return proto_trans_->length() >= Map::kProtoTransitionHeaderSize;
-  }
-
-  Object** Header() {
-    return HeapObject::RawField(proto_trans_, FixedArray::kMapOffset);
-  }
-
-  int NumberOfTransitions() {
-    Object* num = proto_trans_->get(Map::kProtoTransitionNumberOfEntriesOffset);
-    return Smi::cast(num)->value();
-  }
-
-  Map* GetTransition(int transitionNumber) {
-    return Map::cast(proto_trans_->get(IndexFor(transitionNumber)));
-  }
-
-  int IndexFor(int transitionNumber) {
-    return Map::kProtoTransitionHeaderSize +
-        Map::kProtoTransitionMapOffset +
-        transitionNumber * Map::kProtoTransitionElementsPerEntry;
-  }
-
-  FixedArray* proto_trans_;
-};
-
-
-// To traverse the transition tree iteratively, we have to store two kinds of
-// information in a map: The parent map in the traversal and which children of a
-// node have already been visited. To do this without additional memory, we
-// temporarily reuse two maps with known values:
-//
-//  (1) The map of the map temporarily holds the parent, and is restored to the
-//      meta map afterwards.
-//
-//    (2) The info about which children have already been visited depends on
-//        which part of the map we are currently iterating over:
-//
-//    (a) If we currently follow normal map transitions, we temporarily store
-//        the current index in the map of the FixedArray of the descriptor
-//        array's contents, and restore it to the fixed array map afterwards.
-//        Note that a single descriptor can have 0, 1, or 2 transitions.
-//
-//    (b) If we currently follow prototype transitions, we temporarily store
-//        the current index in the map of the FixedArray holding the prototype
-//        transitions, and restore it to the fixed array map afterwards.
-//
-// Note that the child iterator is just a concatenation of two iterators: One
-// iterating over map transitions and one iterating over prototype transitions.
-class TraversableMap : public Map {
- public:
-  // Record the parent in the traversal within this map. Note that this destroys
-  // this map's map!
-  void SetParent(TraversableMap* parent) { set_map_no_write_barrier(parent); }
-
-  // Reset the current map's map, returning the parent previously stored in it.
-  TraversableMap* GetAndResetParent() {
-    TraversableMap* old_parent = static_cast<TraversableMap*>(map());
-    set_map_no_write_barrier(GetHeap()->meta_map());
-    return old_parent;
-  }
-
-  // Start iterating over this map's children, possibly destroying a FixedArray
-  // map (see explanation above).
-  void ChildIteratorStart() {
-    IntrusiveMapTransitionIterator(instance_descriptors()).Start();
-    IntrusivePrototypeTransitionIterator(
-        unchecked_prototype_transitions()).Start();
-  }
-
-  // If we have an unvisited child map, return that one and advance. If we have
-  // none, return NULL and reset any destroyed FixedArray maps.
-  TraversableMap* ChildIteratorNext() {
-    IntrusiveMapTransitionIterator descriptor_iterator(instance_descriptors());
-    if (descriptor_iterator.IsIterating()) {
-      Map* next = descriptor_iterator.Next();
-      if (next != NULL) return static_cast<TraversableMap*>(next);
-    }
-    IntrusivePrototypeTransitionIterator
-        proto_iterator(unchecked_prototype_transitions());
-    if (proto_iterator.IsIterating()) {
-      Map* next = proto_iterator.Next();
-      if (next != NULL) return static_cast<TraversableMap*>(next);
-    }
-    return NULL;
-  }
-};
-
-
-// Traverse the transition tree in postorder without using the C++ stack by
-// doing pointer reversal.
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
-  TraversableMap* current = static_cast<TraversableMap*>(this);
-  current->ChildIteratorStart();
-  while (true) {
-    TraversableMap* child = current->ChildIteratorNext();
-    if (child != NULL) {
-      child->ChildIteratorStart();
-      child->SetParent(current);
-      current = child;
-    } else {
-      TraversableMap* parent = current->GetAndResetParent();
-      callback(current, data);
-      if (current == this) break;
-      current = parent;
-    }
+    // The callback expects a map to have a real map as its map, so we save
+    // the map field (which is being used to track the traversal) and put the
+    // correct map (the meta_map) in place while we do the callback.
+    Map* prev = current->map();
+    current->set_map(meta_map);
+    callback(current, data);
+    current = prev;
   }
 }
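TraverseTransitionTree above is pointer reversal in the Deutsch-Schorr-Waite
style: instead of a stack, the link used to descend is temporarily overwritten
with the parent pointer and restored on the way back up, while a little
per-node state records how far the children have been processed. A minimal
sketch of the same idea on a plain binary tree; the Node type and its explicit
state field are illustrative stand-ins for the Smi bookkeeping V8 threads
through map fields:

    struct Node {
      int value;
      Node* left;
      Node* right;
      int state;  // Must start at 0: 0 = left next, 1 = right next, 2 = done.
    };

    // Postorder traversal without a stack: child links temporarily hold the
    // parent pointer while we are below them, and are restored on the way up.
    void TraverseWithoutStack(Node* root, void (*callback)(Node*)) {
      Node* current = root;
      Node* parent = nullptr;
      while (current != nullptr) {
        if (current->state == 0) {
          current->state = 1;
          if (current->left != nullptr) {
            Node* child = current->left;
            current->left = parent;  // Reverse the link we descend through.
            parent = current;
            current = child;
          }
        } else if (current->state == 1) {
          current->state = 2;
          if (current->right != nullptr) {
            Node* child = current->right;
            current->right = parent;
            parent = current;
            current = child;
          }
        } else {
          callback(current);   // Both subtrees done: postorder visit.
          current->state = 0;  // Reset so the tree can be traversed again.
          if (parent == nullptr) break;
          // parent->state records which link we came down; restore it.
          Node* grandparent;
          if (parent->state == 1) {
            grandparent = parent->left;
            parent->left = current;
          } else {
            grandparent = parent->right;
            parent->right = current;
          }
          current = parent;
          parent = grandparent;
        }
      }
    }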
 
@@ -5311,7 +4409,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -5369,22 +4467,13 @@
 void CodeCacheHashTable::RemoveByIndex(int index) {
   ASSERT(index >= 0);
   Heap* heap = GetHeap();
-  set(EntryToIndex(index), heap->the_hole_value());
-  set(EntryToIndex(index) + 1, heap->the_hole_value());
+  set(EntryToIndex(index), heap->null_value());
+  set(EntryToIndex(index) + 1, heap->null_value());
   ElementRemoved();
 }
 
 
-void PolymorphicCodeCache::Update(Handle<PolymorphicCodeCache> cache,
-                                  MapHandleList* maps,
-                                  Code::Flags flags,
-                                  Handle<Code> code) {
-  Isolate* isolate = cache->GetIsolate();
-  CALL_HEAP_FUNCTION_VOID(isolate, cache->Update(maps, flags, *code));
-}
-
-
-MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps,
+MaybeObject* PolymorphicCodeCache::Update(MapList* maps,
                                           Code::Flags flags,
                                           Code* code) {
   // Initialize cache if necessary.
@@ -5412,14 +4501,13 @@
 }
 
 
-Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
-                                            Code::Flags flags) {
+Object* PolymorphicCodeCache::Lookup(MapList* maps, Code::Flags flags) {
   if (!cache()->IsUndefined()) {
     PolymorphicCodeCacheHashTable* hash_table =
         PolymorphicCodeCacheHashTable::cast(cache());
-    return Handle<Object>(hash_table->Lookup(maps, flags));
+    return hash_table->Lookup(maps, flags);
   } else {
-    return GetIsolate()->factory()->undefined_value();
+    return GetHeap()->undefined_value();
   }
 }
 
@@ -5430,12 +4518,12 @@
 class PolymorphicCodeCacheHashTableKey : public HashTableKey {
  public:
   // Callers must ensure that |maps| outlives the newly constructed object.
-  PolymorphicCodeCacheHashTableKey(MapHandleList* maps, int code_flags)
+  PolymorphicCodeCacheHashTableKey(MapList* maps, int code_flags)
       : maps_(maps),
         code_flags_(code_flags) {}
 
   bool IsMatch(Object* other) {
-    MapHandleList other_maps(kDefaultListAllocationSize);
+    MapList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(other, &other_flags, &other_maps);
     if (code_flags_ != other_flags) return false;
@@ -5451,7 +4539,7 @@
     for (int i = 0; i < maps_->length(); ++i) {
       bool match_found = false;
       for (int j = 0; j < other_maps.length(); ++j) {
-        if (*(maps_->at(i)) == *(other_maps.at(j))) {
+        if (maps_->at(i)->EquivalentTo(other_maps.at(j))) {
           match_found = true;
           break;
         }
@@ -5461,7 +4549,7 @@
     return true;
   }
 
-  static uint32_t MapsHashHelper(MapHandleList* maps, int code_flags) {
+  static uint32_t MapsHashHelper(MapList* maps, int code_flags) {
     uint32_t hash = code_flags;
     for (int i = 0; i < maps->length(); ++i) {
       hash ^= maps->at(i)->Hash();
@@ -5474,7 +4562,7 @@
   }
 
   uint32_t HashForObject(Object* obj) {
-    MapHandleList other_maps(kDefaultListAllocationSize);
+    MapList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(obj, &other_flags, &other_maps);
     return MapsHashHelper(&other_maps, other_flags);
@@ -5492,32 +4580,29 @@
     FixedArray* list = FixedArray::cast(obj);
     list->set(0, Smi::FromInt(code_flags_));
     for (int i = 0; i < maps_->length(); ++i) {
-      list->set(i + 1, *maps_->at(i));
+      list->set(i + 1, maps_->at(i));
     }
     return list;
   }
 
  private:
-  static MapHandleList* FromObject(Object* obj,
-                                   int* code_flags,
-                                   MapHandleList* maps) {
+  static MapList* FromObject(Object* obj, int* code_flags, MapList* maps) {
     FixedArray* list = FixedArray::cast(obj);
     maps->Rewind(0);
     *code_flags = Smi::cast(list->get(0))->value();
     for (int i = 1; i < list->length(); ++i) {
-      maps->Add(Handle<Map>(Map::cast(list->get(i))));
+      maps->Add(Map::cast(list->get(i)));
     }
     return maps;
   }
 
-  MapHandleList* maps_;  // weak.
+  MapList* maps_;  // weak.
   int code_flags_;
   static const int kDefaultListAllocationSize = kMaxKeyedPolymorphism + 1;
 };
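MapsHashHelper above XOR-folds the per-map hashes into the code flags. XOR is
commutative and associative, so two lists holding the same maps in different
orders hash identically, which matches the order-insensitive IsMatch
comparison. A tiny sketch of the scheme, with raw hash values standing in for
Map objects:

    #include <cstdint>

    static uint32_t CombineMapHashes(const uint32_t* map_hashes, int count,
                                     uint32_t code_flags) {
      uint32_t hash = code_flags;
      for (int i = 0; i < count; ++i) {
        hash ^= map_hashes[i];  // Order-independent: XOR commutes.
      }
      return hash;
    }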
 
 
-Object* PolymorphicCodeCacheHashTable::Lookup(MapHandleList* maps,
-                                              int code_flags) {
+Object* PolymorphicCodeCacheHashTable::Lookup(MapList* maps, int code_flags) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -5525,7 +4610,7 @@
 }
 
 
-MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps,
+MaybeObject* PolymorphicCodeCacheHashTable::Put(MapList* maps,
                                                 int code_flags,
                                                 Code* code) {
   PolymorphicCodeCacheHashTableKey key(maps, code_flags);
@@ -5549,7 +4634,7 @@
 MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) {
   ElementsAccessor* accessor = array->GetElementsAccessor();
   MaybeObject* maybe_result =
-      accessor->AddElementsToFixedArray(array, array, this);
+      accessor->AddElementsToFixedArray(array->elements(), this, array, array);
   FixedArray* result;
   if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
 #ifdef DEBUG
@@ -5567,7 +4652,7 @@
 MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) {
   ElementsAccessor* accessor = ElementsAccessor::ForArray(other);
   MaybeObject* maybe_result =
-      accessor->AddElementsToFixedArray(NULL, NULL, this, other);
+      accessor->AddElementsToFixedArray(other, this, NULL, NULL);
   FixedArray* result;
   if (!maybe_result->To<FixedArray>(&result)) return maybe_result;
 #ifdef DEBUG
@@ -5594,9 +4679,7 @@
   AssertNoAllocation no_gc;
   int len = length();
   if (new_length < len) len = new_length;
-  // We are taking the map from the old fixed array so the map is sure to
-  // be an immortal immutable object.
-  result->set_map_no_write_barrier(map());
+  result->set_map(map());
   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < len; i++) {
     result->set(i, get(i), mode);
@@ -5653,61 +4736,23 @@
 
 
 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
-                                   FixedArray* new_cache,
-                                   Object* new_index_cache) {
+                                   FixedArray* new_cache) {
   ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
-  ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
   if (HasEnumCache()) {
     FixedArray::cast(get(kEnumerationIndexIndex))->
       set(kEnumCacheBridgeCacheIndex, new_cache);
-    FixedArray::cast(get(kEnumerationIndexIndex))->
-      set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
   } else {
     if (IsEmpty()) return;  // Do nothing for empty descriptor array.
     FixedArray::cast(bridge_storage)->
       set(kEnumCacheBridgeCacheIndex, new_cache);
-    FixedArray::cast(bridge_storage)->
-      set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
-    NoWriteBarrierSet(FixedArray::cast(bridge_storage),
-                      kEnumCacheBridgeEnumIndex,
-                      get(kEnumerationIndexIndex));
+    fast_set(FixedArray::cast(bridge_storage),
+             kEnumCacheBridgeEnumIndex,
+             get(kEnumerationIndexIndex));
     set(kEnumerationIndexIndex, bridge_storage);
   }
 }
 
 
-static bool InsertionPointFound(String* key1, String* key2) {
-  return key1->Hash() > key2->Hash() || key1 == key2;
-}
-
-
-void DescriptorArray::CopyFrom(Handle<DescriptorArray> dst,
-                               int dst_index,
-                               Handle<DescriptorArray> src,
-                               int src_index,
-                               const WhitenessWitness& witness) {
-  CALL_HEAP_FUNCTION_VOID(dst->GetIsolate(),
-                          dst->CopyFrom(dst_index, *src, src_index, witness));
-}
-
-
-MaybeObject* DescriptorArray::CopyFrom(int dst_index,
-                                       DescriptorArray* src,
-                                       int src_index,
-                                       const WhitenessWitness& witness) {
-  Object* value = src->GetValue(src_index);
-  PropertyDetails details(src->GetDetails(src_index));
-  if (details.type() == CALLBACKS && value->IsAccessorPair()) {
-    MaybeObject* maybe_copy =
-        AccessorPair::cast(value)->CopyWithoutTransitions();
-    if (!maybe_copy->To(&value)) return maybe_copy;
-  }
-  Descriptor desc(src->GetKey(src_index), value, details);
-  Set(dst_index, &desc, witness);
-  return this;
-}
-
-
 MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
                                          TransitionFlag transition_flag) {
   // Transitions are only kept when inserting another transition.
@@ -5716,26 +4761,38 @@
   // Conversely, we filter after replacing, so replacing a transition and
   // removing all other transitions is not supported.
   bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
-  ASSERT(remove_transitions == !descriptor->ContainsTransition());
+  ASSERT(remove_transitions == !descriptor->GetDetails().IsTransition());
   ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
 
   // Ensure the key is a symbol.
+  Object* result;
   { MaybeObject* maybe_result = descriptor->KeyToSymbol();
-    if (maybe_result->IsFailure()) return maybe_result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
-  int new_size = 0;
-  for (int i = 0; i < number_of_descriptors(); i++) {
-    if (IsNullDescriptor(i)) continue;
-    if (remove_transitions && IsTransitionOnly(i)) continue;
-    new_size++;
+  int transitions = 0;
+  int null_descriptors = 0;
+  if (remove_transitions) {
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsTransition(i)) transitions++;
+      if (IsNullDescriptor(i)) null_descriptors++;
+    }
+  } else {
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsNullDescriptor(i)) null_descriptors++;
+    }
   }
+  int new_size = number_of_descriptors() - transitions - null_descriptors;
 
   // If the key is in the descriptor array, we replace it in place when
   // filtering.  Count a null descriptor for the key as inserted, not replaced.
   int index = Search(descriptor->GetKey());
-  const bool replacing = (index != kNotFound);
+  const bool inserting = (index == kNotFound);
+  const bool replacing = !inserting;
   bool keep_enumeration_index = false;
+  if (inserting) {
+    ++new_size;
+  }
   if (replacing) {
     // We are replacing an existing descriptor.  We keep the enumeration
     // index of a visible property.
@@ -5750,21 +4807,15 @@
      // a transition that will be replaced.  Adjust count in this case.
       ++new_size;
     }
-  } else {
-    ++new_size;
   }
-
-  DescriptorArray* new_descriptors;
   { MaybeObject* maybe_result = Allocate(new_size);
-    if (!maybe_result->To(&new_descriptors)) return maybe_result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-
-  DescriptorArray::WhitenessWitness witness(new_descriptors);
-
+  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
   // Set the enumeration index in the descriptors and set the enumeration index
   // in the result.
   int enumeration_index = NextEnumerationIndex();
-  if (!descriptor->ContainsTransition()) {
+  if (!descriptor->GetDetails().IsTransition()) {
     if (keep_enumeration_index) {
       descriptor->SetEnumerationIndex(
           PropertyDetails(GetDetails(index)).index());
@@ -5777,26 +4828,28 @@
 
   // Copy the descriptors, filtering out transitions and null descriptors,
   // and inserting or replacing a descriptor.
-  int to_index = 0;
-  int insertion_index = -1;
+  uint32_t descriptor_hash = descriptor->GetKey()->Hash();
   int from_index = 0;
-  while (from_index < number_of_descriptors()) {
-    if (insertion_index < 0 &&
-        InsertionPointFound(GetKey(from_index), descriptor->GetKey())) {
-      insertion_index = to_index++;
-      if (replacing) from_index++;
-    } else {
-      if (!(IsNullDescriptor(from_index) ||
-            (remove_transitions && IsTransitionOnly(from_index)))) {
-        MaybeObject* copy_result =
-            new_descriptors->CopyFrom(to_index++, this, from_index, witness);
-        if (copy_result->IsFailure()) return copy_result;
-      }
-      from_index++;
+  int to_index = 0;
+
+  for (; from_index < number_of_descriptors(); from_index++) {
+    String* key = GetKey(from_index);
+    if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) {
+      break;
     }
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
   }
-  if (insertion_index < 0) insertion_index = to_index++;
-  new_descriptors->Set(insertion_index, descriptor, witness);
+
+  new_descriptors->Set(to_index++, descriptor);
+  if (replacing) from_index++;
+
+  for (; from_index < number_of_descriptors(); from_index++) {
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
+  }
 
   ASSERT(to_index == new_descriptors->number_of_descriptors());
   SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
@@ -5806,25 +4859,27 @@
 
 
 MaybeObject* DescriptorArray::RemoveTransitions() {
-  // Allocate the new descriptor array.
-  int new_number_of_descriptors = 0;
+  // Remove all transitions and null descriptors. Return a copy of the array
+  // with all transitions removed, or a Failure object if the new array could
+  // not be allocated.
+
+  // Count the transition and null descriptor entries to be removed.
+  int num_removed = 0;
   for (int i = 0; i < number_of_descriptors(); i++) {
-    if (IsProperty(i)) new_number_of_descriptors++;
-  }
-  DescriptorArray* new_descriptors;
-  { MaybeObject* maybe_result = Allocate(new_number_of_descriptors);
-    if (!maybe_result->To(&new_descriptors)) return maybe_result;
+    if (!IsProperty(i)) num_removed++;
   }
 
+  // Allocate the new descriptor array.
+  Object* result;
+  { MaybeObject* maybe_result = Allocate(number_of_descriptors() - num_removed);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  DescriptorArray* new_descriptors = DescriptorArray::cast(result);
+
   // Copy the content.
-  DescriptorArray::WhitenessWitness witness(new_descriptors);
   int next_descriptor = 0;
   for (int i = 0; i < number_of_descriptors(); i++) {
-    if (IsProperty(i)) {
-      MaybeObject* copy_result =
-          new_descriptors->CopyFrom(next_descriptor++, this, i, witness);
-      if (copy_result->IsFailure()) return copy_result;
-    }
+    if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
   }
   ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
 
@@ -5832,7 +4887,7 @@
 }
 
 
-void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
+void DescriptorArray::SortUnchecked() {
   // In-place heap sort.
   int len = number_of_descriptors();
 
@@ -5853,7 +4908,7 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+      Swap(parent_index, child_index);
       // Now element at child_index could be < its children.
       parent_index = child_index;  // parent_hash remains correct.
     }
@@ -5862,8 +4917,8 @@
   // Extract elements and create sorted array.
   for (int i = len - 1; i > 0; --i) {
     // Put max element at the back of the array.
-    NoIncrementalWriteBarrierSwapDescriptors(0, i);
-    // Shift down the new top element.
+    Swap(0, i);
+    // Sift down the new top element.
     int parent_index = 0;
     const uint32_t parent_hash = GetKey(parent_index)->Hash();
     const int max_parent_index = (i / 2) - 1;
@@ -5878,15 +4933,15 @@
         }
       }
       if (child_hash <= parent_hash) break;
-      NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
+      Swap(parent_index, child_index);
       parent_index = child_index;
     }
   }
 }
 
 
-void DescriptorArray::Sort(const WhitenessWitness& witness) {
-  SortUnchecked(witness);
+void DescriptorArray::Sort() {
+  SortUnchecked();
   SLOW_ASSERT(IsSortedNoDuplicates());
 }
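SortUnchecked above is a textbook in-place heapsort keyed on string hashes:
first a bottom-up heapify that sifts each parent below its larger child, then
repeated extraction that swaps the maximum to the back and sifts the new root
down. A minimal sketch of the same algorithm over a plain int array (the
descriptor version swaps key/value/details triples, and the 3.7 variant being
reverted did so through write-barrier-aware swap helpers):

    #include <utility>

    void HeapSort(int* a, int len) {
      // Phase 1: bottom-up heapify. Sift each parent below larger children.
      const int max_parent_index = (len / 2) - 1;
      for (int i = max_parent_index; i >= 0; --i) {
        int parent_index = i;
        const int parent_value = a[i];  // Stays valid across the swaps below.
        while (parent_index <= max_parent_index) {
          int child_index = 2 * parent_index + 1;
          int child_value = a[child_index];
          int right = child_index + 1;
          if (right < len && a[right] > child_value) {
            child_index = right;
            child_value = a[right];
          }
          if (child_value <= parent_value) break;
          std::swap(a[parent_index], a[child_index]);
          parent_index = child_index;
        }
      }
      // Phase 2: move the max to the back, then sift the new top down.
      for (int i = len - 1; i > 0; --i) {
        std::swap(a[0], a[i]);
        int parent_index = 0;
        const int max_parent = (i / 2) - 1;
        while (parent_index <= max_parent) {
          int child_index = 2 * parent_index + 1;
          if (child_index + 1 < i && a[child_index + 1] > a[child_index]) {
            child_index++;
          }
          if (a[child_index] <= a[parent_index]) break;
          std::swap(a[parent_index], a[child_index]);
          parent_index = child_index;
        }
      }
    }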
 
@@ -5936,24 +4991,6 @@
 }
 
 
-MaybeObject* AccessorPair::CopyWithoutTransitions() {
-  Heap* heap = GetHeap();
-  AccessorPair* copy;
-  { MaybeObject* maybe_copy = heap->AllocateAccessorPair();
-    if (!maybe_copy->To(&copy)) return maybe_copy;
-  }
-  copy->set_getter(getter()->IsMap() ? heap->the_hole_value() : getter());
-  copy->set_setter(setter()->IsMap() ? heap->the_hole_value() : setter());
-  return copy;
-}
-
-
-Object* AccessorPair::GetComponent(AccessorComponent component) {
-    Object* accessor = (component == ACCESSOR_GETTER) ? getter() : setter();
-    return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
-}
-
-
 MaybeObject* DeoptimizationInputData::Allocate(int deopt_entry_count,
                                                PretenureFlag pretenure) {
   ASSERT(deopt_entry_count > 0);
@@ -5989,6 +5026,24 @@
 }
 
 
+int String::Utf8Length() {
+  if (IsAsciiRepresentation()) return length();
+  // Attempt to flatten before accessing the string.  It probably
+  // doesn't make Utf8Length faster, but it is very likely that
+  // the string will be accessed later (for example by WriteUtf8)
+  // so it's still a good idea.
+  Heap* heap = GetHeap();
+  TryFlatten();
+  Access<StringInputBuffer> buffer(
+      heap->isolate()->objects_string_input_buffer());
+  buffer->Reset(0, this);
+  int result = 0;
+  while (buffer->has_more())
+    result += unibrow::Utf8::Length(buffer->GetNext());
+  return result;
+}
+
+
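The restored Utf8Length sums a per-code-unit byte count with no surrogate pairing; the previous-character plumbing is exactly what the revert drops. A hedged standalone equivalent over a raw UTF-16 buffer, with a hand-rolled size helper instead of V8's unibrow:

    #include <cstddef>
    #include <cstdint>

    // Bytes needed to encode one UTF-16 code unit as UTF-8 when every unit is
    // treated independently, as in the reverted code: surrogate halves count
    // as 3 bytes each instead of pairing into a single 4-byte sequence.
    static int Utf8LengthOfUnit(uint16_t c) {
      if (c < 0x80) return 1;   // ASCII
      if (c < 0x800) return 2;  // two-byte sequence
      return 3;                 // three-byte sequence (includes surrogates)
    }

    static size_t Utf8Length(const uint16_t* units, size_t n) {
      size_t total = 0;
      for (size_t i = 0; i < n; ++i) total += Utf8LengthOfUnit(units[i]);
      return total;
    }
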
 String::FlatContent String::GetFlatContent() {
   int length = this->length();
   StringShape shape(this);
@@ -6015,7 +5070,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqAsciiString::cast(string)->GetChars();
     } else {
-      start = ExternalAsciiString::cast(string)->GetChars();
+      start = ExternalAsciiString::cast(string)->resource()->data();
     }
     return FlatContent(Vector<const char>(start + offset, length));
   } else {
@@ -6024,7 +5079,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqTwoByteString::cast(string)->GetChars();
     } else {
-      start = ExternalTwoByteString::cast(string)->GetChars();
+      start = ExternalTwoByteString::cast(string)->resource()->data();
     }
     return FlatContent(Vector<const uc16>(start + offset, length));
   }
@@ -6050,11 +5105,12 @@
   buffer->Reset(offset, this);
   int character_position = offset;
   int utf8_bytes = 0;
-  int last = unibrow::Utf16::kNoPreviousCharacter;
-  while (buffer->has_more() && character_position++ < offset + length) {
+  while (buffer->has_more()) {
     uint16_t character = buffer->GetNext();
-    utf8_bytes += unibrow::Utf8::Length(character, last);
-    last = character;
+    if (character_position < offset + length) {
+      utf8_bytes += unibrow::Utf8::Length(character);
+    }
+    character_position++;
   }
 
   if (length_return) {
@@ -6068,15 +5124,16 @@
   buffer->Seek(offset);
   character_position = offset;
   int utf8_byte_position = 0;
-  last = unibrow::Utf16::kNoPreviousCharacter;
-  while (buffer->has_more() && character_position++ < offset + length) {
+  while (buffer->has_more()) {
     uint16_t character = buffer->GetNext();
-    if (allow_nulls == DISALLOW_NULLS && character == 0) {
-      character = ' ';
+    if (character_position < offset + length) {
+      if (allow_nulls == DISALLOW_NULLS && character == 0) {
+        character = ' ';
+      }
+      utf8_byte_position +=
+          unibrow::Utf8::Encode(result + utf8_byte_position, character);
     }
-    utf8_byte_position +=
-        unibrow::Utf8::Encode(result + utf8_byte_position, character, last);
-    last = character;
+    character_position++;
   }
   result[utf8_byte_position] = 0;
   return SmartArrayPointer<char>(result);
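
ToCString keeps the same two-pass shape after the revert: one pass over the input buffer to measure the byte count, a second to encode, again per code unit with no previous-character state. A sketch of that measure-then-encode pattern under the same per-unit assumption:

    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Encode one UTF-16 code unit into buf as UTF-8; returns bytes written.
    // Surrogate halves become individual 3-byte sequences, matching the
    // reverted per-unit behavior.
    static int EncodeUnit(char* buf, uint16_t c) {
      if (c < 0x80) { buf[0] = static_cast<char>(c); return 1; }
      if (c < 0x800) {
        buf[0] = static_cast<char>(0xC0 | (c >> 6));
        buf[1] = static_cast<char>(0x80 | (c & 0x3F));
        return 2;
      }
      buf[0] = static_cast<char>(0xE0 | (c >> 12));
      buf[1] = static_cast<char>(0x80 | ((c >> 6) & 0x3F));
      buf[2] = static_cast<char>(0x80 | (c & 0x3F));
      return 3;
    }

    static std::string ToUtf8(const uint16_t* units, size_t n) {
      char scratch[3];
      size_t bytes = 0;  // pass 1: measure the exact output size
      for (size_t i = 0; i < n; ++i) bytes += EncodeUnit(scratch, units[i]);
      std::string out(bytes, '\0');  // allocate once, as the code above does
      size_t pos = 0;    // pass 2: encode into the buffer
      for (size_t i = 0; i < n; ++i) pos += EncodeUnit(&out[pos], units[i]);
      return out;
    }
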
@@ -6261,26 +5318,44 @@
 }
 
 
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return resource()->data()[index];
+}
+
+
 const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
       unsigned* remaining,
       unsigned* offset_ptr,
       unsigned max_chars) {
   // Cast const char* to unibrow::byte* (signedness difference).
   const unibrow::byte* b =
-      reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
+      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
   *remaining = max_chars;
   *offset_ptr += max_chars;
   return b;
 }
 
 
+const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
+      unsigned start) {
+  return resource()->data() + start;
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return resource()->data()[index];
+}
+
+
 void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
       ReadBlockBuffer* rbb,
       unsigned* offset_ptr,
       unsigned max_chars) {
   unsigned chars_read = 0;
   unsigned offset = *offset_ptr;
-  const uint16_t* data = GetChars();
+  const uint16_t* data = resource()->data();
   while (chars_read < max_chars) {
     uint16_t c = data[offset];
     if (c <= kMaxAsciiCharCode) {
@@ -6326,7 +5401,9 @@
       unsigned max_chars) {
   unsigned capacity = rbb->capacity - rbb->cursor;
   if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
+  memcpy(rbb->util_buffer + rbb->cursor,
+         resource()->data() + *offset_ptr,
+         max_chars);
   rbb->remaining += max_chars;
   *offset_ptr += max_chars;
   rbb->cursor += max_chars;
@@ -6701,13 +5778,13 @@
     switch (StringShape(source).full_representation_tag()) {
       case kAsciiStringTag | kExternalStringTag: {
         CopyChars(sink,
-                  ExternalAsciiString::cast(source)->GetChars() + from,
+                  ExternalAsciiString::cast(source)->resource()->data() + from,
                   to - from);
         return;
       }
       case kTwoByteStringTag | kExternalStringTag: {
         const uc16* data =
-            ExternalTwoByteString::cast(source)->GetChars();
+            ExternalTwoByteString::cast(source)->resource()->data();
         CopyChars(sink,
                   data + from,
                   to - from);
@@ -6745,21 +5822,10 @@
           // Left hand side is longer.  Recurse over right.
           if (to > boundary) {
             String* second = cons_string->second();
-            // When repeatedly appending to a string, we get a cons string that
-            // is unbalanced to the left, a list, essentially.  We inline the
-            // common case of sequential ascii right child.
-            if (to - boundary == 1) {
-              sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
-            } else if (second->IsSeqAsciiString()) {
-              CopyChars(sink + boundary - from,
-                        SeqAsciiString::cast(second)->GetChars(),
+            WriteToFlat(second,
+                        sink + boundary - from,
+                        0,
                         to - boundary);
-            } else {
-              WriteToFlat(second,
-                          sink + boundary - from,
-                          0,
-                          to - boundary);
-            }
             to = boundary;
           }
           source = first;
@@ -6783,10 +5849,8 @@
   // General slow case check.  We know that the ia and ib iterators
   // have the same length.
   while (ia->has_more()) {
-    uint32_t ca = ia->GetNext();
-    uint32_t cb = ib->GetNext();
-    ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
-    ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
+    uc32 ca = ia->GetNext();
+    uc32 cb = ib->GetNext();
     if (ca != cb)
       return false;
   }
@@ -6948,7 +6012,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = GetHeap();
+  Heap* heap = map->heap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -6969,14 +6033,8 @@
   decoder->Reset(str.start(), str.length());
   int i;
   for (i = 0; i < slen && decoder->has_more(); i++) {
-    uint32_t r = decoder->GetNext();
-    if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      if (i > slen - 1) return false;
-      if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
-      if (Get(i) != unibrow::Utf16::TrailSurrogate(r)) return false;
-    } else {
-      if (Get(i) != r) return false;
-    }
+    uc32 r = decoder->GetNext();
+    if (Get(i) != r) return false;
   }
   return i == slen && !decoder->has_more();
 }
@@ -7106,22 +6164,6 @@
 }
 
 
-void StringHasher::AddSurrogatePair(uc32 c) {
-  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
-  AddCharacter(lead);
-  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
-  AddCharacter(trail);
-}
-
-
-void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
-  uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
-  AddCharacterNoIndex(lead);
-  uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
-  AddCharacterNoIndex(trail);
-}
-
-
 uint32_t StringHasher::GetHashField() {
   ASSERT(is_valid());
   if (length_ <= String::kMaxHashCalcLength) {
@@ -7178,155 +6220,66 @@
 }
 
 
-void Map::CreateOneBackPointer(Object* transition_target) {
-  if (!transition_target->IsMap()) return;
-  Map* target = Map::cast(transition_target);
-#ifdef DEBUG
-  // Verify target.
-  Object* source_prototype = prototype();
-  Object* target_prototype = target->prototype();
-  ASSERT(source_prototype->IsJSReceiver() ||
-         source_prototype->IsMap() ||
-         source_prototype->IsNull());
-  ASSERT(target_prototype->IsJSReceiver() ||
-         target_prototype->IsNull());
-  ASSERT(source_prototype->IsMap() ||
-         source_prototype == target_prototype);
-#endif
-  // Point target back to source.  set_prototype() will not let us set
-  // the prototype to a map, as we do here.
-  *RawField(target, kPrototypeOffset) = this;
-}
-
-
 void Map::CreateBackPointers() {
   DescriptorArray* descriptors = instance_descriptors();
   for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
-    switch (descriptors->GetType(i)) {
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-        CreateOneBackPointer(descriptors->GetValue(i));
-        break;
-      case ELEMENTS_TRANSITION: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsMap()) {
-          CreateOneBackPointer(object);
-        } else {
-          FixedArray* array = FixedArray::cast(object);
-          for (int i = 0; i < array->length(); ++i) {
-            CreateOneBackPointer(array->get(i));
-          }
-        }
-        break;
-      }
-      case CALLBACKS: {
-        Object* object = descriptors->GetValue(i);
-        if (object->IsAccessorPair()) {
-          AccessorPair* accessors = AccessorPair::cast(object);
-          CreateOneBackPointer(accessors->getter());
-          CreateOneBackPointer(accessors->setter());
-        }
-        break;
-      }
-      case NORMAL:
-      case FIELD:
-      case CONSTANT_FUNCTION:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NULL_DESCRIPTOR:
-        break;
+    if (descriptors->GetType(i) == MAP_TRANSITION ||
+        descriptors->GetType(i) == ELEMENTS_TRANSITION ||
+        descriptors->GetType(i) == CONSTANT_TRANSITION) {
+      // Get target.
+      Map* target = Map::cast(descriptors->GetValue(i));
+#ifdef DEBUG
+      // Verify target.
+      Object* source_prototype = prototype();
+      Object* target_prototype = target->prototype();
+      ASSERT(source_prototype->IsJSObject() ||
+             source_prototype->IsMap() ||
+             source_prototype->IsNull());
+      ASSERT(target_prototype->IsJSObject() ||
+             target_prototype->IsNull());
+      ASSERT(source_prototype->IsMap() ||
+             source_prototype == target_prototype);
+#endif
+      // Point target back to source.  set_prototype() will not let us set
+      // the prototype to a map, as we do here.
+      *RawField(target, kPrototypeOffset) = this;
     }
   }
 }
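
CreateBackPointers, as restored, inlines the per-transition work: for every transition descriptor it writes the source map straight into the target map's prototype slot via RawField, temporarily repurposing that field as a parent link for the GC. A toy illustration of the parent-link trick on a hypothetical transition tree (the names are stand-ins, not V8 types):

    #include <vector>

    // Hypothetical stand-in for a map with transition children. During the
    // GC-time traversal the 'prototype' slot doubles as a back pointer to
    // the parent, the same trick played with kPrototypeOffset above.
    struct ToyMap {
      ToyMap* prototype = nullptr;       // normally the real prototype
      std::vector<ToyMap*> transitions;  // maps reachable via transitions
    };

    static void CreateBackPointers(ToyMap* source) {
      for (ToyMap* target : source->transitions) {
        // Point target back to source; the real prototype is restored by
        // the sweep in ClearNonLiveTransitions below.
        target->prototype = source;
      }
    }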
 
 
-bool Map::RestoreOneBackPointer(Object* object,
-                                Object* real_prototype,
-                                bool* keep_entry) {
-  if (!object->IsMap()) return false;
-  Map* map = Map::cast(object);
-  if (Marking::MarkBitFrom(map).Get()) {
-    *keep_entry = true;
-    return false;
-  }
-  ASSERT(map->prototype() == this || map->prototype() == real_prototype);
-  // Getter prototype() is read-only, set_prototype() has side effects.
-  *RawField(map, Map::kPrototypeOffset) = real_prototype;
-  return true;
-}
-
-
 void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
-  DescriptorArray* d = DescriptorArray::cast(
+  // Live DescriptorArray objects will be marked, so we must use
+  // low-level accessors to get and modify their data.
+  DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
       *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
   if (d->IsEmpty()) return;
   Smi* NullDescriptorDetails =
     PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
-  FixedArray* contents = FixedArray::cast(
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
       d->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->length() >= 2);
   for (int i = 0; i < contents->length(); i += 2) {
-    // If the pair (value, details) is a map transition, check if the target is
-    // live. If not, null the descriptor. Also drop the back pointer for that
-    // map transition, so that this map is not reached again by following a back
-    // pointer from a non-live object.
-    bool keep_entry = false;
+    // If the pair (value, details) is a map transition,
+    // check if the target is live.  If not, null the descriptor.
+    // Also drop the back pointer for that map transition, so that this
+    // map is not reached again by following a back pointer from a
+    // non-live object.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    switch (details.type()) {
-      case MAP_TRANSITION:
-      case CONSTANT_TRANSITION:
-        RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
-        break;
-      case ELEMENTS_TRANSITION: {
-        Object* object = contents->get(i);
-        if (object->IsMap()) {
-          RestoreOneBackPointer(object, real_prototype, &keep_entry);
-        } else {
-          FixedArray* array = FixedArray::cast(object);
-          for (int j = 0; j < array->length(); ++j) {
-            if (RestoreOneBackPointer(array->get(j),
-                                      real_prototype,
-                                      &keep_entry)) {
-              array->set_undefined(j);
-            }
-          }
-        }
-        break;
+    if (details.type() == MAP_TRANSITION ||
+        details.type() == ELEMENTS_TRANSITION ||
+        details.type() == CONSTANT_TRANSITION) {
+      Map* target = reinterpret_cast<Map*>(contents->get(i));
+      ASSERT(target->IsHeapObject());
+      if (!target->IsMarked()) {
+        ASSERT(target->IsMap());
+        contents->set_unchecked(i + 1, NullDescriptorDetails);
+        contents->set_null_unchecked(heap, i);
+        ASSERT(target->prototype() == this ||
+               target->prototype() == real_prototype);
+        // Getter prototype() is read-only, set_prototype() has side effects.
+        *RawField(target, Map::kPrototypeOffset) = real_prototype;
       }
-      case CALLBACKS: {
-        Object* object = contents->get(i);
-        if (object->IsAccessorPair()) {
-          AccessorPair* accessors = AccessorPair::cast(object);
-          if (RestoreOneBackPointer(accessors->getter(),
-                                    real_prototype,
-                                    &keep_entry)) {
-            accessors->set_getter(heap->the_hole_value());
-          }
-          if (RestoreOneBackPointer(accessors->setter(),
-                                    real_prototype,
-                                    &keep_entry)) {
-            accessors->set_setter(heap->the_hole_value());
-          }
-        } else {
-          keep_entry = true;
-        }
-        break;
-      }
-      case NORMAL:
-      case FIELD:
-      case CONSTANT_FUNCTION:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NULL_DESCRIPTOR:
-        keep_entry = true;
-        break;
-    }
-    // Make sure that an entry containing only dead transitions gets collected.
-    // What we *really* want to do here is removing this entry completely, but
-    // for technical reasons we can't do this, so we zero it out instead.
-    if (!keep_entry) {
-      contents->set_unchecked(i + 1, NullDescriptorDetails);
-      contents->set_null_unchecked(heap, i);
     }
   }
 }
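
ClearNonLiveTransitions walks the flattened contents array two slots at a time (value, details); where a transition target went unmarked it nulls the pair and restores the target's real prototype. A hedged sketch of that pair-wise sweep, with a boolean standing in for the GC mark bit:

    #include <cstddef>
    #include <vector>

    struct Entry {
      void* value;            // transition target or other descriptor payload
      bool is_transition;     // stands in for MAP_TRANSITION and friends
      bool target_is_marked;  // stands in for the GC mark bit on the target
    };

    // Null out transition entries whose targets did not survive the GC; the
    // same shape as the loop over 'contents' above, one entry per descriptor.
    static void ClearNonLiveTransitions(std::vector<Entry>& contents) {
      for (size_t i = 0; i < contents.size(); ++i) {
        Entry& e = contents[i];
        if (!e.is_transition) continue;    // keep non-transition descriptors
        if (e.target_is_marked) continue;  // target is live, keep the entry
        e.value = nullptr;                 // drop the dead target
        // V8 additionally rewrites the details slot to NULL_DESCRIPTOR and
        // restores the target's prototype slot to the real prototype here.
      }
    }
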
@@ -7384,57 +6337,6 @@
 }
 
 
-bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                                        ClearExceptionFlag flag) {
-  return shared->is_compiled() || CompileLazy(shared, flag);
-}
-
-
-static bool CompileLazyHelper(CompilationInfo* info,
-                              ClearExceptionFlag flag) {
-  // Compile the source information to a code object.
-  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
-  ASSERT(!info->isolate()->has_pending_exception());
-  bool result = Compiler::CompileLazy(info);
-  ASSERT(result != Isolate::Current()->has_pending_exception());
-  if (!result && flag == CLEAR_EXCEPTION) {
-    info->isolate()->clear_pending_exception();
-  }
-  return result;
-}
-
-
-bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
-                                     ClearExceptionFlag flag) {
-  CompilationInfo info(shared);
-  return CompileLazyHelper(&info, flag);
-}
-
-
-bool JSFunction::CompileLazy(Handle<JSFunction> function,
-                             ClearExceptionFlag flag) {
-  bool result = true;
-  if (function->shared()->is_compiled()) {
-    function->ReplaceCode(function->shared()->code());
-    function->shared()->set_code_age(0);
-  } else {
-    CompilationInfo info(function);
-    result = CompileLazyHelper(&info, flag);
-    ASSERT(!result || function->is_compiled());
-  }
-  return result;
-}
-
-
-bool JSFunction::CompileOptimized(Handle<JSFunction> function,
-                                  int osr_ast_id,
-                                  ClearExceptionFlag flag) {
-  CompilationInfo info(function);
-  info.SetOptimizing(osr_ast_id);
-  return CompileLazyHelper(&info, flag);
-}
-
-
 bool JSFunction::IsInlineable() {
   if (IsBuiltin()) return false;
   SharedFunctionInfo* shared_info = shared();
@@ -7449,19 +6351,11 @@
 }
 
 
-MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
-  ASSERT(value->IsJSReceiver());
+Object* JSFunction::SetInstancePrototype(Object* value) {
+  ASSERT(value->IsJSObject());
   Heap* heap = GetHeap();
   if (has_initial_map()) {
-    // If the function has allocated the initial map
-    // replace it with a copy containing the new prototype.
-    Map* new_map;
-    MaybeObject* maybe_new_map = initial_map()->CopyDropTransitions();
-    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-    new_map->set_prototype(value);
-    MaybeObject* maybe_object =
-        set_initial_map_and_cache_transitions(new_map);
-    if (maybe_object->IsFailure()) return maybe_object;
+    initial_map()->set_prototype(value);
   } else {
     // Put the value in the initial map field until an initial map is
     // needed.  At that point, a new initial map is created and the
@@ -7477,19 +6371,20 @@
   ASSERT(should_have_prototype());
   Object* construct_prototype = value;
 
-  // If the value is not a JSReceiver, store the value in the map's
+  // If the value is not a JSObject, store the value in the map's
   // constructor field so it can be accessed.  Also, set the prototype
   // used for constructing objects to the original object prototype.
   // See ECMA-262 13.2.2.
-  if (!value->IsJSReceiver()) {
+  if (!value->IsJSObject()) {
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Map* new_map;
+    Object* new_object;
     { MaybeObject* maybe_new_map = map()->CopyDropTransitions();
-      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+      if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
     }
-    Heap* heap = new_map->GetHeap();
+    Map* new_map = Map::cast(new_object);
+    Heap* heap = new_map->heap();
     set_map(new_map);
     new_map->set_constructor(value);
     new_map->set_non_instance_prototype(true);
@@ -7506,21 +6401,21 @@
 
 Object* JSFunction::RemovePrototype() {
   Context* global_context = context()->global_context();
-  Map* no_prototype_map = shared()->is_classic_mode()
-      ? global_context->function_without_prototype_map()
-      : global_context->strict_mode_function_without_prototype_map();
+  Map* no_prototype_map = shared()->strict_mode()
+      ? global_context->strict_mode_function_without_prototype_map()
+      : global_context->function_without_prototype_map();
 
   if (map() == no_prototype_map) {
     // Be idempotent.
     return this;
   }
 
-  ASSERT(map() == (shared()->is_classic_mode()
-                   ? global_context->function_map()
-                   : global_context->strict_mode_function_map()));
+  ASSERT(!shared()->strict_mode() ||
+         map() == global_context->strict_mode_function_map());
+  ASSERT(shared()->strict_mode() || map() == global_context->function_map());
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
   return this;
 }
 
@@ -7545,12 +6440,12 @@
 MaybeObject* Oddball::Initialize(const char* to_string,
                                  Object* to_number,
                                  byte kind) {
-  String* symbol;
+  Object* symbol;
   { MaybeObject* maybe_symbol =
         Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
-    if (!maybe_symbol->To(&symbol)) return maybe_symbol;
+    if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
   }
-  set_to_string(symbol);
+  set_to_string(String::cast(symbol));
   set_to_number(to_number);
   set_kind(kind);
   return this;
@@ -7570,10 +6465,13 @@
 }
 
 
-Handle<Object> SharedFunctionInfo::GetSourceCode() {
-  if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value();
-  Handle<String> source(String::cast(Script::cast(script())->source()));
-  return SubString(source, start_position(), end_position());
+Object* SharedFunctionInfo::GetSourceCode() {
+  Isolate* isolate = GetIsolate();
+  if (!HasSourceCode()) return isolate->heap()->undefined_value();
+  HandleScope scope(isolate);
+  Object* source = Script::cast(script())->source();
+  return *SubString(Handle<String>(String::cast(source), isolate),
+                    start_position(), end_position());
 }
 
 
@@ -7621,10 +6519,10 @@
        obj = obj->GetPrototype()) {
     JSObject* js_object = JSObject::cast(obj);
     for (int i = 0; i < this_property_assignments_count(); i++) {
-      LookupResult result(heap->isolate());
+      LookupResult result;
       String* name = GetThisPropertyAssignmentName(i);
       js_object->LocalLookupRealNamedProperty(name, &result);
-      if (result.IsFound() && result.type() == CALLBACKS) {
+      if (result.IsProperty() && result.type() == CALLBACKS) {
         return false;
       }
     }
@@ -7770,7 +6668,7 @@
 }
 
 
-void SharedFunctionInfo::DisableOptimization() {
+void SharedFunctionInfo::DisableOptimization(JSFunction* function) {
   // Disable optimization for the shared function info and mark the
   // code as non-optimizable. The marker on the shared function info
   // is there because we flush non-optimized code thereby losing the
@@ -7786,12 +6684,17 @@
     code()->set_optimizable(false);
   }
   if (FLAG_trace_opt) {
-    PrintF("[disabled optimization for %s]\n", *DebugName()->ToCString());
+    PrintF("[disabled optimization for: ");
+    function->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
   }
 }
 
 
 bool SharedFunctionInfo::VerifyBailoutId(int id) {
+  // TODO(srdjan): debugging ARM crashes in hydrogen. OK to disable while
+  // we are always bailing out on ARM.
+
   ASSERT(id != AstNode::kNoNumber);
   Code* unoptimized = code();
   DeoptimizationOutputData* data =
@@ -7805,8 +6708,6 @@
 void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
   ASSERT(!IsInobjectSlackTrackingInProgress());
 
-  if (!FLAG_clever_optimizations) return;
-
   // Only initiate the tracking the first time.
   if (live_objects_may_exist()) return;
   set_live_objects_may_exist(true);
@@ -7822,7 +6723,7 @@
     set_construction_count(kGenerousAllocationCount);
   }
   set_initial_map(map);
-  Builtins* builtins = map->GetHeap()->isolate()->builtins();
+  Builtins* builtins = map->heap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             construct_stub());
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -7842,9 +6743,8 @@
   // then StartInobjectTracking will be called again the next time the
   // constructor is called. The countdown will continue and (possibly after
   // several more GCs) CompleteInobjectSlackTracking will eventually be called.
-  Heap* heap = map->GetHeap();
-  set_initial_map(heap->raw_unchecked_undefined_value());
-  Builtins* builtins = heap->isolate()->builtins();
+  set_initial_map(map->heap()->raw_unchecked_undefined_value());
+  Builtins* builtins = map->heap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -7860,7 +6760,7 @@
 
   // Resume inobject slack tracking.
   set_initial_map(map);
-  Builtins* builtins = map->GetHeap()->isolate()->builtins();
+  Builtins* builtins = map->heap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
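
The slack-tracking paths above all follow one pattern: park the map in the initial-map field, swap the generic construct stub for a countdown stub, and let a fixed number of constructions elapse before shrinking the instance size. A toy sketch of the countdown idea (the count value is an assumption for illustration, not V8's kGenerousAllocationCount):

    // Toy model of in-object slack tracking: observe a few constructions,
    // then shrink the instance size to the slots actually used.
    struct ToyTracker {
      static const int kCountdown = 8;  // assumed, for illustration only
      int construction_count = kCountdown;
      int instance_slots;      // generous initial in-object allocation
      int max_slots_used = 0;  // high-water mark observed so far

      void OnConstruct(int slots_used) {
        if (slots_used > max_slots_used) max_slots_used = slots_used;
        if (construction_count > 0 && --construction_count == 0) {
          // Countdown elapsed: give the unused in-object slots back.
          instance_slots = max_slots_used;
        }
      }
    };
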
@@ -7892,7 +6792,7 @@
   ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
   Map* map = Map::cast(initial_map());
 
-  Heap* heap = map->GetHeap();
+  Heap* heap = map->heap();
   set_initial_map(heap->undefined_value());
   Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -7912,22 +6812,6 @@
 }
 
 
-#define DECLARE_TAG(ignore1, name, ignore2) name,
-const char* const VisitorSynchronization::kTags[
-    VisitorSynchronization::kNumberOfSyncTags] = {
-  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
-#define DECLARE_TAG(ignore1, ignore2, name) name,
-const char* const VisitorSynchronization::kTagNames[
-    VisitorSynchronization::kNumberOfSyncTags] = {
-  VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -7970,18 +6854,8 @@
 }
 
 
-void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
-  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-  VisitPointer(rinfo->target_object_address());
-}
-
-void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
-  Address* p = rinfo->target_reference_address();
-  VisitExternalReferences(p, p + 1);
-}
-
 void Code::InvalidateRelocation() {
-  set_relocation_info(GetHeap()->empty_byte_array());
+  set_relocation_info(heap()->empty_byte_array());
 }
 
 
@@ -7994,8 +6868,6 @@
 
 
 void Code::CopyFrom(const CodeDesc& desc) {
-  ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
-
   // copy code
   memmove(instruction_start(), desc.buffer, desc.instr_size);
 
@@ -8015,17 +6887,16 @@
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
-      it.rinfo()->set_target_object(*p, SKIP_WRITE_BARRIER);
+      it.rinfo()->set_target_object(*p);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-      Handle<JSGlobalPropertyCell> cell  = it.rinfo()->target_cell_handle();
-      it.rinfo()->set_target_cell(*cell, SKIP_WRITE_BARRIER);
+      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
-      it.rinfo()->set_target_address(code->instruction_start(),
-                                     SKIP_WRITE_BARRIER);
+      it.rinfo()->set_target_address(code->instruction_start());
     } else {
       it.rinfo()->apply(delta);
     }
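
Code::CopyFrom memmoves the instruction bytes and then walks the relocation info, re-resolving embedded objects, cells, and code targets, and applying the move delta to everything else. A minimal sketch of the delta-patching idea for absolute pointers recorded at known offsets (a hypothetical layout, not V8's RelocInfo encoding):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // After copying code from src to dst, every absolute address that pointed
    // into the old buffer must be shifted by the distance the code moved.
    static void RelocateAbsolutePointers(
        uint8_t* dst, const uint8_t* src, size_t size,
        const std::vector<size_t>& reloc_offsets) {
      std::memcpy(dst, src, size);  // copy the raw instruction bytes
      intptr_t delta = reinterpret_cast<intptr_t>(dst) -
                       reinterpret_cast<intptr_t>(src);
      for (size_t off : reloc_offsets) {
        intptr_t target;
        std::memcpy(&target, dst + off, sizeof(target));  // read old pointer
        target += delta;                                  // rebase into dst
        std::memcpy(dst + off, &target, sizeof(target));  // patch it back
      }
    }
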
@@ -8144,11 +7015,8 @@
         static_cast<Translation::Opcode>(iterator.Next());
     ASSERT(Translation::BEGIN == opcode);
     int frame_count = iterator.Next();
-    int jsframe_count = iterator.Next();
-    PrintF(out, "  %s {frame count=%d, js frame count=%d}\n",
-           Translation::StringFor(opcode),
-           frame_count,
-           jsframe_count);
+    PrintF(out, "  %s {count=%d}\n", Translation::StringFor(opcode),
+           frame_count);
 
     while (iterator.HasNext() &&
            Translation::BEGIN !=
@@ -8160,7 +7028,7 @@
           UNREACHABLE();
           break;
 
-        case Translation::JS_FRAME: {
+        case Translation::FRAME: {
           int ast_id = iterator.Next();
           int function_id = iterator.Next();
           JSFunction* function =
@@ -8172,18 +7040,6 @@
           break;
         }
 
-        case Translation::ARGUMENTS_ADAPTOR_FRAME:
-        case Translation::CONSTRUCT_STUB_FRAME: {
-          int function_id = iterator.Next();
-          JSFunction* function =
-              JSFunction::cast(LiteralArray()->get(function_id));
-          unsigned height = iterator.Next();
-          PrintF(out, "{function=");
-          function->PrintName(out);
-          PrintF(out, ", height=%u}", height);
-          break;
-        }
-
         case Translation::DUPLICATE:
           break;
 
@@ -8307,7 +7163,7 @@
     case CONSTANT_TRANSITION: return "CONSTANT_TRANSITION";
     case NULL_DESCRIPTOR: return "NULL_DESCRIPTOR";
   }
-  UNREACHABLE();  // keep the compiler happy
+  UNREACHABLE();
   return NULL;
 }
 
@@ -8416,55 +7272,125 @@
 #endif  // ENABLE_DISASSEMBLER
 
 
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(
-    int capacity,
-    int length,
-    SetFastElementsCapacityMode set_capacity_mode) {
+static void CopyFastElementsToFast(FixedArray* source,
+                                   FixedArray* destination,
+                                   WriteBarrierMode mode) {
+  uint32_t count = static_cast<uint32_t>(source->length());
+  for (uint32_t i = 0; i < count; ++i) {
+    destination->set(i, source->get(i), mode);
+  }
+}
+
+
+static void CopySlowElementsToFast(SeededNumberDictionary* source,
+                                   FixedArray* destination,
+                                   WriteBarrierMode mode) {
+  for (int i = 0; i < source->Capacity(); ++i) {
+    Object* key = source->KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t entry = static_cast<uint32_t>(key->Number());
+      destination->set(entry, source->ValueAt(i), mode);
+    }
+  }
+}
+
+
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
+                                                        int length) {
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
   // Allocate a new fast elements backing store.
-  FixedArray* new_elements;
-  { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
-    if (!maybe->To(&new_elements)) return maybe;
+  FixedArray* new_elements = NULL;
+  { Object* object;
+    MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+    if (!maybe->ToObject(&object)) return maybe;
+    new_elements = FixedArray::cast(object);
   }
 
   // Find the new map to use for this object if there is a map change.
   Map* new_map = NULL;
   if (elements()->map() != heap->non_strict_arguments_elements_map()) {
-    // The resized array has FAST_SMI_ONLY_ELEMENTS if the capacity mode forces
-    // it, or if it's allowed and the old elements array contained only SMIs.
-    bool has_fast_smi_only_elements =
-        (set_capacity_mode == kForceSmiOnlyElements) ||
-        ((set_capacity_mode == kAllowSmiOnlyElements) &&
-         (elements()->map()->has_fast_smi_only_elements() ||
-          elements() == heap->empty_fixed_array()));
-    ElementsKind elements_kind = has_fast_smi_only_elements
-        ? FAST_SMI_ONLY_ELEMENTS
-        : FAST_ELEMENTS;
-    MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
-    if (!maybe->To(&new_map)) return maybe;
+    Object* object;
+    MaybeObject* maybe = map()->GetFastElementsMap();
+    if (!maybe->ToObject(&object)) return maybe;
+    new_map = Map::cast(object);
   }
 
-  FixedArrayBase* old_elements = elements();
-  ElementsKind elements_kind = GetElementsKind();
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
-  ElementsKind to_kind = (elements_kind == FAST_SMI_ONLY_ELEMENTS)
-      ? FAST_SMI_ONLY_ELEMENTS
-      : FAST_ELEMENTS;
-  //  int copy_size = Min(old_elements_raw->length(), new_elements->length());
-  accessor->CopyElements(this, new_elements, to_kind);
-  if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
-    set_map_and_elements(new_map, new_elements);
-  } else {
-    FixedArray* parameter_map = FixedArray::cast(old_elements);
-    parameter_map->set(1, new_elements);
-  }
-
-  if (FLAG_trace_elements_transitions) {
-    PrintElementsTransition(stdout, elements_kind, old_elements,
-                            GetElementsKind(), new_elements);
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      CopySlowElementsToFast(SeededNumberDictionary::cast(elements()),
+                             new_elements,
+                             mode);
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      AssertNoAllocation no_gc;
+      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      // The object's map and the parameter map are unchanged, the unaliased
+      // arguments are copied to the new backing store.
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        CopySlowElementsToFast(SeededNumberDictionary::cast(arguments),
+                               new_elements,
+                               mode);
+      } else {
+        CopyFastElementsToFast(arguments, new_elements, mode);
+      }
+      parameter_map->set(1, new_elements);
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      FixedDoubleArray* old_elements = FixedDoubleArray::cast(elements());
+      uint32_t old_length = static_cast<uint32_t>(old_elements->length());
+      // Fill out the new array with this content and array holes.
+      for (uint32_t i = 0; i < old_length; i++) {
+        if (!old_elements->is_the_hole(i)) {
+          Object* obj;
+          // Objects must be allocated in the old object space, since the
+          // overall number of HeapNumbers needed for the conversion might
+          // exceed the capacity of new space, and we would fail repeatedly
+          // trying to convert the FixedDoubleArray.
+          MaybeObject* maybe_value_object =
+              GetHeap()->AllocateHeapNumber(old_elements->get_scalar(i),
+                                            TENURED);
+          if (!maybe_value_object->ToObject(&obj)) return maybe_value_object;
+          // Force write barrier. It's not worth trying to exploit
+          // elems->GetWriteBarrierMode(), since it requires an
+          // AssertNoAllocation stack object that would have to be positioned
+          // after the HeapNumber allocation anyway.
+          new_elements->set(i, obj, UPDATE_WRITE_BARRIER);
+        }
+      }
+      set_map(new_map);
+      set_elements(new_elements);
+      break;
+    }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      UNREACHABLE();
+      break;
   }
 
   // Update the length if necessary.
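
The reverted SetFastElementsCapacityAndLength dispatches on the elements kind and hand-copies each representation into the freshly allocated fast store, instead of delegating to an ElementsAccessor. CopySlowElementsToFast is a dictionary-to-dense scatter; a standalone sketch with std::unordered_map standing in for SeededNumberDictionary:

    #include <cstdint>
    #include <optional>
    #include <unordered_map>
    #include <vector>

    // Scatter a sparse (index -> value) dictionary into a dense store of the
    // given capacity; untouched slots stay holes (nullopt), mirroring how the
    // new fast backing store is allocated with holes before the copy.
    static std::vector<std::optional<int>> SlowToFast(
        const std::unordered_map<uint32_t, int>& dict, uint32_t capacity) {
      std::vector<std::optional<int>> dense(capacity);  // all holes initially
      for (const auto& [index, value] : dict) {
        if (index < capacity) dense[index] = value;     // place each entry
      }
      return dense;
    }
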
@@ -8483,33 +7409,41 @@
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
 
-  FixedDoubleArray* elems;
+  Object* obj;
   { MaybeObject* maybe_obj =
         heap->AllocateUninitializedFixedDoubleArray(capacity);
-    if (!maybe_obj->To(&elems)) return maybe_obj;
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
+
+  { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  Map* new_map = Map::cast(obj);
+
+  AssertNoAllocation no_gc;
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      elems->Initialize(FixedArray::cast(elements()));
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      elems->Initialize(FixedDoubleArray::cast(elements()));
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      elems->Initialize(SeededNumberDictionary::cast(elements()));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
   }
 
-  Map* new_map;
-  { MaybeObject* maybe_obj =
-        GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
-    if (!maybe_obj->To(&new_map)) return maybe_obj;
-  }
-
-  FixedArrayBase* old_elements = elements();
-  ElementsKind elements_kind = GetElementsKind();
-  ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
-  accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
-  if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
-    set_map_and_elements(new_map, elems);
-  } else {
-    FixedArray* parameter_map = FixedArray::cast(old_elements);
-    parameter_map->set(1, elems);
-  }
-
-  if (FLAG_trace_elements_transitions) {
-    PrintElementsTransition(stdout, elements_kind, old_elements,
-                            FAST_DOUBLE_ELEMENTS, elems);
-  }
+  ASSERT(new_map->has_fast_double_elements());
+  set_map(new_map);
+  ASSERT(elems->IsFixedDoubleArray());
+  set_elements(elems);
 
   if (IsJSArray()) {
     JSArray::cast(this)->set_length(Smi::FromInt(length));
@@ -8519,6 +7453,53 @@
 }
 
 
+MaybeObject* JSObject::SetSlowElements(Object* len) {
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasExternalArrayElements());
+
+  uint32_t new_length = static_cast<uint32_t>(len->Number());
+
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS: {
+      // Make sure we never try to shrink dense arrays into sparse arrays.
+      ASSERT(static_cast<uint32_t>(
+          FixedArrayBase::cast(elements())->length()) <= new_length);
+      MaybeObject* result = NormalizeElements();
+      if (result->IsFailure()) return result;
+
+      // Update length for JSArrays.
+      if (IsJSArray()) JSArray::cast(this)->set_length(len);
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (IsJSArray()) {
+        uint32_t old_length =
+            static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+        element_dictionary()->RemoveNumberEntries(new_length, old_length);
+        JSArray::cast(this)->set_length(len);
+      }
+      break;
+    }
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNIMPLEMENTED();
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS:
+    case EXTERNAL_PIXEL_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+  return this;
+}
+
+
 MaybeObject* JSArray::Initialize(int capacity) {
   Heap* heap = GetHeap();
   ASSERT(capacity >= 0);
@@ -8527,8 +7508,11 @@
   if (capacity == 0) {
     new_elements = heap->empty_fixed_array();
   } else {
-    MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
-    if (!maybe_obj->To(&new_elements)) return maybe_obj;
+    Object* obj;
+    { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
+      if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    }
+    new_elements = FixedArray::cast(obj);
   }
   set_elements(new_elements);
   return this;
@@ -8536,15 +7520,162 @@
 
 
 void JSArray::Expand(int required_size) {
-  GetIsolate()->factory()->SetElementsCapacityAndLength(
-      Handle<JSArray>(this), required_size, required_size);
+  Handle<JSArray> self(this);
+  Handle<FixedArray> old_backing(FixedArray::cast(elements()));
+  int old_size = old_backing->length();
+  int new_size = required_size > old_size ? required_size : old_size;
+  Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
+  // Can't use 'this' any more now because we may have had a GC!
+  for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
+  self->SetContent(*new_backing);
 }
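
The restored JSArray::Expand grows inline: allocate a larger backing store through the factory (which can trigger a GC, hence the Handle<>s and the warning about 'this'), copy the old contents over, and install the result with SetContent. A GC-free standalone sketch of the same grow-and-copy shape:

    #include <algorithm>
    #include <vector>

    // Grow a backing store to at least required_size, preserving contents.
    // In V8 the allocation can move objects, which is why Expand works
    // through Handle<>s; a std::vector has no such hazard, so this is only
    // the data-movement shape, not the GC discipline.
    static void Expand(std::vector<int>& backing, int required_size) {
      int old_size = static_cast<int>(backing.size());
      int new_size = std::max(required_size, old_size);
      std::vector<int> grown(new_size);  // new slots default-initialize
      std::copy(backing.begin(), backing.end(), grown.begin());
      backing.swap(grown);               // install the new store
    }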
 
 
-MaybeObject* JSArray::SetElementsLength(Object* len) {
+static Failure* ArrayLengthRangeError(Heap* heap) {
+  HandleScope scope(heap->isolate());
+  return heap->isolate()->Throw(
+      *FACTORY->NewRangeError("invalid_array_length",
+          HandleVector<Object>(NULL, 0)));
+}
+
+
+MaybeObject* JSObject::SetElementsLength(Object* len) {
   // We should never end in here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
-  return GetElementsAccessor()->SetLength(this, len);
+
+  MaybeObject* maybe_smi_length = len->ToSmi();
+  Object* smi_length = Smi::FromInt(0);
+  if (maybe_smi_length->ToObject(&smi_length) && smi_length->IsSmi()) {
+    const int value = Smi::cast(smi_length)->value();
+    if (value < 0) return ArrayLengthRangeError(GetHeap());
+    ElementsKind elements_kind = GetElementsKind();
+    switch (elements_kind) {
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS: {
+        int old_capacity = FixedArrayBase::cast(elements())->length();
+        if (value <= old_capacity) {
+          if (IsJSArray()) {
+            Object* obj;
+            if (elements_kind == FAST_ELEMENTS) {
+              MaybeObject* maybe_obj = EnsureWritableFastElements();
+              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+            }
+            if (2 * value <= old_capacity) {
+              // If more than half the elements won't be used, trim the array.
+              if (value == 0) {
+                initialize_elements();
+              } else {
+                Address filler_start;
+                int filler_size;
+                if (GetElementsKind() == FAST_ELEMENTS) {
+                  FixedArray* fast_elements = FixedArray::cast(elements());
+                  fast_elements->set_length(value);
+                  filler_start = fast_elements->address() +
+                      FixedArray::OffsetOfElementAt(value);
+                  filler_size = (old_capacity - value) * kPointerSize;
+                } else {
+                  ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+                  FixedDoubleArray* fast_double_elements =
+                      FixedDoubleArray::cast(elements());
+                  fast_double_elements->set_length(value);
+                  filler_start = fast_double_elements->address() +
+                      FixedDoubleArray::OffsetOfElementAt(value);
+                  filler_size = (old_capacity - value) * kDoubleSize;
+                }
+                GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
+              }
+            } else {
+              // Otherwise, fill the unused tail with holes.
+              int old_length = FastD2I(JSArray::cast(this)->length()->Number());
+              if (GetElementsKind() == FAST_ELEMENTS) {
+                FixedArray* fast_elements = FixedArray::cast(elements());
+                for (int i = value; i < old_length; i++) {
+                  fast_elements->set_the_hole(i);
+                }
+              } else {
+                ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+                FixedDoubleArray* fast_double_elements =
+                    FixedDoubleArray::cast(elements());
+                for (int i = value; i < old_length; i++) {
+                  fast_double_elements->set_the_hole(i);
+                }
+              }
+            }
+            JSArray::cast(this)->set_length(Smi::cast(smi_length));
+          }
+          return this;
+        }
+        int min = NewElementsCapacity(old_capacity);
+        int new_capacity = value > min ? value : min;
+        if (!ShouldConvertToSlowElements(new_capacity)) {
+          MaybeObject* result;
+          if (GetElementsKind() == FAST_ELEMENTS) {
+            result = SetFastElementsCapacityAndLength(new_capacity, value);
+          } else {
+            ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+            result = SetFastDoubleElementsCapacityAndLength(new_capacity,
+                                                            value);
+          }
+          if (result->IsFailure()) return result;
+          return this;
+        }
+        break;
+      }
+      case DICTIONARY_ELEMENTS: {
+        if (IsJSArray()) {
+          if (value == 0) {
+            // If the length of a slow array is reset to zero, we clear
+            // the array and flush backing storage. This has the added
+            // benefit that the array returns to fast mode.
+            Object* obj;
+            { MaybeObject* maybe_obj = ResetElements();
+              if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+            }
+          } else {
+            // Remove deleted elements.
+            uint32_t old_length =
+                static_cast<uint32_t>(JSArray::cast(this)->length()->Number());
+            element_dictionary()->RemoveNumberEntries(value, old_length);
+          }
+          JSArray::cast(this)->set_length(Smi::cast(smi_length));
+        }
+        return this;
+      }
+      case NON_STRICT_ARGUMENTS_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+      case EXTERNAL_DOUBLE_ELEMENTS:
+      case EXTERNAL_PIXEL_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  // General slow case.
+  if (len->IsNumber()) {
+    uint32_t length;
+    if (len->ToArrayIndex(&length)) {
+      return SetSlowElements(len);
+    } else {
+      return ArrayLengthRangeError(GetHeap());
+    }
+  }
+
+  // len is not a number so make the array size one and
+  // set only element to len.
+  Object* obj;
+  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  }
+  FixedArray::cast(obj)->set(0, len);
+  if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
+  set_elements(FixedArray::cast(obj));
+  return this;
 }
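
The fast-elements branch of SetElementsLength above picks between two shrink strategies: when the new length is at most half the old capacity it trims the backing store in place (writing a filler object over the tail), otherwise it keeps the capacity and writes holes over the now-unused tail. A sketch of that policy, with a sentinel standing in for the hole value:

    #include <vector>

    constexpr int kHole = -1;  // stand-in for V8's hole sentinel

    // Shrink a fast backing store to new_length using the same heuristic as
    // SetElementsLength: reclaim memory when more than half the slots would
    // go unused, otherwise keep capacity and hole out the tail.
    static void ShrinkTo(std::vector<int>& elements, int old_length,
                         int new_length) {
      int capacity = static_cast<int>(elements.size());
      if (2 * new_length <= capacity) {
        // More than half would be wasted: actually trim the store (V8
        // overwrites the tail with a filler object instead of reallocating).
        elements.resize(new_length);
      } else {
        // Keep the capacity; fill the unused tail with holes.
        for (int i = new_length; i < old_length; ++i) elements[i] = kHole;
      }
    }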
 
 
@@ -8587,8 +7718,8 @@
     FixedArray* new_cache;
     // Grow array by factor 2 over and above what we need.
     { MaybeObject* maybe_cache =
-          GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
-      if (!maybe_cache->To(&new_cache)) return maybe_cache;
+          heap()->AllocateFixedArray(transitions * 2 * step + header);
+      if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
     }
 
     for (int i = 0; i < capacity * step; i++) {
@@ -8640,7 +7771,7 @@
   // It is sufficient to validate that the receiver is not in the new prototype
   // chain.
   for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
-    if (JSReceiver::cast(pt) == this) {
+    if (JSObject::cast(pt) == this) {
       // Cycle detected.
       HandleScope scope(heap->isolate());
       return heap->isolate()->Throw(
@@ -8655,8 +7786,8 @@
     // hidden and set the new prototype on that object.
     Object* current_proto = real_receiver->GetPrototype();
     while (current_proto->IsJSObject() &&
-          JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
-      real_receiver = JSReceiver::cast(current_proto);
+          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
+      real_receiver = JSObject::cast(current_proto);
       current_proto = current_proto->GetPrototype();
     }
   }
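
SetPrototype first walks the chain from the candidate value to make sure the receiver is not reachable (which would close a prototype cycle), then skips over hidden prototypes to find the real receiver. A standalone sketch of the cycle walk:

    struct Obj {
      Obj* prototype = nullptr;  // null terminates the chain
    };

    // Would setting 'value' as receiver's prototype create a cycle? Walk the
    // chain from value; hitting receiver means yes. This is the check the
    // code above performs before installing the new prototype.
    static bool WouldCreateCycle(const Obj* receiver, const Obj* value) {
      for (const Obj* pt = value; pt != nullptr; pt = pt->prototype) {
        if (pt == receiver) return true;  // cycle detected
      }
      return false;
    }
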
@@ -8689,16 +7820,69 @@
 }
 
 
-MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
-                                                uint32_t first_arg,
-                                                uint32_t arg_count,
-                                                EnsureElementsMode mode) {
-  // Elements in |Arguments| are ordered backwards (because they're on the
-  // stack), but the method that's called here iterates over them in forward
-  // direction.
-  return EnsureCanContainElements(
-      args->arguments() - first_arg - (arg_count - 1),
-      arg_count, mode);
+bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
+  switch (GetElementsKind()) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+        return true;
+      }
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+        return true;
+      }
+      break;
+    }
+    case EXTERNAL_PIXEL_ELEMENTS: {
+      ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
+      if (index < static_cast<uint32_t>(pixels->length())) {
+        return true;
+      }
+      break;
+    }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (element_dictionary()->FindEntry(index)
+          != SeededNumberDictionary::kNotFound) {
+        return true;
+      }
+      break;
+    }
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      UNREACHABLE();
+      break;
+  }
+
+  // Handle [] on String objects.
+  if (this->IsStringObjectWithCharacterAt(index)) return true;
+
+  Object* pt = GetPrototype();
+  if (pt->IsNull()) return false;
+  return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
 
@@ -8741,21 +7925,7 @@
     }
     if (!result.IsEmpty()) return true;
   }
-
-  if (holder_handle->GetElementsAccessor()->HasElement(
-          *receiver_handle, *holder_handle, index)) {
-    return true;
-  }
-
-  if (holder_handle->IsStringObjectWithCharacterAt(index)) return true;
-  Object* pt = holder_handle->GetPrototype();
-  if (pt->IsJSProxy()) {
-    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
-    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
-        receiver, index) != ABSENT;
-  }
-  if (pt->IsNull()) return false;
-  return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index);
+  return holder_handle->HasElementPostInterceptor(*receiver_handle, index);
 }
 
 
@@ -8788,7 +7958,6 @@
   }
 
   switch (GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -8865,6 +8034,28 @@
 }
 
 
+bool JSObject::HasElementInElements(FixedArray* elements,
+                                    ElementsKind kind,
+                                    uint32_t index) {
+  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  if (kind == FAST_ELEMENTS) {
+    int length = IsJSArray()
+        ? Smi::cast(JSArray::cast(this)->length())->value()
+        : elements->length();
+    if (index < static_cast<uint32_t>(length) &&
+        !elements->get(index)->IsTheHole()) {
+      return true;
+    }
+  } else {
+    if (SeededNumberDictionary::cast(elements)->FindEntry(index) !=
+        SeededNumberDictionary::kNotFound) {
+      return true;
+    }
+  }
+  return false;
+}
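
HasElementInElements is the two-representation membership test used on the arguments-object path: fast arrays check bounds plus the hole marker, dictionaries do a keyed lookup. A sketch over the same two shapes, with std::optional holes and an unordered_map dictionary as stand-ins:

    #include <cstdint>
    #include <optional>
    #include <unordered_map>
    #include <variant>
    #include <vector>

    using Dense = std::vector<std::optional<int>>;     // nullopt is a hole
    using Sparse = std::unordered_map<uint32_t, int>;  // dictionary mode

    // Membership test matching HasElementInElements: bounds plus hole check
    // for fast elements, key lookup for dictionary elements.
    static bool HasElement(const std::variant<Dense, Sparse>& elements,
                           uint32_t index) {
      if (const Dense* dense = std::get_if<Dense>(&elements)) {
        return index < dense->size() && (*dense)[index].has_value();
      }
      return std::get<Sparse>(elements).count(index) != 0;
    }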
+
+
 bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
@@ -8880,9 +8071,67 @@
     return HasElementWithInterceptor(receiver, index);
   }
 
-  ElementsAccessor* accessor = GetElementsAccessor();
-  if (accessor->HasElement(receiver, this, index)) {
-    return true;
+  ElementsKind kind = GetElementsKind();
+  switch (kind) {
+    case FAST_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedArray::cast(elements())->get(index)->IsTheHole()) return true;
+      break;
+    }
+    case FAST_DOUBLE_ELEMENTS: {
+      uint32_t length = IsJSArray() ?
+          static_cast<uint32_t>
+              (Smi::cast(JSArray::cast(this)->length())->value()) :
+          static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+      if ((index < length) &&
+          !FixedDoubleArray::cast(elements())->is_the_hole(index)) return true;
+      break;
+    }
+    case EXTERNAL_PIXEL_ELEMENTS: {
+      ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
+      if (index < static_cast<uint32_t>(pixels->length())) {
+        return true;
+      }
+      break;
+    }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+    case EXTERNAL_DOUBLE_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
+    case DICTIONARY_ELEMENTS: {
+      if (element_dictionary()->FindEntry(index)
+          != SeededNumberDictionary::kNotFound) {
+        return true;
+      }
+      break;
+    }
+    case NON_STRICT_ARGUMENTS_ELEMENTS: {
+      FixedArray* parameter_map = FixedArray::cast(elements());
+      uint32_t length = parameter_map->length();
+      Object* probe =
+          (index < length - 2) ? parameter_map->get(index + 2) : NULL;
+      if (probe != NULL && !probe->IsTheHole()) return true;
+
+      // Not a mapped parameter, check the arguments.
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      kind = arguments->IsDictionary() ? DICTIONARY_ELEMENTS : FAST_ELEMENTS;
+      if (HasElementInElements(arguments, kind, index)) return true;
+      break;
+    }
   }
 
   // Handle [] on String objects.
@@ -8890,21 +8139,14 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
-  if (pt->IsJSProxy()) {
-    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
-    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
-        receiver, index) != ABSENT;
-  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
 
 MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
                                                  Object* value,
-                                                 PropertyAttributes attributes,
                                                  StrictModeFlag strict_mode,
-                                                 bool check_prototype,
-                                                 SetPropertyMode set_mode) {
+                                                 bool check_prototype) {
   Isolate* isolate = GetIsolate();
   // Make sure that the top context does not change when doing
   // callbacks or interceptor calls.
@@ -8932,10 +8174,8 @@
   MaybeObject* raw_result =
       this_handle->SetElementWithoutInterceptor(index,
                                                 *value_handle,
-                                                attributes,
                                                 strict_mode,
-                                                check_prototype,
-                                                set_mode);
+                                                check_prototype);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return raw_result;
 }
@@ -8973,11 +8213,11 @@
   }
 
   // __defineGetter__ callback
-  if (structure->IsAccessorPair()) {
-    Object* getter = AccessorPair::cast(structure)->getter();
-    if (getter->IsSpecFunction()) {
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      return Object::GetPropertyWithDefinedGetter(receiver,
+                                                  JSFunction::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -9030,11 +8270,10 @@
     return *value_handle;
   }
 
-  if (structure->IsAccessorPair()) {
-    Handle<Object> setter(AccessorPair::cast(structure)->setter());
-    if (setter->IsSpecFunction()) {
-      // TODO(rossberg): nicer would be to cast to some JSCallable here...
-      return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
+  if (structure->IsFixedArray()) {
+    Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
+    if (setter->IsJSFunction()) {
+      return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -9084,20 +8323,21 @@
                                       Object* value,
                                       StrictModeFlag strict_mode,
                                       bool check_prototype) {
-  ASSERT(HasFastTypeElements() ||
-         HasFastArgumentsElements());
+  ASSERT(HasFastElements() || HasFastArgumentsElements());
 
   FixedArray* backing_store = FixedArray::cast(elements());
   if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
     backing_store = FixedArray::cast(backing_store->get(1));
   } else {
+    Object* writable;
     MaybeObject* maybe = EnsureWritableFastElements();
-    if (!maybe->To(&backing_store)) return maybe;
+    if (!maybe->ToObject(&writable)) return maybe;
+    backing_store = FixedArray::cast(writable);
   }
-  uint32_t capacity = static_cast<uint32_t>(backing_store->length());
+  uint32_t length = static_cast<uint32_t>(backing_store->length());
 
   if (check_prototype &&
-      (index >= capacity || backing_store->get(index)->IsTheHole())) {
+      (index >= length || backing_store->get(index)->IsTheHole())) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -9106,88 +8346,46 @@
     if (found) return result;
   }
 
-  uint32_t new_capacity = capacity;
-  // Check if the length property of this object needs to be updated.
-  uint32_t array_length = 0;
-  bool must_update_array_length = false;
-  if (IsJSArray()) {
-    CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
-    if (index >= array_length) {
-      must_update_array_length = true;
-      array_length = index + 1;
-    }
-  }
-  // Check if the capacity of the backing store needs to be increased, or if
-  // a transition to slow elements is necessary.
-  if (index >= capacity) {
-    bool convert_to_slow = true;
-    if ((index - capacity) < kMaxGap) {
-      new_capacity = NewElementsCapacity(index + 1);
-      ASSERT(new_capacity > index);
-      if (!ShouldConvertToSlowElements(new_capacity)) {
-        convert_to_slow = false;
+  // Check whether there is extra space in fixed array.
+  if (index < length) {
+    backing_store->set(index, value);
+    if (IsJSArray()) {
+      // Update the length of the array if needed.
+      uint32_t array_length = 0;
+      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
+      if (index >= array_length) {
+        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
       }
     }
-    if (convert_to_slow) {
-      MaybeObject* result = NormalizeElements();
-      if (result->IsFailure()) return result;
-      return SetDictionaryElement(index, value, NONE, strict_mode,
-                                  check_prototype);
-    }
-  }
-  // Convert to fast double elements if appropriate.
-  if (HasFastSmiOnlyElements() && !value->IsSmi() && value->IsNumber()) {
-    MaybeObject* maybe =
-        SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
-    if (maybe->IsFailure()) return maybe;
-    FixedDoubleArray::cast(elements())->set(index, value->Number());
     return value;
   }
-  // Change elements kind from SMI_ONLY to generic FAST if necessary.
-  if (HasFastSmiOnlyElements() && !value->IsSmi()) {
-    Map* new_map;
-    { MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
-                                                            FAST_ELEMENTS);
-      if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-    }
-    set_map(new_map);
-    if (FLAG_trace_elements_transitions) {
-      PrintElementsTransition(stdout, FAST_SMI_ONLY_ELEMENTS, elements(),
-                              FAST_ELEMENTS, elements());
+
+  // Allow gap in fast case.
+  if ((index - length) < kMaxGap) {
+    // Try allocating extra space.
+    int new_capacity = NewElementsCapacity(index + 1);
+    if (!ShouldConvertToSlowElements(new_capacity)) {
+      ASSERT(static_cast<uint32_t>(new_capacity) > index);
+      Object* new_elements;
+      MaybeObject* maybe =
+          SetFastElementsCapacityAndLength(new_capacity, index + 1);
+      if (!maybe->ToObject(&new_elements)) return maybe;
+      FixedArray::cast(new_elements)->set(index, value);
+      return value;
     }
   }
-  // Increase backing store capacity if that's been decided previously.
-  if (new_capacity != capacity) {
-    FixedArray* new_elements;
-    SetFastElementsCapacityMode set_capacity_mode =
-        value->IsSmi() && HasFastSmiOnlyElements()
-            ? kAllowSmiOnlyElements
-            : kDontAllowSmiOnlyElements;
-    { MaybeObject* maybe =
-          SetFastElementsCapacityAndLength(new_capacity,
-                                           array_length,
-                                           set_capacity_mode);
-      if (!maybe->To(&new_elements)) return maybe;
-    }
-    new_elements->set(index, value);
-    return value;
-  }
-  // Finally, set the new element and length.
-  ASSERT(elements()->IsFixedArray());
-  backing_store->set(index, value);
-  if (must_update_array_length) {
-    JSArray::cast(this)->set_length(Smi::FromInt(array_length));
-  }
-  return value;
+
+  // Otherwise default to slow case.
+  MaybeObject* result = NormalizeElements();
+  if (result->IsFailure()) return result;
+  return SetDictionaryElement(index, value, strict_mode, check_prototype);
 }
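
// A simplified standalone sketch of the store policy SetFastElement
// implements above (constants and the growth factor are stand-ins; the real
// code also normalizes when the grown capacity would be mostly holes, and
// the sketch uses 0.0 where the real code writes holes): write in place when
// in bounds, grow the backing store for small gaps, otherwise fall back to
// dictionary (slow) elements.
#include <cstdint>
#include <map>
#include <vector>

constexpr uint32_t kMaxGapSketch = 1024;  // stand-in for kMaxGap

struct ElementsSketch {
  std::vector<double> fast;
  std::map<uint32_t, double> slow;  // dictionary-mode fallback
  bool is_slow = false;

  void Set(uint32_t index, double value) {
    if (is_slow) { slow[index] = value; return; }
    if (index < fast.size()) { fast[index] = value; return; }
    if (index - fast.size() < kMaxGapSketch) {
      // Mirrors NewElementsCapacity: grow past the index with some slack.
      fast.resize(index + 1 + (index >> 1) + 16);
      fast[index] = value;
      return;
    }
    // Gap too large: normalize, then store through the dictionary.
    for (uint32_t i = 0; i < fast.size(); ++i) slow[i] = fast[i];
    fast.clear();
    is_slow = true;
    slow[index] = value;
  }
};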
 
 
 MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
                                             Object* value,
-                                            PropertyAttributes attributes,
                                             StrictModeFlag strict_mode,
-                                            bool check_prototype,
-                                            SetPropertyMode set_mode) {
+                                            bool check_prototype) {
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   Isolate* isolate = GetIsolate();
   Heap* heap = isolate->heap();
@@ -9207,40 +8405,20 @@
   if (entry != SeededNumberDictionary::kNotFound) {
     Object* element = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
+    if (details.type() == CALLBACKS) {
       return SetElementWithCallback(element, index, value, this, strict_mode);
     } else {
       dictionary->UpdateMaxNumberKey(index);
-      // If a value has not been initialized we allow writing to it even if it
-      // is read-only (a declared const that has not been initialized).  If a
-      // value is being defined we skip attribute checks completely.
-      if (set_mode == DEFINE_PROPERTY) {
-        details = PropertyDetails(attributes, NORMAL, details.index());
-        dictionary->DetailsAtPut(entry, details);
-      } else if (details.IsReadOnly() && !element->IsTheHole()) {
-        if (strict_mode == kNonStrictMode) {
-          return isolate->heap()->undefined_value();
-        } else {
-          Handle<Object> holder(this);
-          Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
-          Handle<Object> args[2] = { number, holder };
-          Handle<Object> error =
-              isolate->factory()->NewTypeError("strict_read_only_property",
-                                               HandleVector(args, 2));
-          return isolate->Throw(*error);
-        }
+      // If put fails in strict mode, throw an exception.
+      if (!dictionary->ValueAtPut(entry, value) && strict_mode == kStrictMode) {
+        Handle<Object> holder(this);
+        Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+        Handle<Object> args[2] = { number, holder };
+        Handle<Object> error =
+            isolate->factory()->NewTypeError("strict_read_only_property",
+                                             HandleVector(args, 2));
+        return isolate->Throw(*error);
       }
-      // Elements of the arguments object in slow mode might be slow aliases.
-      if (is_arguments && element->IsAliasedArgumentsEntry()) {
-        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
-        Context* context = Context::cast(elements->get(0));
-        int context_index = entry->aliased_context_slot();
-        ASSERT(!context->get(context_index)->IsTheHole());
-        context->set(context_index, value);
-        // For elements that are still writable we keep slow aliasing.
-        if (!details.IsReadOnly()) value = element;
-      }
-      dictionary->ValueAtPut(entry, value);
     }
   } else {
     // Index not already used. Look for an accessor in the prototype chain.
@@ -9267,9 +8445,8 @@
       }
     }
     FixedArrayBase* new_dictionary;
-    PropertyDetails details = PropertyDetails(attributes, NORMAL);
-    MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
-    if (!maybe->To(&new_dictionary)) return maybe;
+    MaybeObject* maybe = dictionary->AtNumberPut(index, value);
+    if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
     if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
       if (is_arguments) {
         elements->set(1, new_dictionary);
@@ -9295,20 +8472,9 @@
     } else {
       new_length = dictionary->max_number_key() + 1;
     }
-    SetFastElementsCapacityMode set_capacity_mode = FLAG_smi_only_arrays
-        ? kAllowSmiOnlyElements
-        : kDontAllowSmiOnlyElements;
-    bool has_smi_only_elements = false;
-    bool should_convert_to_fast_double_elements =
-        ShouldConvertToFastDoubleElements(&has_smi_only_elements);
-    if (has_smi_only_elements) {
-      set_capacity_mode = kForceSmiOnlyElements;
-    }
-    MaybeObject* result = should_convert_to_fast_double_elements
+    MaybeObject* result = CanConvertToFastDoubleElements()
         ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
-        : SetFastElementsCapacityAndLength(new_length,
-                                           new_length,
-                                           set_capacity_mode);
+        : SetFastElementsCapacityAndLength(new_length, new_length);
     if (result->IsFailure()) return result;
 #ifdef DEBUG
     if (FLAG_trace_normalization) {
@@ -9328,14 +8494,13 @@
     bool check_prototype) {
   ASSERT(HasFastDoubleElements());
 
-  FixedArrayBase* base_elms = FixedArrayBase::cast(elements());
-  uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
+  FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
+  uint32_t elms_length = static_cast<uint32_t>(elms->length());
 
   // If storing to an element that isn't in the array, pass the store request
   // up the prototype chain before storing in the receiver's elements.
   if (check_prototype &&
-      (index >= elms_length ||
-       FixedDoubleArray::cast(base_elms)->is_the_hole(index))) {
+      (index >= elms_length || elms->is_the_hole(index))) {
     bool found;
     MaybeObject* result = SetElementWithCallbackSetterInPrototypes(index,
                                                                    value,
@@ -9353,15 +8518,10 @@
     if (IsJSArray()) {
       CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
     }
-    MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
-        elms_length,
-        length,
-        kDontAllowSmiOnlyElements);
+    MaybeObject* maybe_obj =
+        SetFastElementsCapacityAndLength(elms_length, length);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    return SetFastElement(index,
-                          value,
-                          strict_mode,
-                          check_prototype);
+    return SetFastElement(index, value, strict_mode, check_prototype);
   }
 
   double double_value = value_is_smi
@@ -9370,7 +8530,6 @@
 
   // Check whether there is extra space in the fixed array.
   if (index < elms_length) {
-    FixedDoubleArray* elms = FixedDoubleArray::cast(elements());
     elms->set(index, double_value);
     if (IsJSArray()) {
       // Update the length of the array if needed.
@@ -9409,64 +8568,14 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   ASSERT(HasDictionaryElements());
-  return SetElement(index, value, NONE, strict_mode, check_prototype);
-}
-
-
-MaybeObject* JSReceiver::SetElement(uint32_t index,
-                                    Object* value,
-                                    PropertyAttributes attributes,
-                                    StrictModeFlag strict_mode,
-                                    bool check_proto) {
-  if (IsJSProxy()) {
-    return JSProxy::cast(this)->SetElementWithHandler(
-        index, value, strict_mode);
-  } else {
-    return JSObject::cast(this)->SetElement(
-        index, value, attributes, strict_mode, check_proto);
-  }
-}
-
-
-Handle<Object> JSObject::SetOwnElement(Handle<JSObject> object,
-                                       uint32_t index,
-                                       Handle<Object> value,
-                                       StrictModeFlag strict_mode) {
-  ASSERT(!object->HasExternalArrayElements());
-  CALL_HEAP_FUNCTION(
-      object->GetIsolate(),
-      object->SetElement(index, *value, NONE, strict_mode, false),
-      Object);
-}
-
-
-Handle<Object> JSObject::SetElement(Handle<JSObject> object,
-                                    uint32_t index,
-                                    Handle<Object> value,
-                                    PropertyAttributes attr,
-                                    StrictModeFlag strict_mode,
-                                    SetPropertyMode set_mode) {
-  if (object->HasExternalArrayElements()) {
-    if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
-      bool has_exception;
-      Handle<Object> number = Execution::ToNumber(value, &has_exception);
-      if (has_exception) return Handle<Object>();
-      value = number;
-    }
-  }
-  CALL_HEAP_FUNCTION(
-      object->GetIsolate(),
-      object->SetElement(index, *value, attr, strict_mode, true, set_mode),
-      Object);
+  return SetElement(index, value, strict_mode, check_prototype);
 }
 
 
 MaybeObject* JSObject::SetElement(uint32_t index,
                                   Object* value,
-                                  PropertyAttributes attributes,
                                   StrictModeFlag strict_mode,
-                                  bool check_prototype,
-                                  SetPropertyMode set_mode) {
+                                  bool check_prototype) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
@@ -9484,62 +8593,31 @@
     ASSERT(proto->IsJSGlobalObject());
     return JSObject::cast(proto)->SetElement(index,
                                              value,
-                                             attributes,
                                              strict_mode,
-                                             check_prototype,
-                                             set_mode);
-  }
-
-  // Don't allow element properties to be redefined for external arrays.
-  if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
-    Isolate* isolate = GetHeap()->isolate();
-    Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
-    Handle<Object> args[] = { Handle<Object>(this), number };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw(*error);
-  }
-
-  // Normalize the elements to enable attributes on the property.
-  if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
-    SeededNumberDictionary* dictionary;
-    MaybeObject* maybe_object = NormalizeElements();
-    if (!maybe_object->To(&dictionary)) return maybe_object;
-    // Make sure that we never go back to fast case.
-    dictionary->set_requires_slow_elements();
+                                             check_prototype);
   }
 
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return SetElementWithInterceptor(index,
                                      value,
-                                     attributes,
                                      strict_mode,
-                                     check_prototype,
-                                     set_mode);
+                                     check_prototype);
   }
 
   return SetElementWithoutInterceptor(index,
                                       value,
-                                      attributes,
                                       strict_mode,
-                                      check_prototype,
-                                      set_mode);
+                                      check_prototype);
 }
 
 
 MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
                                                     Object* value,
-                                                    PropertyAttributes attr,
                                                     StrictModeFlag strict_mode,
-                                                    bool check_prototype,
-                                                    SetPropertyMode set_mode) {
-  ASSERT(HasDictionaryElements() ||
-         HasDictionaryArgumentsElements() ||
-         (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
+                                                    bool check_prototype) {
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       return SetFastElement(index, value, strict_mode, check_prototype);
     case FAST_DOUBLE_ELEMENTS:
@@ -9584,8 +8662,7 @@
       return array->SetValue(index, value);
     }
     case DICTIONARY_ELEMENTS:
-      return SetDictionaryElement(index, value, attr, strict_mode,
-                                  check_prototype, set_mode);
+      return SetDictionaryElement(index, value, strict_mode, check_prototype);
     case NON_STRICT_ARGUMENTS_ELEMENTS: {
       FixedArray* parameter_map = FixedArray::cast(elements());
       uint32_t length = parameter_map->length();
@@ -9596,22 +8673,16 @@
         int context_index = Smi::cast(probe)->value();
         ASSERT(!context->get(context_index)->IsTheHole());
         context->set(context_index, value);
-        // Redefining attributes of an aliased element destroys fast aliasing.
-        if (set_mode == SET_PROPERTY || attr == NONE) return value;
-        parameter_map->set_the_hole(index + 2);
-        // For elements that are still writable we re-establish slow aliasing.
-        if ((attr & READ_ONLY) == 0) {
-          MaybeObject* maybe_entry =
-              isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
-          if (!maybe_entry->ToObject(&value)) return maybe_entry;
-        }
-      }
-      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      if (arguments->IsDictionary()) {
-        return SetDictionaryElement(index, value, attr, strict_mode,
-                                    check_prototype, set_mode);
+        return value;
       } else {
-        return SetFastElement(index, value, strict_mode, check_prototype);
+        // Object is not mapped, defer to the arguments.
+        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+        if (arguments->IsDictionary()) {
+          return SetDictionaryElement(index, value, strict_mode,
+                                      check_prototype);
+        } else {
+          return SetFastElement(index, value, strict_mode, check_prototype);
+        }
       }
     }
   }
@@ -9622,79 +8693,6 @@
 }
 
 
-Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
-                                                ElementsKind to_kind) {
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->TransitionElementsKind(to_kind),
-                     Object);
-}
-
-
-MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
-  ElementsKind from_kind = map()->elements_kind();
-
-  Isolate* isolate = GetIsolate();
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      (to_kind == FAST_ELEMENTS ||
-       elements() == isolate->heap()->empty_fixed_array())) {
-    MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
-    Map* new_map;
-    if (!maybe_new_map->To(&new_map)) return maybe_new_map;
-    set_map(new_map);
-    if (FLAG_trace_elements_transitions) {
-      FixedArrayBase* elms = FixedArrayBase::cast(elements());
-      PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
-    }
-    return this;
-  }
-
-  FixedArrayBase* elms = FixedArrayBase::cast(elements());
-  uint32_t capacity = static_cast<uint32_t>(elms->length());
-  uint32_t length = capacity;
-
-  if (IsJSArray()) {
-    Object* raw_length = JSArray::cast(this)->length();
-    if (raw_length->IsUndefined()) {
-      // If length is undefined, then JSArray is being initialized and has no
-      // elements, assume a length of zero.
-      length = 0;
-    } else {
-      CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
-    }
-  }
-
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      to_kind == FAST_DOUBLE_ELEMENTS) {
-    MaybeObject* maybe_result =
-        SetFastDoubleElementsCapacityAndLength(capacity, length);
-    if (maybe_result->IsFailure()) return maybe_result;
-    return this;
-  }
-
-  if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
-        capacity, length, kDontAllowSmiOnlyElements);
-    if (maybe_result->IsFailure()) return maybe_result;
-    return this;
-  }
-
-  // This method should never be called for any other case than the ones
-  // handled above.
-  UNREACHABLE();
-  return GetIsolate()->heap()->null_value();
-}
-
-
-// static
-bool Map::IsValidElementsTransition(ElementsKind from_kind,
-                                    ElementsKind to_kind) {
-  return
-      (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-          (to_kind == FAST_DOUBLE_ELEMENTS || to_kind == FAST_ELEMENTS)) ||
-      (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS);
-}
-
-
 MaybeObject* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index,
                                                    Object* value) {
   uint32_t old_len = 0;
@@ -9742,9 +8740,10 @@
 
   Heap* heap = holder_handle->GetHeap();
   ElementsAccessor* handler = holder_handle->GetElementsAccessor();
-  MaybeObject* raw_result = handler->Get(*this_handle,
+  MaybeObject* raw_result = handler->Get(holder_handle->elements(),
+                                         index,
                                          *holder_handle,
-                                         index);
+                                         *this_handle);
   if (raw_result != heap->the_hole_value()) return raw_result;
 
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
@@ -9782,7 +8781,6 @@
         break;
       }
       // Fall through.
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       backing_store = FixedArray::cast(backing_store_base);
       *capacity = backing_store->length();
@@ -9876,26 +8874,18 @@
 }
 
 
-bool JSObject::ShouldConvertToFastDoubleElements(
-    bool* has_smi_only_elements) {
-  *has_smi_only_elements = false;
+bool JSObject::CanConvertToFastDoubleElements() {
   if (FLAG_unbox_double_arrays) {
     ASSERT(HasDictionaryElements());
     SeededNumberDictionary* dictionary =
         SeededNumberDictionary::cast(elements());
-    bool found_double = false;
     for (int i = 0; i < dictionary->Capacity(); i++) {
       Object* key = dictionary->KeyAt(i);
       if (key->IsNumber()) {
-        Object* value = dictionary->ValueAt(i);
-        if (!value->IsNumber()) return false;
-        if (!value->IsSmi()) {
-          found_double = true;
-        }
+        if (!dictionary->ValueAt(i)->IsNumber()) return false;
       }
     }
-    *has_smi_only_elements = !found_double;
-    return found_double;
+    return true;
   } else {
     return false;
   }
@@ -9970,7 +8960,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -9988,7 +8978,7 @@
     String* name,
     PropertyAttributes* attributes) {
   // Check local property in holder, ignore interceptor.
-  LookupResult result(GetIsolate());
+  LookupResult result;
   LocalLookupRealNamedProperty(name, &result);
   if (result.IsProperty()) {
     return GetProperty(receiver, &result, name, attributes);
@@ -10039,15 +9029,15 @@
 
 bool JSObject::HasRealNamedProperty(String* key) {
   // Check access rights if needed.
-  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result(isolate);
+  LookupResult result;
   LocalLookupRealNamedProperty(key, &result);
   return result.IsProperty() && (result.type() != INTERCEPTOR);
 }
@@ -10067,7 +9057,6 @@
   if (this->IsStringObjectWithCharacterAt(index)) return true;
 
   switch (GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>(
@@ -10116,24 +9105,39 @@
 
 bool JSObject::HasRealNamedCallbackProperty(String* key) {
   // Check access rights if needed.
-  Isolate* isolate = GetIsolate();
   if (IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+    Heap* heap = GetHeap();
+    if (!heap->isolate()->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
+      heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
       return false;
     }
   }
 
-  LookupResult result(isolate);
+  LookupResult result;
   LocalLookupRealNamedProperty(key, &result);
-  return result.IsFound() && (result.type() == CALLBACKS);
+  return result.IsProperty() && (result.type() == CALLBACKS);
 }
 
 
 int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
-  return HasFastProperties() ?
-      map()->NumberOfDescribedProperties(filter) :
-      property_dictionary()->NumberOfElementsFilterAttributes(filter);
+  if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    int result = 0;
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PropertyDetails details(descs->GetDetails(i));
+      if (details.IsProperty() && (details.attributes() & filter) == 0) {
+        result++;
+      }
+    }
+    return result;
+  } else {
+    return property_dictionary()->NumberOfElementsFilterAttributes(filter);
+  }
+}
+
+
+int JSObject::NumberOfEnumProperties() {
+  return NumberOfLocalProperties(static_cast<PropertyAttributes>(DONT_ENUM));
 }
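
// A standalone sketch of the counting loop above: a property is counted
// only when none of the filter's attribute bits are set on it.
#include <vector>

inline int CountWithFilterSketch(const std::vector<int>& attributes,
                                 int filter) {
  int result = 0;
  for (int attrs : attributes) {
    if ((attrs & filter) == 0) result++;  // e.g. filter == DONT_ENUM
  }
  return result;
}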
 
 
@@ -10143,8 +9147,8 @@
   set(j, temp);
   if (this != numbers) {
     temp = numbers->get(i);
-    numbers->set(i, Smi::cast(numbers->get(j)));
-    numbers->set(j, Smi::cast(temp));
+    numbers->set(i, numbers->get(j));
+    numbers->set(j, temp);
   }
 }
 
@@ -10254,7 +9258,7 @@
 // purpose of this function is to provide reflection information for the object
 // mirrors.
 void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
-  ASSERT(storage->length() >= (NumberOfLocalProperties() - index));
+  ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index));
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < descs->number_of_descriptors(); i++) {
@@ -10292,7 +9296,6 @@
                                   PropertyAttributes filter) {
   int counter = 0;
   switch (GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
           Smi::cast(JSArray::cast(this)->length())->value() :
@@ -10459,87 +9462,70 @@
  public:
   StringSharedKey(String* source,
                   SharedFunctionInfo* shared,
-                  LanguageMode language_mode,
-                  int scope_position)
+                  StrictModeFlag strict_mode)
       : source_(source),
         shared_(shared),
-        language_mode_(language_mode),
-        scope_position_(scope_position) { }
+        strict_mode_(strict_mode) { }
 
   bool IsMatch(Object* other) {
     if (!other->IsFixedArray()) return false;
-    FixedArray* other_array = FixedArray::cast(other);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
+    FixedArray* pair = FixedArray::cast(other);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
     if (shared != shared_) return false;
-    int language_unchecked = Smi::cast(other_array->get(2))->value();
-    ASSERT(language_unchecked == CLASSIC_MODE ||
-           language_unchecked == STRICT_MODE ||
-           language_unchecked == EXTENDED_MODE);
-    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
-    if (language_mode != language_mode_) return false;
-    int scope_position = Smi::cast(other_array->get(3))->value();
-    if (scope_position != scope_position_) return false;
-    String* source = String::cast(other_array->get(1));
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    if (strict_mode != strict_mode_) return false;
+    String* source = String::cast(pair->get(1));
     return source->Equals(source_);
   }
 
   static uint32_t StringSharedHashHelper(String* source,
                                          SharedFunctionInfo* shared,
-                                         LanguageMode language_mode,
-                                         int scope_position) {
+                                         StrictModeFlag strict_mode) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
       // code computation, we use a combination of the hash of the
-      // script source code and the start position of the calling scope.
-      // We do this to ensure that the cache entries can survive garbage
+      // script source code and the start and end positions.  We do
+      // this to ensure that the cache entries can survive garbage
       // collection.
       Script* script = Script::cast(shared->script());
       hash ^= String::cast(script->source())->Hash();
-      if (language_mode == STRICT_MODE) hash ^= 0x8000;
-      if (language_mode == EXTENDED_MODE) hash ^= 0x0080;
-      hash += scope_position;
+      if (strict_mode == kStrictMode) hash ^= 0x8000;
+      hash += shared->start_position();
     }
     return hash;
   }
 
   uint32_t Hash() {
-    return StringSharedHashHelper(
-        source_, shared_, language_mode_, scope_position_);
+    return StringSharedHashHelper(source_, shared_, strict_mode_);
   }
 
   uint32_t HashForObject(Object* obj) {
-    FixedArray* other_array = FixedArray::cast(obj);
-    SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
-    String* source = String::cast(other_array->get(1));
-    int language_unchecked = Smi::cast(other_array->get(2))->value();
-    ASSERT(language_unchecked == CLASSIC_MODE ||
-           language_unchecked == STRICT_MODE ||
-           language_unchecked == EXTENDED_MODE);
-    LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
-    int scope_position = Smi::cast(other_array->get(3))->value();
-    return StringSharedHashHelper(
-        source, shared, language_mode, scope_position);
+    FixedArray* pair = FixedArray::cast(obj);
+    SharedFunctionInfo* shared = SharedFunctionInfo::cast(pair->get(0));
+    String* source = String::cast(pair->get(1));
+    StrictModeFlag strict_mode = static_cast<StrictModeFlag>(
+        Smi::cast(pair->get(2))->value());
+    return StringSharedHashHelper(source, shared, strict_mode);
   }
 
   MUST_USE_RESULT MaybeObject* AsObject() {
     Object* obj;
-    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4);
+    { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(3);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
-    FixedArray* other_array = FixedArray::cast(obj);
-    other_array->set(0, shared_);
-    other_array->set(1, source_);
-    other_array->set(2, Smi::FromInt(language_mode_));
-    other_array->set(3, Smi::FromInt(scope_position_));
-    return other_array;
+    FixedArray* pair = FixedArray::cast(obj);
+    pair->set(0, shared_);
+    pair->set(1, source_);
+    pair->set(2, Smi::FromInt(strict_mode_));
+    return pair;
   }
 
  private:
   String* source_;
   SharedFunctionInfo* shared_;
-  LanguageMode language_mode_;
-  int scope_position_;
+  StrictModeFlag strict_mode_;
 };
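
// A standalone sketch (hypothetical stand-in types) of the cache key above:
// eval entries key on (source, shared, strict_mode) stored as a three-slot
// array, and the hash mixes the script source hash with the function's start
// position instead of the SharedFunctionInfo pointer, so entries stay valid
// when the GC moves objects.
#include <cstdint>
#include <functional>
#include <string>

struct EvalCacheKeySketch {
  std::string source;         // slot 1
  std::string script_source;  // stands in for shared->script()->source()
  int start_position;         // stands in for shared->start_position()
  bool is_strict;             // slot 2 (a Smi in the real table)

  uint32_t Hash() const {
    uint32_t hash = static_cast<uint32_t>(std::hash<std::string>{}(source));
    hash ^= static_cast<uint32_t>(std::hash<std::string>{}(script_source));
    if (is_strict) hash ^= 0x8000;  // same strict-mode perturbation as above
    return hash + static_cast<uint32_t>(start_position);
  }
};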
 
 
@@ -10597,7 +9583,7 @@
     if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
     unibrow::Utf8InputBuffer<> buffer(string_.start(),
                                       static_cast<unsigned>(string_.length()));
-    chars_ = buffer.Utf16Length();
+    chars_ = buffer.Length();
     hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
     uint32_t result = hash_field_ >> String::kHashShift;
     ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
@@ -10792,7 +9778,7 @@
     // Transform string to symbol if possible.
     Map* map = heap->SymbolMapForString(string_);
     if (map != NULL) {
-      string_->set_map_no_write_barrier(map);
+      string_->set_map(map);
       ASSERT(string_->IsSymbol());
       return string_;
     }
@@ -10853,7 +9839,7 @@
 
   // Optimized for symbol key. Knowledge of the key type allows:
   // 1. Move the check if the key is a symbol out of the loop.
-  // 2. Avoid comparing hash codes in symbol to symbol comparison.
+  // 2. Avoid comparing hash codes in symbol to symbol comparison.
   // 3. Detect a case when a dictionary key is not a symbol but the key is.
   //    In case of positive result the dictionary key may be replaced by
   //    the symbol with minimal performance penalty. It gives a chance to
@@ -10871,45 +9857,20 @@
     if (element->IsUndefined()) break;  // Empty entry.
     if (key == element) return entry;
     if (!element->IsSymbol() &&
-        !element->IsTheHole() &&
+        !element->IsNull() &&
         String::cast(element)->Equals(key)) {
       // Replace a non-symbol key by the equivalent symbol for faster further
       // lookups.
       set(index, key);
       return entry;
     }
-    ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key));
+    ASSERT(element->IsNull() || !String::cast(element)->Equals(key));
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
 }
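
// A standalone sketch of the probe-loop optimization described above: during
// lookup with an interned (symbol) key, an equal non-interned key found in
// the table is replaced by the symbol, so later lookups hit the fast
// pointer-equality check. (Linear scan here; the real table probes.)
#include <string>
#include <vector>

struct InternedLookupSketch {
  std::vector<const std::string*> keys;  // interned keys compare by pointer

  int Find(const std::string* symbol) {
    for (int i = 0; i < static_cast<int>(keys.size()); i++) {
      if (keys[i] == symbol) return i;  // fast pointer equality
      if (keys[i] != nullptr && *keys[i] == *symbol) {
        keys[i] = symbol;  // re-intern the slot for faster future lookups
        return i;
      }
    }
    return -1;
  }
};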
 
 
-bool StringDictionary::ContainsTransition(int entry) {
-  switch (DetailsAt(entry).type()) {
-    case MAP_TRANSITION:
-    case CONSTANT_TRANSITION:
-    case ELEMENTS_TRANSITION:
-      return true;
-    case CALLBACKS: {
-      Object* value = ValueAt(entry);
-      if (!value->IsAccessorPair()) return false;
-      AccessorPair* accessors = AccessorPair::cast(value);
-      return accessors->getter()->IsMap() || accessors->setter()->IsMap();
-    }
-    case NORMAL:
-    case FIELD:
-    case CONSTANT_FUNCTION:
-    case HANDLER:
-    case INTERCEPTOR:
-    case NULL_DESCRIPTOR:
-      return false;
-  }
-  UNREACHABLE();  // Keep the compiler happy.
-  return false;
-}
-
-
 template<typename Shape, typename Key>
 MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
   ASSERT(NumberOfElements() < new_table->Capacity());
@@ -11007,7 +9968,7 @@
   // EnsureCapacity will guarantee the hash table is never full.
   while (true) {
     Object* element = KeyAt(entry);
-    if (element->IsUndefined() || element->IsTheHole()) break;
+    if (element->IsUndefined() || element->IsNull()) break;
     entry = NextProbe(entry, count++, capacity);
   }
   return entry;
@@ -11022,9 +9983,7 @@
 
 template class HashTable<MapCacheShape, HashTableKey*>;
 
-template class HashTable<ObjectHashTableShape<1>, Object*>;
-
-template class HashTable<ObjectHashTableShape<2>, Object*>;
+template class HashTable<ObjectHashTableShape, JSObject*>;
 
 template class Dictionary<StringDictionaryShape, String*>;
 
@@ -11047,9 +10006,6 @@
 template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
     AtPut(uint32_t, Object*);
 
-template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>::
-    SlowReverseLookup(Object* value);
-
 template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>::
     SlowReverseLookup(Object* value);
 
@@ -11228,6 +10184,8 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
+  ASSERT(!HasExternalArrayElements());
+
   Heap* heap = GetHeap();
 
   if (HasDictionaryElements()) {
@@ -11241,8 +10199,7 @@
     // Convert to fast elements.
 
     Object* obj;
-    { MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
-                                                        FAST_ELEMENTS);
+    { MaybeObject* maybe_obj = map()->GetFastElementsMap();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     Map* new_map = Map::cast(obj);
@@ -11258,16 +10215,13 @@
 
     set_map(new_map);
     set_elements(fast_elements);
-  } else if (HasExternalArrayElements()) {
-    // External arrays cannot have holes or undefined elements.
-    return Smi::FromInt(ExternalArray::cast(elements())->length());
   } else if (!HasFastDoubleElements()) {
     Object* obj;
     { MaybeObject* maybe_obj = EnsureWritableFastElements();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
   }
-  ASSERT(HasFastTypeElements() || HasFastDoubleElements());
+  ASSERT(HasFastElements() || HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
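
// A standalone sketch of the partition just described: defined values first,
// then undefined, then holes, returning the count of defined values.
#include <algorithm>
#include <vector>

enum class Slot { kValue, kUndefined, kHole };

inline size_t PartitionForSortSketch(std::vector<Slot>* slots) {
  auto value_end = std::stable_partition(
      slots->begin(), slots->end(),
      [](Slot s) { return s == Slot::kValue; });
  std::stable_partition(value_end, slots->end(),
                        [](Slot s) { return s == Slot::kUndefined; });
  return static_cast<size_t>(value_end - slots->begin());
}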
@@ -11489,7 +10443,7 @@
 
 
 MaybeObject* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
-  float cast_value = static_cast<float>(OS::nan_value());
+  float cast_value = 0;
   Heap* heap = GetHeap();
   if (index < static_cast<uint32_t>(length())) {
     if (value->IsSmi()) {
@@ -11499,7 +10453,7 @@
       double double_value = HeapNumber::cast(value)->value();
       cast_value = static_cast<float>(double_value);
     } else {
-      // Clamp undefined to NaN (default). All other types have been
+      // Clamp undefined to zero (default). All other types have been
       // converted to a number type further up in the call chain.
       ASSERT(value->IsUndefined());
     }
@@ -11510,7 +10464,7 @@
 
 
 MaybeObject* ExternalDoubleArray::SetValue(uint32_t index, Object* value) {
-  double double_value = OS::nan_value();
+  double double_value = 0;
   Heap* heap = GetHeap();
   if (index < static_cast<uint32_t>(length())) {
     if (value->IsSmi()) {
@@ -11519,7 +10473,7 @@
     } else if (value->IsHeapNumber()) {
       double_value = HeapNumber::cast(value)->value();
     } else {
-      // Clamp undefined to NaN (default). All other types have been
+      // Clamp undefined to zero (default). All other types have been
       // converted to a number type further up in the call chain.
       ASSERT(value->IsUndefined());
     }
@@ -11536,16 +10490,6 @@
 }
 
 
-Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell(
-    Handle<GlobalObject> global,
-    Handle<String> name) {
-  Isolate* isolate = global->GetIsolate();
-  CALL_HEAP_FUNCTION(isolate,
-                     global->EnsurePropertyCell(*name),
-                     JSGlobalPropertyCell);
-}
-
-
 MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
   ASSERT(!HasFastProperties());
   int entry = property_dictionary()->FindEntry(name);
@@ -11747,12 +10691,8 @@
 
 Object* CompilationCacheTable::LookupEval(String* src,
                                           Context* context,
-                                          LanguageMode language_mode,
-                                          int scope_position) {
-  StringSharedKey key(src,
-                      context->closure()->shared(),
-                      language_mode,
-                      scope_position);
+                                          StrictModeFlag strict_mode) {
+  StringSharedKey key(src, context->closure()->shared(), strict_mode);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
@@ -11787,12 +10727,10 @@
 
 MaybeObject* CompilationCacheTable::PutEval(String* src,
                                             Context* context,
-                                            SharedFunctionInfo* value,
-                                            int scope_position) {
+                                            SharedFunctionInfo* value) {
   StringSharedKey key(src,
                       context->closure()->shared(),
-                      value->language_mode(),
-                      scope_position);
+                      value->strict_mode() ? kStrictMode : kNonStrictMode);
   Object* obj;
   { MaybeObject* maybe_obj = EnsureCapacity(1, &key);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -11836,13 +10774,13 @@
 
 
 void CompilationCacheTable::Remove(Object* value) {
-  Object* the_hole_value = GetHeap()->the_hole_value();
+  Object* null_value = GetHeap()->null_value();
   for (int entry = 0, size = Capacity(); entry < size; entry++) {
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
     if (get(value_index) == value) {
-      NoWriteBarrierSet(this, entry_index, the_hole_value);
-      NoWriteBarrierSet(this, value_index, the_hole_value);
+      fast_set(this, entry_index, null_value);
+      fast_set(this, value_index, null_value);
       ElementRemoved();
     }
   }
@@ -11995,6 +10933,30 @@
 }
 
 
+void SeededNumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
+  // Do nothing if the interval [from, to) is empty.
+  if (from >= to) return;
+
+  Heap* heap = GetHeap();
+  int removed_entries = 0;
+  Object* sentinel = heap->null_value();
+  int capacity = Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* key = KeyAt(i);
+    if (key->IsNumber()) {
+      uint32_t number = static_cast<uint32_t>(key->Number());
+      if (from <= number && number < to) {
+        SetEntry(i, sentinel, sentinel);
+        removed_entries++;
+      }
+    }
+  }
+
+  // Update the number of elements.
+  ElementsRemoved(removed_entries);
+}
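
// A standalone sketch of the interval delete above: drop every numeric key
// in the half-open range [from, to), which is the operation a length-shrink
// on a dictionary-backed array needs.
#include <cstdint>
#include <map>

inline void RemoveNumberEntriesSketch(std::map<uint32_t, double>* dict,
                                      uint32_t from, uint32_t to) {
  if (from >= to) return;  // empty interval, nothing to do
  dict->erase(dict->lower_bound(from), dict->lower_bound(to));
}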
+
+
 template<typename Shape, typename Key>
 Object* Dictionary<Shape, Key>::DeleteProperty(int entry,
                                                JSReceiver::DeleteMode mode) {
@@ -12004,7 +10966,7 @@
   if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
     return heap->false_value();
   }
-  SetEntry(entry, heap->the_hole_value(), heap->the_hole_value());
+  SetEntry(entry, heap->null_value(), heap->null_value());
   HashTable<Shape, Key>::ElementRemoved();
   return heap->true_value();
 }
@@ -12136,27 +11098,6 @@
 }
 
 
-Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
-    Handle<SeededNumberDictionary> dictionary,
-    uint32_t index,
-    Handle<Object> value,
-    PropertyDetails details) {
-  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
-                     dictionary->Set(index, *value, details),
-                     SeededNumberDictionary);
-}
-
-
-Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
-    Handle<UnseededNumberDictionary> dictionary,
-    uint32_t index,
-    Handle<Object> value) {
-  CALL_HEAP_FUNCTION(dictionary->GetIsolate(),
-                     dictionary->Set(index, *value),
-                     UnseededNumberDictionary);
-}
-
-
 MaybeObject* SeededNumberDictionary::Set(uint32_t key,
                                          Object* value,
                                          PropertyDetails details) {
@@ -12338,15 +11279,14 @@
   }
 
   // Allocate the instance descriptor.
-  DescriptorArray* descriptors;
-  { MaybeObject* maybe_descriptors =
+  Object* descriptors_unchecked;
+  { MaybeObject* maybe_descriptors_unchecked =
         DescriptorArray::Allocate(instance_descriptor_length);
-    if (!maybe_descriptors->To<DescriptorArray>(&descriptors)) {
-      return maybe_descriptors;
+    if (!maybe_descriptors_unchecked->ToObject(&descriptors_unchecked)) {
+      return maybe_descriptors_unchecked;
     }
   }
-
-  DescriptorArray::WhitenessWitness witness(descriptors);
+  DescriptorArray* descriptors = DescriptorArray::cast(descriptors_unchecked);
 
   int inobject_props = obj->map()->inobject_properties();
   int number_of_allocated_fields =
@@ -12384,7 +11324,7 @@
                                      JSFunction::cast(value),
                                      details.attributes(),
                                      details.index());
-        descriptors->Set(next_descriptor++, &d, witness);
+        descriptors->Set(next_descriptor++, &d);
       } else if (type == NORMAL) {
         if (current_offset < inobject_props) {
           obj->InObjectPropertyAtPut(current_offset,
@@ -12398,18 +11338,13 @@
                           current_offset++,
                           details.attributes(),
                           details.index());
-        descriptors->Set(next_descriptor++, &d, witness);
+        descriptors->Set(next_descriptor++, &d);
       } else if (type == CALLBACKS) {
-        if (value->IsAccessorPair()) {
-          MaybeObject* maybe_copy =
-              AccessorPair::cast(value)->CopyWithoutTransitions();
-          if (!maybe_copy->To(&value)) return maybe_copy;
-        }
         CallbacksDescriptor d(String::cast(key),
                               value,
                               details.attributes(),
                               details.index());
-        descriptors->Set(next_descriptor++, &d, witness);
+        descriptors->Set(next_descriptor++, &d);
       } else {
         UNREACHABLE();
       }
@@ -12417,7 +11352,7 @@
   }
   ASSERT(current_offset == number_of_fields);
 
-  descriptors->Sort(witness);
+  descriptors->Sort();
   // Allocate new map.
   Object* new_map;
   { MaybeObject* maybe_new_map = obj->map()->CopyDropDescriptors();
@@ -12440,84 +11375,20 @@
 }
 
 
-bool ObjectHashSet::Contains(Object* key) {
-  ASSERT(IsKey(key));
-
+Object* ObjectHashTable::Lookup(JSObject* key) {
   // If the object does not have an identity hash, it was never used as a key.
-  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
-    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return false;
-  }
-  return (FindEntry(key) != kNotFound);
-}
-
-
-MaybeObject* ObjectHashSet::Add(Object* key) {
-  ASSERT(IsKey(key));
-
-  // Make sure the key object has an identity hash code.
-  int hash;
-  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
-    if (maybe_hash->IsFailure()) return maybe_hash;
-    hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
-  }
-  int entry = FindEntry(key);
-
-  // Check whether key is already present.
-  if (entry != kNotFound) return this;
-
-  // Check whether the hash set should be extended and add entry.
-  Object* obj;
-  { MaybeObject* maybe_obj = EnsureCapacity(1, key);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  ObjectHashSet* table = ObjectHashSet::cast(obj);
-  entry = table->FindInsertionEntry(hash);
-  table->set(EntryToIndex(entry), key);
-  table->ElementAdded();
-  return table;
-}
-
-
-MaybeObject* ObjectHashSet::Remove(Object* key) {
-  ASSERT(IsKey(key));
-
-  // If the object does not have an identity hash, it was never used as a key.
-  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
-    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) return this;
-  }
-  int entry = FindEntry(key);
-
-  // Check whether key is actually present.
-  if (entry == kNotFound) return this;
-
-  // Remove entry and try to shrink this hash set.
-  set_the_hole(EntryToIndex(entry));
-  ElementRemoved();
-  return Shrink(key);
-}
-
-
-Object* ObjectHashTable::Lookup(Object* key) {
-  ASSERT(IsKey(key));
-
-  // If the object does not have an identity hash, it was never used as a key.
-  { MaybeObject* maybe_hash = key->GetHash(OMIT_CREATION);
-    if (maybe_hash->ToObjectUnchecked()->IsUndefined()) {
-      return GetHeap()->undefined_value();
-    }
-  }
+  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+  if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
   int entry = FindEntry(key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
   return get(EntryToIndex(entry) + 1);
 }
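
// A standalone sketch of the identity-hash protocol above: Lookup must not
// allocate, so a key that never received an identity hash can be rejected
// immediately, while Put may create the hash on first use.
#include <optional>

struct HashableSketch {
  std::optional<int> identity_hash;  // lazily assigned, like GetIdentityHash
};

inline int EnsureIdentityHash(HashableSketch* obj, int* next_hash) {
  if (!obj->identity_hash) obj->identity_hash = (*next_hash)++;  // ALLOW_CREATION
  return *obj->identity_hash;
}

inline bool EverUsedAsKey(const HashableSketch& obj) {
  return obj.identity_hash.has_value();  // the OMIT_CREATION fast path
}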
 
 
-MaybeObject* ObjectHashTable::Put(Object* key, Object* value) {
-  ASSERT(IsKey(key));
-
+MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
   // Make sure the key object has an identity hash code.
   int hash;
-  { MaybeObject* maybe_hash = key->GetHash(ALLOW_CREATION);
+  { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
     if (maybe_hash->IsFailure()) return maybe_hash;
     hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
   }
@@ -12547,16 +11418,16 @@
 }
 
 
-void ObjectHashTable::AddEntry(int entry, Object* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
   set(EntryToIndex(entry), key);
   set(EntryToIndex(entry) + 1, value);
   ElementAdded();
 }
 
 
-void ObjectHashTable::RemoveEntry(int entry) {
-  set_the_hole(EntryToIndex(entry));
-  set_the_hole(EntryToIndex(entry) + 1);
+void ObjectHashTable::RemoveEntry(int entry, Heap* heap) {
+  set_null(heap, EntryToIndex(entry));
+  set_null(heap, EntryToIndex(entry) + 1);
   ElementRemoved();
 }
 
@@ -12811,136 +11682,7 @@
   // Multiple break points.
   return FixedArray::cast(break_point_objects())->length();
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
+#endif
 
 
-MaybeObject* JSDate::GetField(Object* object, Smi* index) {
-  return JSDate::cast(object)->DoGetField(
-      static_cast<FieldIndex>(index->value()));
-}
-
-
-Object* JSDate::DoGetField(FieldIndex index) {
-  ASSERT(index != kDateValue);
-
-  DateCache* date_cache = GetIsolate()->date_cache();
-
-  if (index < kFirstUncachedField) {
-    Object* stamp = cache_stamp();
-    if (stamp != date_cache->stamp() && stamp->IsSmi()) {
-      // Since the stamp is not NaN, the value is also not NaN.
-      int64_t local_time_ms =
-          date_cache->ToLocal(static_cast<int64_t>(value()->Number()));
-      SetLocalFields(local_time_ms, date_cache);
-    }
-    switch (index) {
-      case kYear: return year();
-      case kMonth: return month();
-      case kDay: return day();
-      case kWeekday: return weekday();
-      case kHour: return hour();
-      case kMinute: return min();
-      case kSecond: return sec();
-      default: UNREACHABLE();
-    }
-  }
-
-  if (index >= kFirstUTCField) {
-    return GetUTCField(index, value()->Number(), date_cache);
-  }
-
-  double time = value()->Number();
-  if (isnan(time)) return GetIsolate()->heap()->nan_value();
-
-  int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time));
-  int days = DateCache::DaysFromTime(local_time_ms);
-
-  if (index == kDays) return Smi::FromInt(days);
-
-  int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
-  if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
-  ASSERT(index == kTimeInDay);
-  return Smi::FromInt(time_in_day_ms);
-}
-
-
-Object* JSDate::GetUTCField(FieldIndex index,
-                            double value,
-                            DateCache* date_cache) {
-  ASSERT(index >= kFirstUTCField);
-
-  if (isnan(value)) return GetIsolate()->heap()->nan_value();
-
-  int64_t time_ms = static_cast<int64_t>(value);
-
-  if (index == kTimezoneOffset) {
-    return Smi::FromInt(date_cache->TimezoneOffset(time_ms));
-  }
-
-  int days = DateCache::DaysFromTime(time_ms);
-
-  if (index == kWeekdayUTC) return Smi::FromInt(date_cache->Weekday(days));
-
-  if (index <= kDayUTC) {
-    int year, month, day;
-    date_cache->YearMonthDayFromDays(days, &year, &month, &day);
-    if (index == kYearUTC) return Smi::FromInt(year);
-    if (index == kMonthUTC) return Smi::FromInt(month);
-    ASSERT(index == kDayUTC);
-    return Smi::FromInt(day);
-  }
-
-  int time_in_day_ms = DateCache::TimeInDay(time_ms, days);
-  switch (index) {
-    case kHourUTC: return Smi::FromInt(time_in_day_ms / (60 * 60 * 1000));
-    case kMinuteUTC: return Smi::FromInt((time_in_day_ms / (60 * 1000)) % 60);
-    case kSecondUTC: return Smi::FromInt((time_in_day_ms / 1000) % 60);
-    case kMillisecondUTC: return Smi::FromInt(time_in_day_ms % 1000);
-    case kDaysUTC: return Smi::FromInt(days);
-    case kTimeInDayUTC: return Smi::FromInt(time_in_day_ms);
-    default: UNREACHABLE();
-  }
-
-  UNREACHABLE();
-  return NULL;
-}
-
-
-void JSDate::SetValue(Object* value, bool is_value_nan) {
-  set_value(value);
-  if (is_value_nan) {
-    HeapNumber* nan = GetIsolate()->heap()->nan_value();
-    set_cache_stamp(nan, SKIP_WRITE_BARRIER);
-    set_year(nan, SKIP_WRITE_BARRIER);
-    set_month(nan, SKIP_WRITE_BARRIER);
-    set_day(nan, SKIP_WRITE_BARRIER);
-    set_hour(nan, SKIP_WRITE_BARRIER);
-    set_min(nan, SKIP_WRITE_BARRIER);
-    set_sec(nan, SKIP_WRITE_BARRIER);
-    set_weekday(nan, SKIP_WRITE_BARRIER);
-  } else {
-    set_cache_stamp(Smi::FromInt(DateCache::kInvalidStamp), SKIP_WRITE_BARRIER);
-  }
-}
-
-
-void JSDate::SetLocalFields(int64_t local_time_ms, DateCache* date_cache) {
-  int days = DateCache::DaysFromTime(local_time_ms);
-  int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
-  int year, month, day;
-  date_cache->YearMonthDayFromDays(days, &year, &month, &day);
-  int weekday = date_cache->Weekday(days);
-  int hour = time_in_day_ms / (60 * 60 * 1000);
-  int min = (time_in_day_ms / (60 * 1000)) % 60;
-  int sec = (time_in_day_ms / 1000) % 60;
-  set_cache_stamp(date_cache->stamp());
-  set_year(Smi::FromInt(year), SKIP_WRITE_BARRIER);
-  set_month(Smi::FromInt(month), SKIP_WRITE_BARRIER);
-  set_day(Smi::FromInt(day), SKIP_WRITE_BARRIER);
-  set_weekday(Smi::FromInt(weekday), SKIP_WRITE_BARRIER);
-  set_hour(Smi::FromInt(hour), SKIP_WRITE_BARRIER);
-  set_min(Smi::FromInt(min), SKIP_WRITE_BARRIER);
-  set_sec(Smi::FromInt(sec), SKIP_WRITE_BARRIER);
-}
-
 } }  // namespace v8::internal
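
The removed accessors above all reduce to the same integer arithmetic: split a millisecond timestamp into whole days plus a time-in-day remainder, then peel hours, minutes, seconds and milliseconds off the remainder. A minimal standalone sketch of that decomposition (helper names are hypothetical, not V8's; floor division keeps pre-epoch times correct):

    #include <cstdint>
    #include <cstdio>

    static const int64_t kMsPerDay = 24 * 60 * 60 * 1000;

    static int64_t DaysFromTime(int64_t time_ms) {
      if (time_ms < 0) time_ms -= kMsPerDay - 1;  // floor, not truncate
      return time_ms / kMsPerDay;
    }

    static int TimeInDay(int64_t time_ms, int64_t days) {
      return static_cast<int>(time_ms - days * kMsPerDay);
    }

    int main() {
      int64_t t = 1234567890123LL;  // a UTC timestamp in ms
      int64_t days = DaysFromTime(t);
      int ms = TimeInDay(t, days);
      std::printf("day %lld, %02d:%02d:%02d.%03d\n",
                  static_cast<long long>(days),
                  ms / (60 * 60 * 1000),    // hour, as in GetUTCField
                  (ms / (60 * 1000)) % 60,  // minute
                  (ms / 1000) % 60,         // second
                  ms % 1000);               // millisecond
      return 0;
    }
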
diff --git a/src/objects.h b/src/objects.h
index a9cb8e0..1245ed0 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 #include "allocation.h"
 #include "builtins.h"
 #include "list.h"
-#include "property-details.h"
 #include "smart-array-pointer.h"
 #include "unicode-inl.h"
 #if V8_TARGET_ARCH_ARM
@@ -39,8 +38,6 @@
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/constants-mips.h"
 #endif
-#include "v8checks.h"
-
 
 //
 // Most object types in the V8 JavaScript are described in this file.
@@ -54,8 +51,6 @@
 //       - JSReceiver  (suitable for property access)
 //         - JSObject
 //           - JSArray
-//           - JSSet
-//           - JSMap
 //           - JSWeakMap
 //           - JSRegExp
 //           - JSFunction
@@ -64,7 +59,6 @@
 //             - JSBuiltinsObject
 //           - JSGlobalProxy
 //           - JSValue
-//             - JSDate
 //           - JSMessageObject
 //         - JSProxy
 //           - JSFunctionProxy
@@ -80,7 +74,7 @@
 //             - MapCache
 //           - Context
 //           - JSFunctionResultCache
-//           - ScopeInfo
+//           - SerializedScopeInfo
 //         - FixedDoubleArray
 //         - ExternalArray
 //           - ExternalPixelArray
@@ -108,7 +102,6 @@
 //       - SharedFunctionInfo
 //       - Struct
 //         - AccessorInfo
-//         - AccessorPair
 //         - AccessCheckInfo
 //         - InterceptorInfo
 //         - CallHandlerInfo
@@ -127,17 +120,24 @@
 //  HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
 //  Failure:    [30 bit signed int] 11
 
+// Ecma-262 3rd 8.6.1
+enum PropertyAttributes {
+  NONE              = v8::None,
+  READ_ONLY         = v8::ReadOnly,
+  DONT_ENUM         = v8::DontEnum,
+  DONT_DELETE       = v8::DontDelete,
+  ABSENT            = 16  // Used in runtime to indicate a property is absent.
+  // ABSENT can never be stored in or returned from a descriptor's attributes
+  // bitfield.  It is only used as a return value meaning the attributes of
+  // a non-existent property.
+};
+
 namespace v8 {
 namespace internal {
 
 enum ElementsKind {
-  // The "fast" kind for elements that only contain SMI values. Must be first
-  // to make it possible to efficiently check maps for this kind.
-  FAST_SMI_ONLY_ELEMENTS,
-
-  // The "fast" kind for tagged values. Must be second to make it possible to
-  // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
-  // together at once.
+  // The "fast" kind for tagged values. Must be first to make it possible
+  // to efficiently check maps if they have fast elements.
   FAST_ELEMENTS,
 
   // The "fast" kind for unwrapped, non-tagged double values.
@@ -160,26 +160,101 @@
   // Derived constants from ElementsKind
   FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
   LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
-  FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
+  FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
   LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
 };
 
-enum CompareMapMode {
-  REQUIRE_EXACT_MAP,
-  ALLOW_ELEMENT_TRANSITION_MAPS
+static const int kElementsKindCount =
+    LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
+
+// PropertyDetails captures type and attributes for a property.
+// They are used both in property dictionaries and instance descriptors.
+class PropertyDetails BASE_EMBEDDED {
+ public:
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  int index = 0) {
+    ASSERT(type != ELEMENTS_TRANSITION);
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(StorageField::is_valid(index));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | StorageField::encode(index);
+
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(index == this->index());
+  }
+
+  PropertyDetails(PropertyAttributes attributes,
+                  PropertyType type,
+                  ElementsKind elements_kind) {
+    ASSERT(type == ELEMENTS_TRANSITION);
+    ASSERT(TypeField::is_valid(type));
+    ASSERT(AttributesField::is_valid(attributes));
+    ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
+
+    value_ = TypeField::encode(type)
+        | AttributesField::encode(attributes)
+        | StorageField::encode(static_cast<int>(elements_kind));
+
+    ASSERT(type == this->type());
+    ASSERT(attributes == this->attributes());
+    ASSERT(elements_kind == this->elements_kind());
+  }
+
+  // Conversion for storing details as Object*.
+  explicit inline PropertyDetails(Smi* smi);
+  inline Smi* AsSmi();
+
+  PropertyType type() { return TypeField::decode(value_); }
+
+  bool IsTransition() {
+    PropertyType t = type();
+    ASSERT(t != INTERCEPTOR);
+    return t == MAP_TRANSITION || t == CONSTANT_TRANSITION ||
+        t == ELEMENTS_TRANSITION;
+  }
+
+  bool IsProperty() {
+    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
+  }
+
+  PropertyAttributes attributes() { return AttributesField::decode(value_); }
+
+  int index() { return StorageField::decode(value_); }
+
+  ElementsKind elements_kind() {
+    ASSERT(type() == ELEMENTS_TRANSITION);
+    return static_cast<ElementsKind>(StorageField::decode(value_));
+  }
+
+  inline PropertyDetails AsDeleted();
+
+  static bool IsValidIndex(int index) {
+    return StorageField::is_valid(index);
+  }
+
+  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
+  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
+  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
+  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
+
+  // Bit fields in value_ (type, shift, size). Must be public so the
+  // constants can be embedded in generated code.
+  class TypeField:       public BitField<PropertyType,       0, 4> {};
+  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
+  class DeletedField:    public BitField<uint32_t,           7, 1> {};
+  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
+
+  static const int kInitialIndex = 1;
+
+ private:
+  uint32_t value_;
 };
 
-enum KeyedAccessGrowMode {
-  DO_NOT_ALLOW_JSARRAY_GROWTH,
-  ALLOW_JSARRAY_GROWTH
-};
-
-const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
-
-void PrintElementsKind(FILE* out, ElementsKind kind);
-
-inline bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
-                                                ElementsKind to_kind);
 
 // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER.
 enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
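
The BitField helpers used by PropertyDetails above amount to fixed shifts and masks within one uint32_t, laid out as bits 0-3 type, 4-6 attributes, 7 deleted, 8-31 storage. A standalone sketch of that packing (hypothetical function names, not V8 API):

    #include <cassert>
    #include <cstdint>

    static uint32_t Encode(unsigned type, unsigned attrs, unsigned index) {
      assert(type < (1u << 4) && attrs < (1u << 3) && index < (1u << 24));
      return type | (attrs << 4) | (index << 8);  // deleted bit left 0
    }

    static unsigned DecodeType(uint32_t v) { return v & 0xF; }
    static unsigned DecodeAttrs(uint32_t v) { return (v >> 4) & 0x7; }
    static unsigned DecodeIndex(uint32_t v) { return v >> 8; }

    int main() {
      uint32_t v = Encode(/*type=*/2, /*attrs=*/5, /*index=*/42);
      assert(DecodeType(v) == 2);
      assert(DecodeAttrs(v) == 5);
      assert(DecodeIndex(v) == 42);
      return 0;
    }
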
@@ -201,15 +276,8 @@
 };
 
 
-// Indicates whether a get method should implicitly create the object looked up.
-enum CreationFlag {
-  ALLOW_CREATION,
-  OMIT_CREATION
-};
-
-
 // Instance size sentinel for objects of variable size.
-const int kVariableSizeSentinel = 0;
+static const int kVariableSizeSentinel = 0;
 
 
 // All Maps have a field instance_type containing a InstanceType.
@@ -223,7 +291,7 @@
 // encoding is considered TWO_BYTE.  It is not mentioned in the name.  ASCII
 // encoding is mentioned explicitly in the name.  Likewise, the default
 // representation is considered sequential.  It is not mentioned in the
-// name.  The other representations (e.g. CONS, EXTERNAL) are explicitly
+// name.  The other representations (eg, CONS, EXTERNAL) are explicitly
 // mentioned.  Finally, the string is either a SYMBOL_TYPE (if it is a
 // symbol) or a STRING_TYPE (if it is not a symbol).
 //
@@ -243,9 +311,6 @@
   V(EXTERNAL_SYMBOL_TYPE)                                                      \
   V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_SYMBOL_TYPE)                                                \
-  V(SHORT_EXTERNAL_SYMBOL_TYPE)                                                \
-  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE)                                \
-  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE)                                          \
   V(STRING_TYPE)                                                               \
   V(ASCII_STRING_TYPE)                                                         \
   V(CONS_STRING_TYPE)                                                          \
@@ -254,9 +319,6 @@
   V(EXTERNAL_STRING_TYPE)                                                      \
   V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                      \
   V(EXTERNAL_ASCII_STRING_TYPE)                                                \
-  V(SHORT_EXTERNAL_STRING_TYPE)                                                \
-  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE)                                \
-  V(SHORT_EXTERNAL_ASCII_STRING_TYPE)                                          \
   V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE)                                        \
                                                                                \
   V(MAP_TYPE)                                                                  \
@@ -267,7 +329,6 @@
   V(HEAP_NUMBER_TYPE)                                                          \
   V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
-  V(FREE_SPACE_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
   /* Object::IsExternalArray(). */                                             \
@@ -282,7 +343,6 @@
   V(FILLER_TYPE)                                                               \
                                                                                \
   V(ACCESSOR_INFO_TYPE)                                                        \
-  V(ACCESSOR_PAIR_TYPE)                                                        \
   V(ACCESS_CHECK_INFO_TYPE)                                                    \
   V(INTERCEPTOR_INFO_TYPE)                                                     \
   V(CALL_HANDLER_INFO_TYPE)                                                    \
@@ -293,8 +353,6 @@
   V(SCRIPT_TYPE)                                                               \
   V(CODE_CACHE_TYPE)                                                           \
   V(POLYMORPHIC_CODE_CACHE_TYPE)                                               \
-  V(TYPE_FEEDBACK_INFO_TYPE)                                                   \
-  V(ALIASED_ARGUMENTS_ENTRY_TYPE)                                              \
                                                                                \
   V(FIXED_ARRAY_TYPE)                                                          \
   V(FIXED_DOUBLE_ARRAY_TYPE)                                                   \
@@ -303,7 +361,6 @@
   V(JS_MESSAGE_OBJECT_TYPE)                                                    \
                                                                                \
   V(JS_VALUE_TYPE)                                                             \
-  V(JS_DATE_TYPE)                                                              \
   V(JS_OBJECT_TYPE)                                                            \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                                          \
   V(JS_GLOBAL_OBJECT_TYPE)                                                     \
@@ -361,18 +418,6 @@
     ExternalAsciiString::kSize,                                                \
     external_ascii_symbol,                                                     \
     ExternalAsciiSymbol)                                                       \
-  V(SHORT_EXTERNAL_SYMBOL_TYPE,                                                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_symbol,                                                     \
-    ShortExternalSymbol)                                                       \
-  V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE,                                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_symbol_with_ascii_data,                                     \
-    ShortExternalSymbolWithAsciiData)                                          \
-  V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE,                                          \
-    ExternalAsciiString::kShortSize,                                           \
-    short_external_ascii_symbol,                                               \
-    ShortExternalAsciiSymbol)                                                  \
   V(STRING_TYPE,                                                               \
     kVariableSizeSentinel,                                                     \
     string,                                                                    \
@@ -408,19 +453,7 @@
   V(EXTERNAL_ASCII_STRING_TYPE,                                                \
     ExternalAsciiString::kSize,                                                \
     external_ascii_string,                                                     \
-    ExternalAsciiString)                                                       \
-  V(SHORT_EXTERNAL_STRING_TYPE,                                                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_string,                                                     \
-    ShortExternalString)                                                       \
-  V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE,                                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_string_with_ascii_data,                                     \
-    ShortExternalStringWithAsciiData)                                          \
-  V(SHORT_EXTERNAL_ASCII_STRING_TYPE,                                          \
-    ExternalAsciiString::kShortSize,                                           \
-    short_external_ascii_string,                                               \
-    ShortExternalAsciiString)
+    ExternalAsciiString)
 
 // A struct is a simple object with a set of object-valued fields.  Including an
 // object type in this causes the compiler to generate most of the boilerplate
@@ -433,7 +466,6 @@
 // manually.
 #define STRUCT_LIST_ALL(V)                                                     \
   V(ACCESSOR_INFO, AccessorInfo, accessor_info)                                \
-  V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                                \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                     \
   V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info)                       \
   V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info)                     \
@@ -443,9 +475,7 @@
   V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info)                        \
   V(SCRIPT, Script, script)                                                    \
   V(CODE_CACHE, CodeCache, code_cache)                                         \
-  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)      \
-  V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)                  \
-  V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
+  V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define STRUCT_LIST_DEBUGGER(V)                                                \
@@ -503,15 +533,10 @@
 STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0);
 
 // If bit 7 is clear, then bit 3 indicates whether this two-byte
-// string actually contains ASCII data.
+// string actually contains ascii data.
 const uint32_t kAsciiDataHintMask = 0x08;
 const uint32_t kAsciiDataHintTag = 0x08;
 
-// If bit 7 is clear and string representation indicates an external string,
-// then bit 4 indicates whether the data pointer is cached.
-const uint32_t kShortExternalStringMask = 0x10;
-const uint32_t kShortExternalStringTag = 0x10;
-
 
 // A ConsString with an empty string as the right side is a candidate
 // for being shortcut by the garbage collector unless it is a
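
The hint constants above are tested with the usual mask-and-compare idiom on the instance type. A minimal sketch using the two constants from this hunk (the predicate name is made up):

    #include <cassert>
    #include <cstdint>

    const uint32_t kAsciiDataHintMask = 0x08;
    const uint32_t kAsciiDataHintTag = 0x08;

    static bool HasAsciiDataHint(uint32_t instance_type) {
      return (instance_type & kAsciiDataHintMask) == kAsciiDataHintTag;
    }

    int main() {
      assert(HasAsciiDataHint(0x0C));   // bit 3 set
      assert(!HasAsciiDataHint(0x04));  // bit 3 clear
      return 0;
    }
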
@@ -531,13 +556,6 @@
   ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
   CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
   CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
-  SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
-                               kExternalStringTag | kShortExternalStringTag,
-  SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
-      kTwoByteStringTag | kSymbolTag | kExternalStringTag |
-      kAsciiDataHintTag | kShortExternalStringTag,
-  SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
-                                     kSymbolTag | kShortExternalStringTag,
   EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
   EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
@@ -549,13 +567,6 @@
   CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
   SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
   SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
-  SHORT_EXTERNAL_STRING_TYPE =
-      kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
-  SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
-      kTwoByteStringTag | kExternalStringTag |
-      kAsciiDataHintTag | kShortExternalStringTag,
-  SHORT_EXTERNAL_ASCII_STRING_TYPE =
-      kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
   EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
   EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
       kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
@@ -574,7 +585,6 @@
   HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
-  FREE_SPACE_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
   EXTERNAL_SHORT_ARRAY_TYPE,
@@ -589,7 +599,6 @@
 
   // Structs.
   ACCESSOR_INFO_TYPE,
-  ACCESSOR_PAIR_TYPE,
   ACCESS_CHECK_INFO_TYPE,
   INTERCEPTOR_INFO_TYPE,
   CALL_HANDLER_INFO_TYPE,
@@ -600,8 +609,6 @@
   SCRIPT_TYPE,
   CODE_CACHE_TYPE,
   POLYMORPHIC_CODE_CACHE_TYPE,
-  TYPE_FEEDBACK_INFO_TYPE,
-  ALIASED_ARGUMENTS_ENTRY_TYPE,
   // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
   // is defined. However as include/v8.h contain some of the instance type
   // constants always having them avoids them getting different numbers
@@ -614,33 +621,24 @@
 
   JS_MESSAGE_OBJECT_TYPE,
 
-  // All the following types are subtypes of JSReceiver, which corresponds to
-  // objects in the JS sense. The first and the last type in this range are
-  // the two forms of function. This organization enables using the same
-  // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
-  // NONCALLABLE_JS_OBJECT range.
-  JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
-  JS_PROXY_TYPE,  // LAST_JS_PROXY_TYPE
-
-  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
-  JS_DATE_TYPE,
+  JS_VALUE_TYPE,  // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
-  JS_SET_TYPE,
-  JS_MAP_TYPE,
+  JS_PROXY_TYPE,
   JS_WEAK_MAP_TYPE,
 
-  JS_REGEXP_TYPE,
+  JS_REGEXP_TYPE,  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
 
-  JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
+  JS_FUNCTION_TYPE,  // FIRST_CALLABLE_SPEC_OBJECT_TYPE
+  JS_FUNCTION_PROXY_TYPE,  // LAST_CALLABLE_SPEC_OBJECT_TYPE
 
   // Pseudo-types
   FIRST_TYPE = 0x0,
-  LAST_TYPE = JS_FUNCTION_TYPE,
+  LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
   // Boundaries for testing for an external array.
@@ -653,27 +651,21 @@
   // are not continuous in this enum! The enum ranges instead reflect the
   // external class names, where proxies are treated as either ordinary objects,
   // or functions.
-  FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
+  FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
   LAST_JS_RECEIVER_TYPE = LAST_TYPE,
-  // Boundaries for testing the types represented as JSObject
-  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
-  LAST_JS_OBJECT_TYPE = LAST_TYPE,
-  // Boundaries for testing the types represented as JSProxy
-  FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
-  LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
-  // Boundaries for testing whether the type is a JavaScript object.
-  FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
-  LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
   // Boundaries for testing the types for which typeof is "object".
-  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
+  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
   LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
-  // Note that the types for which typeof is "function" are not continuous.
-  // Define this so that we can put assertions on discrete checks.
-  NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
+  // Boundaries for testing the types for which typeof is "function".
+  FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
+  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+  // Boundaries for testing whether the type is a JavaScript object.
+  FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+  LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
 };
 
-const int kExternalArrayTypeCount =
-    LAST_EXTERNAL_ARRAY_TYPE - FIRST_EXTERNAL_ARRAY_TYPE + 1;
+static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
+    FIRST_EXTERNAL_ARRAY_TYPE + 1;
 
 STATIC_CHECK(JS_OBJECT_TYPE == Internals::kJSObjectType);
 STATIC_CHECK(FIRST_NONSTRING_TYPE == Internals::kFirstNonstringType);
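
Both layouts arrange each type class as a contiguous run of enum values, which lets a class test compile to a single unsigned comparison instead of a chain of equality checks. An illustrative standalone sketch (the enum values are made up):

    #include <cassert>

    enum FakeType { NOT_SPEC = 10, FIRST_SPEC = 11, MID_SPEC = 12,
                    LAST_SPEC = 13 };

    static bool InSpecRange(FakeType t) {
      // Same as FIRST_SPEC <= t && t <= LAST_SPEC, in one comparison:
      // values below FIRST_SPEC wrap to huge unsigned numbers.
      return static_cast<unsigned>(t - FIRST_SPEC) <=
             static_cast<unsigned>(LAST_SPEC - FIRST_SPEC);
    }

    int main() {
      assert(!InSpecRange(NOT_SPEC));
      assert(InSpecRange(FIRST_SPEC));
      assert(InSpecRange(LAST_SPEC));
      return 0;
    }
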
@@ -705,7 +697,6 @@
 class FixedArrayBase;
 class ObjectVisitor;
 class StringStream;
-class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -719,6 +710,7 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
+class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -756,7 +748,7 @@
   // Prints this object with details.
   inline void Print() {
     Print(stdout);
-  }
+  };
   inline void PrintLn() {
     PrintLn(stdout);
   }
@@ -799,7 +791,6 @@
   V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
-  V(FreeSpace)                                 \
   V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
@@ -807,18 +798,16 @@
   V(DescriptorArray)                           \
   V(DeoptimizationInputData)                   \
   V(DeoptimizationOutputData)                  \
-  V(TypeFeedbackCells)                         \
   V(FixedArray)                                \
   V(FixedDoubleArray)                          \
   V(Context)                                   \
   V(GlobalContext)                             \
-  V(ScopeInfo)                                 \
+  V(SerializedScopeInfo)                       \
   V(JSFunction)                                \
   V(Code)                                      \
   V(Oddball)                                   \
   V(SharedFunctionInfo)                        \
   V(JSValue)                                   \
-  V(JSDate)                                    \
   V(JSMessageObject)                           \
   V(StringWrapper)                             \
   V(Foreign)                                   \
@@ -826,8 +815,6 @@
   V(JSArray)                                   \
   V(JSProxy)                                   \
   V(JSFunctionProxy)                           \
-  V(JSSet)                                     \
-  V(JSMap)                                     \
   V(JSWeakMap)                                 \
   V(JSRegExp)                                  \
   V(HashTable)                                 \
@@ -848,9 +835,6 @@
   V(AccessCheckNeeded)                         \
   V(JSGlobalPropertyCell)                      \
 
-
-class JSReceiver;
-
 // Object is the abstract superclass for all classes in the
 // object hierarchy.
 // Object does not use any virtual functions to avoid the
@@ -860,15 +844,11 @@
 class Object : public MaybeObject {
  public:
   // Type testing.
-  bool IsObject() { return true; }
-
 #define IS_TYPE_FUNCTION_DECL(type_)  inline bool Is##type_();
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
-  inline bool IsFixedArrayBase();
-
   // Returns true if this object is an instance of the specified
   // function template.
   inline bool IsInstanceOf(FunctionTemplateInfo* type);
@@ -879,7 +859,6 @@
 #undef DECLARE_STRUCT_PREDICATE
 
   INLINE(bool IsSpecObject());
-  INLINE(bool IsSpecFunction());
 
   // Oddball testing.
   INLINE(bool IsUndefined());
@@ -888,14 +867,9 @@
   INLINE(bool IsTrue());
   INLINE(bool IsFalse());
   inline bool IsArgumentsMarker();
-  inline bool NonFailureIsHeapObject();
-
-  // Filler objects (fillers and free space objects).
-  inline bool IsFiller();
 
   // Extract the number.
   inline double Number();
-  inline bool IsNaN();
 
   // Returns true if the object is of the correct type to be used as a
   // implementation of a JSObject's elements.
@@ -925,41 +899,28 @@
       Object* receiver,
       String* key,
       PropertyAttributes* attributes);
-
-  static Handle<Object> GetProperty(Handle<Object> object,
-                                    Handle<Object> receiver,
-                                    LookupResult* result,
-                                    Handle<String> key,
-                                    PropertyAttributes* attributes);
-
   MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver,
                                            LookupResult* result,
                                            String* key,
                                            PropertyAttributes* attributes);
-
+  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+                                                       Object* structure,
+                                                       String* name,
+                                                       Object* holder);
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
+                                                      String* name,
+                                                      Object* handler);
   MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
-                                                            JSReceiver* getter);
+                                                            JSFunction* getter);
 
-  static Handle<Object> GetElement(Handle<Object> object, uint32_t index);
-  MUST_USE_RESULT inline MaybeObject* GetElement(uint32_t index);
+  inline MaybeObject* GetElement(uint32_t index);
   // For use when we know that no exception can be thrown.
   inline Object* GetElementNoExceptionThrown(uint32_t index);
-  MUST_USE_RESULT MaybeObject* GetElementWithReceiver(Object* receiver,
-                                                      uint32_t index);
+  MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   Object* GetPrototype();
 
-  // Returns the permanent hash code associated with this object depending on
-  // the actual object type.  Might return a failure in case no hash was
-  // created yet or GC was caused by creation.
-  MUST_USE_RESULT MaybeObject* GetHash(CreationFlag flag);
-
-  // Checks whether this object has the same value as the given one.  This
-  // function is implemented according to ES5, section 9.12 and can be used
-  // to implement the Harmony "egal" function.
-  bool SameValue(Object* other);
-
   // Tries to convert an object to an array index.  Returns true and sets
   // the output parameter if it succeeds.
   inline bool ToArrayIndex(uint32_t* index);
@@ -1025,8 +986,7 @@
   void SmiVerify();
 #endif
 
-  static const int kMinValue =
-      (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
+  static const int kMinValue = (-1 << (kSmiValueSize - 1));
   static const int kMaxValue = -(kMinValue + 1);
 
  private:
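
Both definitions of kMinValue are intended to yield the most negative value that fits in a Smi payload, but the form being restored, (-1 << (kSmiValueSize - 1)), left-shifts a negative signed integer, which is formally undefined behavior in C++; the 3.7 form shifts in unsigned space instead. A minimal sketch, assuming the 32-bit configuration where kSmiValueSize is 31:

    #include <cassert>

    const int kSmiValueSize = 31;
    // Shift as unsigned, then convert back; the conversion is
    // well defined on two's-complement targets.
    const int kMinValue =
        static_cast<int>(static_cast<unsigned>(-1) << (kSmiValueSize - 1));
    const int kMaxValue = -(kMinValue + 1);

    int main() {
      assert(kMinValue == -(1 << 30));     // -1073741824
      assert(kMaxValue == (1 << 30) - 1);  //  1073741823
      return 0;
    }
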
@@ -1107,7 +1067,7 @@
 
 
 // Heap objects typically have a map pointer in their first word.  However,
-// during GC other data (e.g. mark bits, forwarding addresses) is sometimes
+// during GC other data (eg, mark bits, forwarding addresses) is sometimes
 // encoded in the first word.  The class MapWord is an abstraction of the
 // value in a heap object's first word.
 class MapWord BASE_EMBEDDED {
@@ -1126,7 +1086,7 @@
 
   // True if this map word is a forwarding address for a scavenge
   // collection.  Only valid during a scavenge collection (specifically,
-  // when all map words are heap object pointers, i.e. not during a full GC).
+  // when all map words are heap object pointers, ie. not during a full GC).
   inline bool IsForwardingAddress();
 
   // Create a map word from a forwarding address.
@@ -1135,13 +1095,101 @@
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
-  static inline MapWord FromRawValue(uintptr_t value) {
-    return MapWord(value);
-  }
+  // Marking phase of full collection: the map word of live objects is
+  // marked, and may be marked as overflowed (eg, the object is live, its
+  // children have not been visited, and it does not fit in the marking
+  // stack).
 
-  inline uintptr_t ToRawValue() {
-    return value_;
-  }
+  // True if this map word's mark bit is set.
+  inline bool IsMarked();
+
+  // Return this map word but with its mark bit set.
+  inline void SetMark();
+
+  // Return this map word but with its mark bit cleared.
+  inline void ClearMark();
+
+  // True if this map word's overflow bit is set.
+  inline bool IsOverflowed();
+
+  // Return this map word but with its overflow bit set.
+  inline void SetOverflow();
+
+  // Return this map word but with its overflow bit cleared.
+  inline void ClearOverflow();
+
+
+  // Compacting phase of a full compacting collection: the map word of live
+  // objects contains an encoding of the original map address along with the
+  // forwarding address (represented as an offset from the first live object
+  // in the same page as the (old) object address).
+
+  // Create a map word from a map address and a forwarding address offset.
+  static inline MapWord EncodeAddress(Address map_address, int offset);
+
+  // Return the map address encoded in this map word.
+  inline Address DecodeMapAddress(MapSpace* map_space);
+
+  // Return the forwarding offset encoded in this map word.
+  inline int DecodeOffset();
+
+
+  // During serialization: the map word is used to hold an encoded
+  // address, and possibly a mark bit (set and cleared with SetMark
+  // and ClearMark).
+
+  // Create a map word from an encoded address.
+  static inline MapWord FromEncodedAddress(Address address);
+
+  inline Address ToEncodedAddress();
+
+  // Bits used by the marking phase of the garbage collector.
+  //
+  // The first word of a heap object is normally a map pointer. The last two
+  // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
+  // mark an object as live and/or overflowed:
+  //   last bit = 0, marked as alive
+  //   second bit = 1, overflowed
+  // An object is only marked as overflowed when it is marked as live while
+  // the marking stack is overflowed.
+  static const int kMarkingBit = 0;  // marking bit
+  static const int kMarkingMask = (1 << kMarkingBit);  // marking mask
+  static const int kOverflowBit = 1;  // overflow bit
+  static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
+
+  // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
+  // used.
+  // +-----------------+------------------+-----------------+
+  // |forwarding offset|page offset of map|page index of map|
+  // +-----------------+------------------+-----------------+
+  //          ^                 ^                  ^
+  //          |                 |                  |
+  //          |                 |          kMapPageIndexBits
+  //          |         kMapPageOffsetBits
+  // kForwardingOffsetBits
+  static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
+  static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
+#ifdef V8_HOST_ARCH_64_BIT
+  static const int kMapPageIndexBits = 16;
+#else
+  // Use all the 32-bits to encode on a 32-bit platform.
+  static const int kMapPageIndexBits =
+      32 - (kMapPageOffsetBits + kForwardingOffsetBits);
+#endif
+
+  static const int kMapPageIndexShift = 0;
+  static const int kMapPageOffsetShift =
+      kMapPageIndexShift + kMapPageIndexBits;
+  static const int kForwardingOffsetShift =
+      kMapPageOffsetShift + kMapPageOffsetBits;
+
+  // Bit masks covering the different parts the encoding.
+  static const uintptr_t kMapPageIndexMask =
+      (1 << kMapPageOffsetShift) - 1;
+  static const uintptr_t kMapPageOffsetMask =
+      ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
+  static const uintptr_t kForwardingOffsetMask =
+      ~(kMapPageIndexMask | kMapPageOffsetMask);
 
  private:
   // HeapObject calls the private constructor and directly reads the value.
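
The restored scheme overlays marking state on the low bits of the map pointer, which is normally tagged 01 for a heap object: clearing the last bit marks the object live, and restoring it undoes the mark. A toy sketch of those transitions (illustrative only, not V8's implementation):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kMarkingMask = 1;  // bit 0, per the comments above

    static uintptr_t SetMark(uintptr_t w) { return w & ~kMarkingMask; }
    static uintptr_t ClearMark(uintptr_t w) { return w | kMarkingMask; }
    static bool IsMarked(uintptr_t w) { return (w & kMarkingMask) == 0; }

    int main() {
      uintptr_t w = 0x12345679;  // fake map pointer, low bits 01
      assert(!IsMarked(w));
      w = SetMark(w);
      assert(IsMarked(w));
      w = ClearMark(w);          // restores the heap-object tag
      assert(!IsMarked(w));
      return 0;
    }
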
@@ -1161,10 +1209,6 @@
   // information.
   inline Map* map();
   inline void set_map(Map* value);
-  // The no-write-barrier version.  This is OK if the object is white and in
-  // new space, or if the value is an immortal immutable object, like the maps
-  // of primitive (non-JS) objects like strings, heap numbers etc.
-  inline void set_map_no_write_barrier(Map* value);
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
@@ -1172,8 +1216,8 @@
   inline void set_map_word(MapWord map_word);
 
   // The Heap the object was allocated in. Used also to access Isolate.
+  // This method cannot be used during GC; it ASSERTs this.
   inline Heap* GetHeap();
-
   // Convenience method to get current isolate. This method can be
   // accessed only when its result is the same as
   // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1202,6 +1246,31 @@
   // GC internal.
   inline int SizeFromMap(Map* map);
 
+  // Support for the marking heap objects during the marking phase of GC.
+  // True if the object is marked live.
+  inline bool IsMarked();
+
+  // Mutate this object's map pointer to indicate that the object is live.
+  inline void SetMark();
+
+  // Mutate this object's map pointer to remove the indication that the
+  // object is live (ie, partially restore the map pointer).
+  inline void ClearMark();
+
+  // True if this object is marked as overflowed.  Overflowed objects have
+  // been reached and marked during marking of the heap, but their children
+  // have not necessarily been marked and they have not been pushed on the
+  // marking stack.
+  inline bool IsOverflowed();
+
+  // Mutate this object's map pointer to indicate that the object is
+  // overflowed.
+  inline void SetOverflow();
+
+  // Mutate this object's map pointer to remove the indication that the
+  // object is overflowed (ie, partially restore the map pointer).
+  inline void ClearOverflow();
+
   // Returns the field at offset in obj, as a read/write Object* reference.
   // Does no checking, and is safe to use during GC, while maps are invalid.
   // Does not invoke write barrier, so should only be assigned to
@@ -1225,14 +1294,18 @@
     HeapObjectPrint(stdout);
   }
   void HeapObjectPrint(FILE* out);
-  void PrintHeader(FILE* out, const char* id);
 #endif
-
 #ifdef DEBUG
   void HeapObjectVerify();
   inline void VerifyObjectField(int offset);
   inline void VerifySmiField(int offset);
+#endif
 
+#ifdef OBJECT_PRINT
+  void PrintHeader(FILE* out, const char* id);
+#endif
+
+#ifdef DEBUG
   // Verify a pointer is a valid HeapObject pointer that points to object
   // areas in the heap.
   static void VerifyHeapPointer(Object* p);
@@ -1352,30 +1425,6 @@
 };
 
 
-enum EnsureElementsMode {
-  DONT_ALLOW_DOUBLE_ELEMENTS,
-  ALLOW_COPIED_DOUBLE_ELEMENTS,
-  ALLOW_CONVERTED_DOUBLE_ELEMENTS
-};
-
-
-// Indicates whether a property should be set or (re)defined.  Setting of a
-// property causes attributes to remain unchanged, writability to be checked
-// and callbacks to be called.  Defining of a property causes attributes to
-// be updated and callbacks to be overridden.
-enum SetPropertyMode {
-  SET_PROPERTY,
-  DEFINE_PROPERTY
-};
-
-
-// Indicator for one component of an AccessorPair.
-enum AccessorComponent {
-  ACCESSOR_GETTER,
-  ACCESSOR_SETTER
-};
-
-
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
 class JSReceiver: public HeapObject {
@@ -1389,11 +1438,6 @@
   // Casting.
   static inline JSReceiver* cast(Object* obj);
 
-  static Handle<Object> SetProperty(Handle<JSReceiver> object,
-                                    Handle<String> key,
-                                    Handle<Object> value,
-                                    PropertyAttributes attributes,
-                                    StrictModeFlag strict_mode);
   // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetProperty(String* key,
                                            Object* value,
@@ -1404,22 +1448,8 @@
                                            Object* value,
                                            PropertyAttributes attributes,
                                            StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
-                                                            Object* value);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
-  MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
-  // Set the index'th array element.
-  // Can cause GC, or return failure if GC is required.
-  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
-                                          Object* value,
-                                          PropertyAttributes attributes,
-                                          StrictModeFlag strict_mode,
-                                          bool check_prototype);
-
-  // Tests for the fast common case for property enumeration.
-  bool IsSimpleEnum();
 
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
@@ -1436,7 +1466,6 @@
   // Can cause a GC.
   inline bool HasProperty(String* name);
   inline bool HasLocalProperty(String* name);
-  inline bool HasElement(uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   inline Object* GetPrototype();
@@ -1445,18 +1474,11 @@
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
                                             bool skip_hidden_prototypes);
 
-  // Retrieves a permanent object identity hash code. The undefined value might
-  // be returned in case no hash was created yet and OMIT_CREATION was used.
-  inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
   void LocalLookup(String* name, LookupResult* result);
   void Lookup(String* name, LookupResult* result);
 
- protected:
-  Smi* GenerateIdentityHash();
-
  private:
   PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
                                           LookupResult* result,
@@ -1503,14 +1525,8 @@
   MUST_USE_RESULT inline MaybeObject* ResetElements();
   inline ElementsKind GetElementsKind();
   inline ElementsAccessor* GetElementsAccessor();
-  inline bool HasFastSmiOnlyElements();
   inline bool HasFastElements();
-  // Returns if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT
-  // elements.  TODO(danno): Rename HasFastTypeElements to HasFastElements() and
-  // HasFastElements to HasFastObjectElements.
-  inline bool HasFastTypeElements();
   inline bool HasFastDoubleElements();
-  inline bool HasNonStrictArgumentsElements();
   inline bool HasDictionaryElements();
   inline bool HasExternalPixelElements();
   inline bool HasExternalArrayElements();
@@ -1524,13 +1540,9 @@
   inline bool HasExternalDoubleElements();
   bool HasFastArgumentsElements();
   bool HasDictionaryArgumentsElements();
+  inline bool AllowsSetElementsLength();
   inline SeededNumberDictionary* element_dictionary();  // Gets slow elements.
 
-  inline void set_map_and_elements(
-      Map* map,
-      FixedArrayBase* value,
-      WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
   // Requires: HasFastElements().
   MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements();
 
@@ -1542,11 +1554,6 @@
   // a dictionary, and it will stay a dictionary.
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
-  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
-                                                       Object* structure,
-                                                       String* name);
-
-  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
                                            String* key,
                                            Object* value,
@@ -1564,6 +1571,8 @@
       Object* value,
       JSObject* holder,
       StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
+                                                            Object* value);
   MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
       String* name,
       Object* value,
@@ -1574,14 +1583,6 @@
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
-
-  static Handle<Object> SetLocalPropertyIgnoreAttributes(
-      Handle<JSObject> object,
-      Handle<String> key,
-      Handle<Object> value,
-      PropertyAttributes attributes);
-
-  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
       String* key,
       Object* value,
@@ -1597,11 +1598,6 @@
 
   // Sets the property value in a normalized object given (key, value, details).
   // Handles the special representation of JS global objects.
-  static Handle<Object> SetNormalizedProperty(Handle<JSObject> object,
-                                              Handle<String> key,
-                                              Handle<Object> value,
-                                              PropertyDetails details);
-
   MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name,
                                                      Object* value,
                                                      PropertyDetails details);
@@ -1627,37 +1623,31 @@
       String* name,
       bool continue_search);
 
-  static void DefineAccessor(Handle<JSObject> object,
-                             Handle<String> name,
-                             Handle<Object> getter,
-                             Handle<Object> setter,
-                             PropertyAttributes attributes);
   MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
-                                              Object* getter,
-                                              Object* setter,
+                                              bool is_getter,
+                                              Object* fun,
                                               PropertyAttributes attributes);
-  Object* LookupAccessor(String* name, AccessorComponent component);
+  Object* LookupAccessor(String* name, bool is_getter);
 
   MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info);
 
   // Used from Object::GetProperty().
-  MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck(
+  MaybeObject* GetPropertyWithFailedAccessCheck(
       Object* receiver,
       LookupResult* result,
       String* name,
       PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor(
+  MaybeObject* GetPropertyWithInterceptor(
       JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor(
+  MaybeObject* GetPropertyPostInterceptor(
       JSReceiver* receiver,
       String* name,
       PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor(
-      JSReceiver* receiver,
-      String* name,
-      PropertyAttributes* attributes);
+  MaybeObject* GetLocalPropertyPostInterceptor(JSReceiver* receiver,
+                                               String* name,
+                                               PropertyAttributes* attributes);
 
   // Returns true if this is an instance of an api function and has
   // been modified since it was created.  May give false positives.
@@ -1670,58 +1660,43 @@
   // Accessors for hidden properties object.
   //
   // Hidden properties are not local properties of the object itself.
-  // Instead they are stored in an auxiliary structure kept as a local
+  // Instead they are stored on an auxiliary JSObject stored as a local
   // property with a special name Heap::hidden_symbol(). But if the
   // receiver is a JSGlobalProxy then the auxiliary object is a property
-  // of its prototype, and if it's a detached proxy, then you can't have
-  // hidden properties.
+  // of its prototype.
+  //
+  // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
+  // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
+  // holder.
+  //
+  // These accessors do not touch interceptors or accessors.
+  inline bool HasHiddenPropertiesObject();
+  inline Object* GetHiddenPropertiesObject();
+  MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
+      Object* hidden_obj);
 
-  // Sets a hidden property on this object. Returns this object if successful,
-  // undefined if called on a detached proxy.
-  static Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
-                                          Handle<String> key,
-                                          Handle<Object> value);
-  // Returns a failure if a GC is required.
-  MUST_USE_RESULT MaybeObject* SetHiddenProperty(String* key, Object* value);
-  // Gets the value of a hidden property with the given key. Returns undefined
-  // if the property doesn't exist (or if called on a detached proxy),
-  // otherwise returns the value set for the key.
-  Object* GetHiddenProperty(String* key);
-  // Deletes a hidden property. Deleting a non-existing property is
-  // considered successful.
-  void DeleteHiddenProperty(String* key);
-  // Returns true if the object has a property with the hidden symbol as name.
-  bool HasHiddenProperties();
+  // Indicates whether the hidden properties object should be created.
+  enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
 
-  static int GetIdentityHash(Handle<JSObject> obj);
-  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
-  MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
+  // Retrieves the hidden properties object.
+  //
+  // The undefined value might be returned in case no hidden properties object
+  // is present and creation was omitted.
+  inline bool HasHiddenProperties();
+  MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
 
-  static Handle<Object> DeleteProperty(Handle<JSObject> obj,
-                                       Handle<String> name);
+  // Retrieves a permanent object identity hash code.
+  //
+  // The identity hash is stored as a hidden property. The undefined value might
+  // be returned in case no hidden properties object is present and creation was
+  // omitted.
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
+
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
-
-  static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
   MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
 
-  inline void ValidateSmiOnlyElements();
-
-  // Makes sure that this object can contain HeapObject as elements.
-  MUST_USE_RESULT inline MaybeObject* EnsureCanContainHeapObjectElements();
-
-  // Makes sure that this object can contain the specified elements.
-  MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
-      Object** elements,
-      uint32_t count,
-      EnsureElementsMode mode);
-  MUST_USE_RESULT inline MaybeObject* EnsureCanContainElements(
-      FixedArrayBase* elements,
-      EnsureElementsMode mode);
-  MUST_USE_RESULT MaybeObject* EnsureCanContainElements(
-      Arguments* arguments,
-      uint32_t first_arg,
-      uint32_t arg_count,
-      EnsureElementsMode mode);
+  // Tests for the fast common case for property enumeration.
+  bool IsSimpleEnum();
 
   // Do we want to keep the elements in fast case when increasing the
   // capacity?
@@ -1732,11 +1707,11 @@
   // elements.
   bool ShouldConvertToFastElements();
   // Returns true if the elements of JSObject contains only values that can be
-  // represented in a FixedDoubleArray and has at least one value that can only
-  // be represented as a double and not a Smi.
-  bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
+  // represented in a FixedDoubleArray.
+  bool CanConvertToFastDoubleElements();
 
   // Tells whether the index'th element is present.
+  inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
 
   // Computes the new capacity when expanding the elements of a JSObject.
@@ -1766,19 +1741,16 @@
   LocalElementType HasLocalElement(uint32_t index);
 
   bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
+  bool HasElementPostInterceptor(JSReceiver* receiver, uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
                                               Object* value,
                                               StrictModeFlag strict_mode,
                                               bool check_prototype);
-
-  MUST_USE_RESULT MaybeObject* SetDictionaryElement(
-      uint32_t index,
-      Object* value,
-      PropertyAttributes attributes,
-      StrictModeFlag strict_mode,
-      bool check_prototype,
-      SetPropertyMode set_mode = SET_PROPERTY);
+  MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
+                                                    Object* value,
+                                                    StrictModeFlag strict_mode,
+                                                    bool check_prototype);
 
   MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
       uint32_t index,
@@ -1786,50 +1758,26 @@
       StrictModeFlag strict_mode,
       bool check_prototype = true);
 
-  static Handle<Object> SetOwnElement(Handle<JSObject> object,
-                                      uint32_t index,
-                                      Handle<Object> value,
-                                      StrictModeFlag strict_mode);
-
-  // Empty handle is returned if the element cannot be set to the given value.
-  static MUST_USE_RESULT Handle<Object> SetElement(
-      Handle<JSObject> object,
-      uint32_t index,
-      Handle<Object> value,
-      PropertyAttributes attr,
-      StrictModeFlag strict_mode,
-      SetPropertyMode set_mode = SET_PROPERTY);
-
+  // Set the index'th array element.
   // A Failure object is returned if GC is needed.
-  MUST_USE_RESULT MaybeObject* SetElement(
-      uint32_t index,
-      Object* value,
-      PropertyAttributes attributes,
-      StrictModeFlag strict_mode,
-      bool check_prototype = true,
-      SetPropertyMode set_mode = SET_PROPERTY);
+  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
+                                          Object* value,
+                                          StrictModeFlag strict_mode,
+                                          bool check_prototype);
 
   // Returns the index'th element.
   // The undefined object if index is out of bounds.
-  MUST_USE_RESULT MaybeObject* GetElementWithInterceptor(Object* receiver,
-                                                         uint32_t index);
-
-  enum SetFastElementsCapacityMode {
-    kAllowSmiOnlyElements,
-    kForceSmiOnlyElements,
-    kDontAllowSmiOnlyElements
-  };
+  MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
 
   // Replace the elements' backing store with fast elements of the given
   // capacity.  Update the length for JSArrays.  Returns the new backing
   // store.
-  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
-      int capacity,
-      int length,
-      SetFastElementsCapacityMode set_capacity_mode);
+  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
+                                                                int length);
   MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
       int capacity,
       int length);
+  MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);
 
   // Lookup interceptors are used for handling properties controlled by host
   // objects.
@@ -1841,6 +1789,9 @@
   bool HasRealElementProperty(uint32_t index);
   bool HasRealNamedCallbackProperty(String* key);
 
+  // Initializes the array to a certain length
+  MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
+
   // Get the header size for a JSObject.  Used to compute the index of
   // internal fields as well as the number of internal fields.
   inline int GetHeaderSize();
@@ -1849,7 +1800,10 @@
   inline int GetInternalFieldOffset(int index);
   inline Object* GetInternalField(int index);
   inline void SetInternalField(int index, Object* value);
-  inline void SetInternalField(int index, Smi* value);
+
+  // Lookup a property.  If found, the result is valid and has
+  // detailed information.
+  void LocalLookup(String* name, LookupResult* result);
 
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
@@ -1862,7 +1816,9 @@
 
   // Returns the number of properties on this object filtering out properties
   // with the specified attributes (ignoring interceptors).
-  int NumberOfLocalProperties(PropertyAttributes filter = NONE);
+  int NumberOfLocalProperties(PropertyAttributes filter);
+  // Returns the number of enumerable properties (ignoring interceptors).
+  int NumberOfEnumProperties();
   // Fill in details for properties into storage starting at the specified
   // index.
   void GetLocalPropertyNames(FixedArray* storage, int index);
@@ -1904,21 +1860,6 @@
       Object* value,
       PropertyAttributes attributes);
 
-  // Returns a new map with all transitions dropped from the object's current
-  // map and the ElementsKind set.
-  static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
-                                              ElementsKind to_kind);
-  inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
-      Isolate* isolate,
-      ElementsKind elements_kind);
-  MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
-      ElementsKind elements_kind);
-
-  static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
-                                               ElementsKind to_kind);
-
-  MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
-
   // Converts a descriptor of any other type to a real field,
   // backed by the properties array.  Descriptors of visible
   // types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1957,32 +1898,16 @@
   // representation. If the object is expected to have additional properties
   // added, this number can be indicated so that the backing store is
   // allocated with an initial capacity for holding these properties.
-  static void NormalizeProperties(Handle<JSObject> object,
-                                  PropertyNormalizationMode mode,
-                                  int expected_additional_properties);
-
   MUST_USE_RESULT MaybeObject* NormalizeProperties(
       PropertyNormalizationMode mode,
       int expected_additional_properties);
 
-  // Convert and update the elements backing store to be a
-  // SeededNumberDictionary dictionary.  Returns the backing after conversion.
-  static Handle<SeededNumberDictionary> NormalizeElements(
-      Handle<JSObject> object);
-
   MUST_USE_RESULT MaybeObject* NormalizeElements();
 
-  static void UpdateMapCodeCache(Handle<JSObject> object,
-                                 Handle<String> name,
-                                 Handle<Code> code);
-
   MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code);
 
   // Transform slow named properties to fast variants.
   // Returns failure if allocation failed.
-  static void TransformToFastProperties(Handle<JSObject> object,
-                                        int unused_property_fields);
-
   MUST_USE_RESULT MaybeObject* TransformToFastProperties(
       int unused_property_fields);
 
@@ -1998,14 +1923,11 @@
                                        WriteBarrierMode mode
                                        = UPDATE_WRITE_BARRIER);
 
-  // Initializes the body after properties slot, properties slot is
-  // initialized by set_properties.  Fill the pre-allocated fields with
-  // pre_allocated_value and the rest with filler_value.
-  // Note: this call does not update write barrier, the caller is responsible
-  // to ensure that |filler_value| can be collected without WB here.
-  inline void InitializeBody(Map* map,
-                             Object* pre_allocated_value,
-                             Object* filler_value);
+  // Initializes the body after the properties slot; the properties slot is
+  // initialized by set_properties.
+  // Note: this call does not update the write barrier; it is the caller's
+  // responsibility to ensure that *v* can be collected without WB here.
+  inline void InitializeBody(int object_size, Object* value);
 
   // Check whether this object references another object
   bool ReferencesObject(Object* obj);
@@ -2014,7 +1936,6 @@
   static inline JSObject* cast(Object* obj);
 
   // Disallow further properties to be added to the object.
-  static Handle<Object> PreventExtensions(Handle<JSObject> object);
   MUST_USE_RESULT MaybeObject* PreventExtensions();
 
 
@@ -2041,10 +1962,6 @@
   void PrintElements(FILE* out);
 #endif
 
-  void PrintElementsTransition(
-      FILE* file, ElementsKind from_kind, FixedArrayBase* from_elements,
-      ElementsKind to_kind, FixedArrayBase* to_elements);
-
 #ifdef DEBUG
   // Structure for collecting spill information about JSObjects.
   class SpillInformation {
@@ -2119,38 +2036,21 @@
                                                       Object* structure,
                                                       uint32_t index,
                                                       Object* holder);
-  MUST_USE_RESULT MaybeObject* SetElementWithCallback(
-      Object* structure,
-      uint32_t index,
-      Object* value,
-      JSObject* holder,
-      StrictModeFlag strict_mode);
+  MaybeObject* SetElementWithCallback(Object* structure,
+                                      uint32_t index,
+                                      Object* value,
+                                      JSObject* holder,
+                                      StrictModeFlag strict_mode);
   MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
       uint32_t index,
       Object* value,
-      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype,
-      SetPropertyMode set_mode);
+      bool check_prototype);
   MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
       uint32_t index,
       Object* value,
-      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype,
-      SetPropertyMode set_mode);
-
-  // Searches the prototype chain for a callback setter and sets the property
-  // with the setter if it finds one. The '*found' flag indicates whether
-  // a setter was found or not.
-  // This function can cause GC and can return a failure result with
-  // '*found==true'.
-  MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
-      String* name,
-      Object* value,
-      PropertyAttributes attributes,
-      bool* found,
-      StrictModeFlag strict_mode);
+      bool check_prototype);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
                                                              DeleteMode mode);
@@ -2165,6 +2065,9 @@
   bool ReferencesObjectFromElements(FixedArray* elements,
                                     ElementsKind kind,
                                     Object* object);
+  bool HasElementInElements(FixedArray* elements,
+                            ElementsKind kind,
+                            uint32_t index);
 
   // Returns true if most of the elements backing storage is used.
   bool HasDenseElements();
@@ -2181,28 +2084,11 @@
       String* name,
       Object* structure,
       PropertyAttributes attributes);
-  MUST_USE_RESULT MaybeObject* DefineElementAccessor(
-      uint32_t index,
-      Object* getter,
-      Object* setter,
-      PropertyAttributes attributes);
-  MUST_USE_RESULT MaybeObject* DefinePropertyAccessor(
+  MUST_USE_RESULT MaybeObject* DefineGetterSetter(
       String* name,
-      Object* getter,
-      Object* setter,
       PropertyAttributes attributes);
-  void LookupInDescriptor(String* name, LookupResult* result);
 
-  // Returns the hidden properties backing store object, currently
-  // a StringDictionary, stored on this object.
-  // If no hidden properties object has been put on this object,
-  // return undefined, unless create_if_absent is true, in which case
-  // a new dictionary is created, added to this object, and returned.
-  MUST_USE_RESULT MaybeObject* GetHiddenPropertiesDictionary(
-      bool create_if_absent);
-  // Updates the existing hidden properties dictionary.
-  MUST_USE_RESULT MaybeObject* SetHiddenPropertiesDictionary(
-      StringDictionary* dictionary);
+  void LookupInDescriptor(String* name, LookupResult* result);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
@@ -2259,9 +2145,6 @@
   // Gives access to raw memory which stores the array's data.
   inline Object** data_start();
 
-  inline Object** GetFirstElementAddress();
-  inline bool ContainsOnlySmisOrHoles();
-
   // Copy operations.
   MUST_USE_RESULT inline MaybeObject* Copy();
   MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2324,16 +2207,7 @@
  protected:
   // Set operation on FixedArray without using write barriers. Can
   // only be used for storing old space objects or smis.
-  static inline void NoWriteBarrierSet(FixedArray* array,
-                                       int index,
-                                       Object* value);
-
-  // Set operation on FixedArray without incremental write barrier. Can
-  // only be used if the object is guaranteed to be white (whiteness witness
-  // is present).
-  static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
-                                                  int index,
-                                                  Object* value);
+  static inline void fast_set(FixedArray* array, int index, Object* value);
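For context on why the barrier may be skipped here: a smi is a tagged immediate rather than a heap pointer, so storing one never creates a reference the collector must track. A minimal hypothetical helper follows; fast_set is protected, so in real code it would have to live inside FixedArray or a subclass.

    // Hypothetical helper: smis are immediates, not heap pointers, so this
    // store never needs a write barrier.
    static void StoreCounter(FixedArray* array, int index, int counter) {
      fast_set(array, index, Smi::FromInt(counter));
    }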
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
@@ -2343,19 +2217,19 @@
 // FixedDoubleArray describes fixed-sized arrays with element type double.
 class FixedDoubleArray: public FixedArrayBase {
  public:
+  inline void Initialize(FixedArray* from);
+  inline void Initialize(FixedDoubleArray* from);
+  inline void Initialize(SeededNumberDictionary* from);
+
   // Setter and getter for elements.
   inline double get_scalar(int index);
-  inline int64_t get_representation(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, double value);
   inline void set_the_hole(int index);
 
   // Checking for the hole.
   inline bool is_the_hole(int index);
 
-  // Copy operations
-  MUST_USE_RESULT inline MaybeObject* Copy();
-
   // Garbage collection support.
   inline static int SizeFor(int length) {
     return kHeaderSize + length * kDoubleSize;
@@ -2395,9 +2269,6 @@
 };
 
 
-class IncrementalMarking;
-
-
 // DescriptorArrays are fixed arrays used to hold instance descriptors.
 // The format of the these objects is:
 // TODO(1399): It should be possible to make room for bit_field3 in the map
@@ -2439,7 +2310,7 @@
   // Set next enumeration index and flush any enum cache.
   void SetNextEnumerationIndex(int value) {
     if (!IsEmpty()) {
-      set(kEnumerationIndexIndex, Smi::FromInt(value));
+      fast_set(this, kEnumerationIndexIndex, Smi::FromInt(value));
     }
   }
   bool HasEnumCache() {
@@ -2460,9 +2331,7 @@
 
   // Initialize or change the enum cache,
   // using the supplied storage for the small "bridge".
-  void SetEnumCache(FixedArray* bridge_storage,
-                    FixedArray* new_cache,
-                    Object* new_index_cache);
+  void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
 
   // Accessors for fetching instance descriptor at descriptor number.
   inline String* GetKey(int descriptor_number);
@@ -2474,39 +2343,17 @@
   inline Object* GetCallbacksObject(int descriptor_number);
   inline AccessorDescriptor* GetCallbacks(int descriptor_number);
   inline bool IsProperty(int descriptor_number);
-  inline bool IsTransitionOnly(int descriptor_number);
+  inline bool IsTransition(int descriptor_number);
   inline bool IsNullDescriptor(int descriptor_number);
   inline bool IsDontEnum(int descriptor_number);
 
-  class WhitenessWitness {
-   public:
-    inline explicit WhitenessWitness(DescriptorArray* array);
-    inline ~WhitenessWitness();
-
-   private:
-    IncrementalMarking* marking_;
-  };
-
   // Accessor for complete descriptor.
   inline void Get(int descriptor_number, Descriptor* desc);
-  inline void Set(int descriptor_number,
-                  Descriptor* desc,
-                  const WhitenessWitness&);
+  inline void Set(int descriptor_number, Descriptor* desc);
 
-  // Transfer a complete descriptor from the src descriptor array to the dst
-  // one, dropping map transitions in CALLBACKS.
-  static void CopyFrom(Handle<DescriptorArray> dst,
-                       int dst_index,
-                       Handle<DescriptorArray> src,
-                       int src_index,
-                       const WhitenessWitness& witness);
-
-  // Transfer a complete descriptor from the src descriptor array to this
-  // descriptor array, dropping map transitions in CALLBACKS.
-  MUST_USE_RESULT MaybeObject* CopyFrom(int dst_index,
-                                        DescriptorArray* src,
-                                        int src_index,
-                                        const WhitenessWitness&);
+  // Transfer a complete descriptor from another descriptor array to
+  // this one.
+  inline void CopyFrom(int index, DescriptorArray* src, int src_index);
 
   // Copy the descriptor array, insert a new descriptor and optionally
   // remove map transitions.  If the descriptor is already present, it is
@@ -2517,17 +2364,17 @@
   MUST_USE_RESULT MaybeObject* CopyInsert(Descriptor* descriptor,
                                           TransitionFlag transition_flag);
 
-  // Return a copy of the array with all transitions and null descriptors
-  // removed. Return a Failure object in case of an allocation failure.
+  // Remove all transitions.  Return a copy of the array with all transitions
+  // removed, or a Failure object if the new array could not be allocated.
   MUST_USE_RESULT MaybeObject* RemoveTransitions();
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Does not check for duplicates.
-  void SortUnchecked(const WhitenessWitness&);
+  void SortUnchecked();
 
   // Sort the instance descriptors by the hash codes of their keys.
   // Checks the result for duplicates.
-  void Sort(const WhitenessWitness&);
+  void Sort();
 
   // Search the instance descriptors for given name.
   inline int Search(String* name);
@@ -2566,10 +2413,9 @@
   static const int kFirstIndex = 3;
 
   // The length of the "bridge" to the enum cache.
-  static const int kEnumCacheBridgeLength = 3;
+  static const int kEnumCacheBridgeLength = 2;
   static const int kEnumCacheBridgeEnumIndex = 0;
   static const int kEnumCacheBridgeCacheIndex = 1;
-  static const int kEnumCacheBridgeIndicesCacheIndex = 2;
 
   // Layout description.
   static const int kBitField3StorageOffset = FixedArray::kHeaderSize;
@@ -2603,20 +2449,6 @@
   static const int kMaxNumberOfDescriptors = 1024 + 512;
 
  private:
-  // An entry in a DescriptorArray, represented as an (array, index) pair.
-  class Entry {
-   public:
-    inline explicit Entry(DescriptorArray* descs, int index) :
-        descs_(descs), index_(index) { }
-
-    inline PropertyType type() { return descs_->GetType(index_); }
-    inline Object* GetCallbackObject() { return descs_->GetValue(index_); }
-
-   private:
-    DescriptorArray* descs_;
-    int index_;
-  };
-
   // Conversion from descriptor number to array indices.
   static int ToKeyIndex(int descriptor_number) {
     return descriptor_number + kFirstIndex;
@@ -2635,12 +2467,10 @@
         NULL_DESCRIPTOR;
   }
   // Swap operation on FixedArray without using write barriers.
-  static inline void NoIncrementalWriteBarrierSwap(
-      FixedArray* array, int first, int second);
+  static inline void fast_swap(FixedArray* array, int first, int second);
 
   // Swap descriptor first and second.
-  inline void NoIncrementalWriteBarrierSwapDescriptors(
-      int first, int second);
+  inline void Swap(int first, int second);
 
   FixedArray* GetContentArray() {
     return FixedArray::cast(get(kContentArrayIndex));
@@ -2658,7 +2488,7 @@
 // encountered and stops when unused elements are encountered.
 //
 // - Elements with key == undefined have not been used yet.
-// - Elements with key == the_hole have been deleted.
+// - Elements with key == null have been deleted.
 //
 // The hash table class is parameterized with a Shape and a Key.
 // Shape must be a class with the following interface:
@@ -2693,7 +2523,7 @@
   }
   static uint32_t HashForObject(Key key, Object* object) { return 0; }
   static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
-    ASSERT(UsesSeed);
+    // Won't be called if UsesSeed isn't overridden by the child class.
     return HashForObject(key, object);
   }
 };
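To make the Shape contract concrete, here is a standalone sketch (not code from this tree) of a policy class providing the members the HashTable template expects; the key type and hash function are placeholders.

    // Standalone sketch of a Shape policy: all key handling lives here, so
    // the HashTable template itself stays generic.
    struct IntKeyShape {
      static const bool UsesSeed = false;
      static bool IsMatch(uint32_t key, uint32_t stored) { return key == stored; }
      static uint32_t Hash(uint32_t key) { return key * 2654435761u; }  // Knuth.
      static uint32_t HashForObject(uint32_t key, uint32_t stored) {
        return Hash(stored);  // Must agree with Hash() for matching keys.
      }
      static const int kPrefixSize = 0;  // No slots reserved before entries.
      static const int kEntrySize = 2;   // One slot for the key, one for a value.
    };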
@@ -2704,8 +2534,7 @@
   // Wrapper methods
   inline uint32_t Hash(Key key) {
     if (Shape::UsesSeed) {
-      return Shape::SeededHash(key,
-          GetHeap()->HashSeed());
+      return Shape::SeededHash(key, GetHeap()->HashSeed());
     } else {
       return Shape::Hash(key);
     }
@@ -2713,8 +2542,7 @@
 
   inline uint32_t HashForObject(Key key, Object* object) {
     if (Shape::UsesSeed) {
-      return Shape::SeededHashForObject(key,
-          GetHeap()->HashSeed(), object);
+      return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
     } else {
       return Shape::HashForObject(key, object);
     }
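Shape::UsesSeed is a compile-time constant, so this branch is resolved per template instantiation; both arms must still compile for every Shape, which is why BaseShape supplies default seeded variants such as SeededHashForObject above. A standalone restatement of the pattern:

    // Standalone sketch of the UsesSeed dispatch; the dead branch is folded
    // away in each instantiation.
    template <class Shape, typename Key>
    uint32_t DispatchHash(Key key, uint32_t seed) {
      if (Shape::UsesSeed) {
        return Shape::SeededHash(key, seed);
      }
      return Shape::Hash(key);
    }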
@@ -2762,10 +2590,10 @@
   // Returns the key at entry.
   Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
 
-  // Tells whether k is a real key.  The hole and undefined are not allowed
+  // Tells whether k is a real key.  Null and undefined are not allowed
   // as keys and can be used to indicate missing or deleted elements.
   bool IsKey(Object* k) {
-    return !k->IsTheHole() && !k->IsUndefined();
+    return !k->IsNull() && !k->IsUndefined();
   }
 
   // Garbage collection support.
@@ -2817,12 +2645,12 @@
 
   // Update the number of elements in the hash table.
   void SetNumberOfElements(int nof) {
-    set(kNumberOfElementsIndex, Smi::FromInt(nof));
+    fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
   }
 
   // Update the number of deleted elements in the hash table.
   void SetNumberOfDeletedElements(int nod) {
-    set(kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+    fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
   }
 
   // Sets the capacity of the hash table.
@@ -2832,7 +2660,7 @@
     // and non-zero.
     ASSERT(capacity > 0);
     ASSERT(capacity <= kMaxCapacity);
-    set(kCapacityIndex, Smi::FromInt(capacity));
+    fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
   }
 
 
@@ -2984,12 +2812,22 @@
 
   // Returns the value at entry.
   Object* ValueAt(int entry) {
-    return this->get(HashTable<Shape, Key>::EntryToIndex(entry) + 1);
+    return this->get(HashTable<Shape, Key>::EntryToIndex(entry)+1);
   }
 
   // Set the value for entry.
-  void ValueAtPut(int entry, Object* value) {
+  // Returns false if the put wasn't performed because the property is
+  // read-only. Returns true on a successful put.
+  bool ValueAtPut(int entry, Object* value) {
+    // Check that this value can actually be written.
+    PropertyDetails details = DetailsAt(entry);
+    // If a value has not been initialized, we allow writing to it even if
+    // it is read-only (a declared const that has not been initialized).
+    if (details.IsReadOnly() && !ValueAt(entry)->IsTheHole()) {
+      return false;
+    }
     this->set(HashTable<Shape, Key>::EntryToIndex(entry) + 1, value);
+    return true;
   }
 
   // Returns the property details for the property at entry.
@@ -3030,7 +2868,7 @@
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
-    this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
+    this->fast_set(this, kNextEnumerationIndexIndex, Smi::FromInt(index));
   }
 
   int NextEnumerationIndex() {
@@ -3110,11 +2948,9 @@
       JSObject* obj,
       int unused_property_fields);
 
-  // Find entry for key, otherwise return kNotFound. Optimized version of
+  // Find entry for key, otherwise return kNotFound. Optimized version of
   // HashTable::FindEntry.
   int FindEntry(String* key);
-
-  bool ContainsTransition(int entry);
 };
 
 
@@ -3163,13 +2999,6 @@
                                               PropertyDetails details);
 
   // Set an existing entry or add a new one if needed.
-  // Return the updated dictionary.
-  MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
-      Handle<SeededNumberDictionary> dictionary,
-      uint32_t index,
-      Handle<Object> value,
-      PropertyDetails details);
-
   MUST_USE_RESULT MaybeObject* Set(uint32_t key,
                                    Object* value,
                                    PropertyDetails details);
@@ -3189,6 +3018,9 @@
   // requires_slow_elements returns false.
   inline uint32_t max_number_key();
 
+  // Remove all entries where the key is a number and (from <= key && key < to).
+  void RemoveNumberEntries(uint32_t from, uint32_t to);
+
   // Bit masks.
   static const int kRequiresSlowElementsMask = 1;
   static const int kRequiresSlowElementsTagSize = 1;
@@ -3209,51 +3041,24 @@
   MUST_USE_RESULT MaybeObject* AddNumberEntry(uint32_t key, Object* value);
 
   // Set an existing entry or add a new one if needed.
-  // Return the updated dictionary.
-  MUST_USE_RESULT static Handle<UnseededNumberDictionary> Set(
-      Handle<UnseededNumberDictionary> dictionary,
-      uint32_t index,
-      Handle<Object> value);
-
   MUST_USE_RESULT MaybeObject* Set(uint32_t key, Object* value);
 };
 
 
-template <int entrysize>
 class ObjectHashTableShape : public BaseShape<Object*> {
  public:
-  static inline bool IsMatch(Object* key, Object* other);
-  static inline uint32_t Hash(Object* key);
-  static inline uint32_t HashForObject(Object* key, Object* object);
-  MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key);
+  static inline bool IsMatch(JSObject* key, Object* other);
+  static inline uint32_t Hash(JSObject* key);
+  static inline uint32_t HashForObject(JSObject* key, Object* object);
+  MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
   static const int kPrefixSize = 0;
-  static const int kEntrySize = entrysize;
+  static const int kEntrySize = 2;
 };
 
 
-// ObjectHashSet holds keys that are arbitrary objects by using the identity
-// hash of the key for hashing purposes.
-class ObjectHashSet: public HashTable<ObjectHashTableShape<1>, Object*> {
- public:
-  static inline ObjectHashSet* cast(Object* obj) {
-    ASSERT(obj->IsHashTable());
-    return reinterpret_cast<ObjectHashSet*>(obj);
-  }
-
-  // Looks up whether the given key is part of this hash set.
-  bool Contains(Object* key);
-
-  // Adds the given key to this hash set.
-  MUST_USE_RESULT MaybeObject* Add(Object* key);
-
-  // Removes the given key from this hash set.
-  MUST_USE_RESULT MaybeObject* Remove(Object* key);
-};
-
-
-// ObjectHashTable maps keys that are arbitrary objects to object values by
+// ObjectHashTable maps keys that are JavaScript objects to object values by
 // using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
  public:
   static inline ObjectHashTable* cast(Object* obj) {
     ASSERT(obj->IsHashTable());
@@ -3262,17 +3067,18 @@
 
   // Looks up the value associated with the given key. The undefined value is
   // returned in case the key is not present.
-  Object* Lookup(Object* key);
+  Object* Lookup(JSObject* key);
 
   // Adds (or overwrites) the value associated with the given key. Mapping a
   // key to the undefined value causes removal of the whole entry.
-  MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value);
+  MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
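A hedged usage sketch for Lookup/Put: the wrapper below is hypothetical, but it follows the MaybeObject allocation-failure protocol used throughout this header, assuming Put follows the usual pattern of returning the possibly reallocated table.

    // Hypothetical caller: map |key| to |value|, propagating allocation
    // failure and adopting the table returned by Put.
    MaybeObject* Associate(ObjectHashTable* table,
                           JSObject* key,
                           Object* value) {
      Object* result;
      { MaybeObject* maybe = table->Put(key, value);
        if (!maybe->ToObject(&result)) return maybe;  // Allocation failed.
      }
      return ObjectHashTable::cast(result);
    }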
 
  private:
   friend class MarkCompactCollector;
 
-  void AddEntry(int entry, Object* key, Object* value);
-  void RemoveEntry(int entry);
+  void AddEntry(int entry, JSObject* key, Object* value);
+  void RemoveEntry(int entry, Heap* heap);
+  inline void RemoveEntry(int entry);
 
   // Returns the index to the value of an entry.
   static inline int EntryToValueIndex(int entry) {
@@ -3319,207 +3125,6 @@
 };
 
 
-// ScopeInfo represents information about different scopes of a source
-// program  and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in ScopeInfo objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-
-// This object provides quick access to scope info details for runtime
-// routines.
-class ScopeInfo : public FixedArray {
- public:
-  static inline ScopeInfo* cast(Object* object);
-
-  // Return the type of this scope.
-  ScopeType Type();
-
-  // Does this scope call eval?
-  bool CallsEval();
-
-  // Return the language mode of this scope.
-  LanguageMode language_mode();
-
-  // Does this scope make a non-strict eval call?
-  bool CallsNonStrictEval() {
-    return CallsEval() && (language_mode() == CLASSIC_MODE);
-  }
-
-  // Return the total number of locals allocated on the stack and in the
-  // context. This includes the parameters that are allocated in the context.
-  int LocalCount();
-
-  // Return the number of stack slots for code. This number consists of two
-  // parts:
-  //  1. One stack slot per stack allocated local.
-  //  2. One stack slot for the function name if it is stack allocated.
-  int StackSlotCount();
-
-  // Return the number of context slots for code if a context is allocated. This
-  // number consists of three parts:
-  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
-  //  2. One context slot per context allocated local.
-  //  3. One context slot for the function name if it is context allocated.
-  // Parameters allocated in the context count as context allocated locals. If
-  // no contexts are allocated for this scope ContextLength returns 0.
-  int ContextLength();
-
-  // Is this scope the scope of a named function expression?
-  bool HasFunctionName();
-
-  // Return if this has context allocated locals.
-  bool HasHeapAllocatedLocals();
-
-  // Return if contexts are allocated for this scope.
-  bool HasContext();
-
-  // Return the function_name if present.
-  String* FunctionName();
-
-  // Return the name of the given parameter.
-  String* ParameterName(int var);
-
-  // Return the name of the given local.
-  String* LocalName(int var);
-
-  // Return the name of the given stack local.
-  String* StackLocalName(int var);
-
-  // Return the name of the given context local.
-  String* ContextLocalName(int var);
-
-  // Return the mode of the given context local.
-  VariableMode ContextLocalMode(int var);
-
-  // Return the initialization flag of the given context local.
-  InitializationFlag ContextLocalInitFlag(int var);
-
-  // Lookup support for serialized scope info. Returns the
-  // the stack slot index for a given slot name if the slot is
-  // present; otherwise returns a value < 0. The name must be a symbol
-  // (canonicalized).
-  int StackSlotIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // context slot index for a given slot name if the slot is present; otherwise
-  // returns a value < 0. The name must be a symbol (canonicalized).
-  // If the slot is present and mode != NULL, sets *mode to the corresponding
-  // mode for that variable.
-  int ContextSlotIndex(String* name,
-                       VariableMode* mode,
-                       InitializationFlag* init_flag);
-
-  // Lookup support for serialized scope info. Returns the
-  // parameter index for a given parameter name if the parameter is present;
-  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
-  int ParameterIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // function context slot index if the function name is present (named
-  // function expressions, only), otherwise returns a value < 0. The name
-  // must be a symbol (canonicalized).
-  int FunctionContextSlotIndex(String* name, VariableMode* mode);
-
-  static Handle<ScopeInfo> Create(Scope* scope);
-
-  // Serializes empty scope info.
-  static ScopeInfo* Empty();
-
-#ifdef DEBUG
-  void Print();
-#endif
-
-  // The layout of the static part of a ScopeInfo is as follows. Each entry is
-  // numeric and occupies one array slot.
-  // 1. A set of properties of the scope
-  // 2. The number of parameters. This only applies to function scopes. For
-  //    non-function scopes this is 0.
-  // 3. The number of non-parameter variables allocated on the stack.
-  // 4. The number of non-parameter and parameter variables allocated in the
-  //    context.
-#define FOR_EACH_NUMERIC_FIELD(V)          \
-  V(Flags)                                 \
-  V(ParameterCount)                        \
-  V(StackLocalCount)                       \
-  V(ContextLocalCount)
-
-#define FIELD_ACCESSORS(name)                            \
-  void Set##name(int value) {                            \
-    set(k##name, Smi::FromInt(value));                   \
-  }                                                      \
-  int name() {                                           \
-    if (length() > 0) {                                  \
-      return Smi::cast(get(k##name))->value();           \
-    } else {                                             \
-      return 0;                                          \
-    }                                                    \
-  }
-  FOR_EACH_NUMERIC_FIELD(FIELD_ACCESSORS)
-#undef FIELD_ACCESSORS
-
- private:
-  enum {
-#define DECL_INDEX(name) k##name,
-  FOR_EACH_NUMERIC_FIELD(DECL_INDEX)
-#undef DECL_INDEX
-#undef FOR_EACH_NUMERIC_FIELD
-  kVariablePartIndex
-  };
-
-  // The layout of the variable part of a ScopeInfo is as follows:
-  // 1. ParameterEntries:
-  //    This part stores the names of the parameters for function scopes. One
-  //    slot is used per parameter, so in total this part occupies
-  //    ParameterCount() slots in the array. For other scopes than function
-  //    scopes ParameterCount() is 0.
-  // 2. StackLocalEntries:
-  //    Contains the names of local variables that are allocated on the stack,
-  //    in increasing order of the stack slot index. One slot is used per stack
-  //    local, so in total this part occupies StackLocalCount() slots in the
-  //    array.
-  // 3. ContextLocalNameEntries:
-  //    Contains the names of local variables and parameters that are allocated
-  //    in the context. They are stored in increasing order of the context slot
-  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
-  //    context local, so in total this part occupies ContextLocalCount() slots
-  //    in the array.
-  // 4. ContextLocalInfoEntries:
-  //    Contains the variable modes and initialization flags corresponding to
-  //    the context locals in ContextLocalNameEntries. One slot is used per
-  //    context local, so in total this part occupies ContextLocalCount()
-  //    slots in the array.
-  // 5. FunctionNameEntryIndex:
-  //    If the scope belongs to a named function expression this part contains
-  //    information about the function variable. It always occupies two array
-  //    slots:  a. The name of the function variable.
-  //            b. The context or stack slot index for the variable.
-  int ParameterEntriesIndex();
-  int StackLocalEntriesIndex();
-  int ContextLocalNameEntriesIndex();
-  int ContextLocalInfoEntriesIndex();
-  int FunctionNameEntryIndex();
-
-  // Location of the function variable for named function expressions.
-  enum FunctionVariableInfo {
-    NONE,     // No function name present.
-    STACK,    // Function
-    CONTEXT,
-    UNUSED
-  };
-
-  // Properties of scopes.
-  class TypeField:             public BitField<ScopeType,            0, 3> {};
-  class CallsEvalField:        public BitField<bool,                 3, 1> {};
-  class LanguageModeField:     public BitField<LanguageMode,         4, 2> {};
-  class FunctionVariableField: public BitField<FunctionVariableInfo, 6, 2> {};
-  class FunctionVariableMode:  public BitField<VariableMode,         8, 3> {};
-
-  // BitFields representing the encoded information for context locals in the
-  // ContextLocalInfoEntries part.
-  class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
-  class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
-};
-
-
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
 // needs very limited number of distinct normalized maps.
@@ -3541,12 +3146,11 @@
 };
 
 
-// ByteArray represents fixed sized byte arrays.  Used for the relocation info
-// that is attached to code objects.
+// ByteArray represents fixed-size byte arrays.  Used by the outside world,
+// such as PCRE, and also by the memory allocator and garbage collector to
+// fill in free blocks in the heap.
 class ByteArray: public FixedArrayBase {
  public:
-  inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
-
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
@@ -3603,41 +3207,6 @@
 };
 
 
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use.  Used by the heap and GC.
-class FreeSpace: public HeapObject {
- public:
-  // [size]: size of the free space including the header.
-  inline int size();
-  inline void set_size(int value);
-
-  inline int Size() { return size(); }
-
-  // Casting.
-  static inline FreeSpace* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
-  inline void FreeSpacePrint() {
-    FreeSpacePrint(stdout);
-  }
-  void FreeSpacePrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void FreeSpaceVerify();
-#endif
-
-  // Layout description.
-  // Size is smi tagged when it is stored.
-  static const int kSizeOffset = HeapObject::kHeaderSize;
-  static const int kHeaderSize = kSizeOffset + kPointerSize;
-
-  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
-};
-
-
 // An ExternalArray represents a fixed-size array of primitive values
 // which live outside the JavaScript heap. Its subclasses are used to
 // implement the CanvasArray types being defined in the WebGL
@@ -3688,7 +3257,7 @@
 
   // Setter and getter.
   inline uint8_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber and
@@ -3717,12 +3286,12 @@
  public:
   // Setter and getter.
   inline int8_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalByteArray* cast(Object* obj);
@@ -3746,12 +3315,12 @@
  public:
   // Setter and getter.
   inline uint8_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint8_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedByteArray* cast(Object* obj);
@@ -3775,12 +3344,12 @@
  public:
   // Setter and getter.
   inline int16_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalShortArray* cast(Object* obj);
@@ -3804,12 +3373,12 @@
  public:
   // Setter and getter.
   inline uint16_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint16_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedShortArray* cast(Object* obj);
@@ -3833,12 +3402,12 @@
  public:
   // Setter and getter.
   inline int32_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, int32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalIntArray* cast(Object* obj);
@@ -3862,12 +3431,12 @@
  public:
   // Setter and getter.
   inline uint32_t get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, uint32_t value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalUnsignedIntArray* cast(Object* obj);
@@ -3891,12 +3460,12 @@
  public:
   // Setter and getter.
   inline float get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, float value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalFloatArray* cast(Object* obj);
@@ -3920,12 +3489,12 @@
  public:
   // Setter and getter.
   inline double get_scalar(int index);
-  MUST_USE_RESULT inline MaybeObject* get(int index);
+  inline MaybeObject* get(int index);
   inline void set(int index, double value);
 
   // This accessor applies the correct conversion from Smi, HeapNumber
   // and undefined.
-  MUST_USE_RESULT MaybeObject* SetValue(uint32_t index, Object* value);
+  MaybeObject* SetValue(uint32_t index, Object* value);
 
   // Casting.
   static inline ExternalDoubleArray* cast(Object* obj);
@@ -3984,6 +3553,11 @@
   DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
   DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
 
+  // Unchecked accessor to be used during GC.
+  FixedArray* UncheckedLiteralArray() {
+    return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
+  }
+
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
@@ -4058,48 +3632,8 @@
 };
 
 
-// Forward declaration.
-class JSGlobalPropertyCell;
-
-// TypeFeedbackCells is a fixed array used to hold the association between
-// cache cells and AST ids for code generated by the full compiler.
-// The format of the these objects is
-//   [i * 2]: Global property cell of ith cache cell.
-//   [i * 2 + 1]: Ast ID for ith cache cell.
-class TypeFeedbackCells: public FixedArray {
- public:
-  int CellCount() { return length() / 2; }
-  static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
-
-  // Accessors for AST ids associated with cache values.
-  inline Smi* AstId(int index);
-  inline void SetAstId(int index, Smi* id);
-
-  // Accessors for global property cells holding the cache values.
-  inline JSGlobalPropertyCell* Cell(int index);
-  inline void SetCell(int index, JSGlobalPropertyCell* cell);
-
-  // The object that indicates an uninitialized cache.
-  static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
-  // The object that indicates a megamorphic state.
-  static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
-  // A raw version of the uninitialized sentinel that's safe to read during
-  // garbage collection (e.g., for patching the cache).
-  static inline Object* RawUninitializedSentinel(Heap* heap);
-
-  // Casting.
-  static inline TypeFeedbackCells* cast(Object* obj);
-
-  static const int kForInFastCaseMarker = 0;
-  static const int kForInSlowCaseMarker = 1;
-};
-
-
-// Forward declaration.
 class SafepointEntry;
-class TypeFeedbackInfo;
+
 
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
@@ -4165,25 +3699,14 @@
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
 
-  // [handler_table]: Fixed array containing offsets of exception handlers.
-  DECL_ACCESSORS(handler_table, FixedArray)
-
   // [deoptimization_data]: Array containing data for deopt.
   DECL_ACCESSORS(deoptimization_data, FixedArray)
 
-  // [type_feedback_info]: Struct containing type feedback information.
-  // Will contain either a TypeFeedbackInfo object, or undefined.
-  DECL_ACCESSORS(type_feedback_info, Object)
-
-  // [gc_metadata]: Field used to hold GC related metadata. The contents of this
+  // [next_code_flushing_candidate]: Field only used during garbage
+  // collection to hold code flushing candidates. The contents of this
   // field do not have to be traced during garbage collection since
   // it is only used by the garbage collector itself.
-  DECL_ACCESSORS(gc_metadata, Object)
-
-  // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
-  // at the moment when this object was created.
-  inline void set_ic_age(int count);
-  inline int ic_age();
+  DECL_ACCESSORS(next_code_flushing_candidate, Object)
 
   // Unchecked accessors to be used during GC.
   inline ByteArray* unchecked_relocation_info();
@@ -4219,11 +3742,6 @@
   inline int major_key();
   inline void set_major_key(int value);
 
-  // For stubs, tells whether they should always exist, so that they can be
-  // called from other stubs.
-  inline bool is_pregenerated();
-  inline void set_is_pregenerated(bool value);
-
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -4238,16 +3756,6 @@
   inline bool has_debug_break_slots();
   inline void set_has_debug_break_slots(bool value);
 
-  // [compiled_with_optimizing]: For FUNCTION kind, tells if it has
-  // been compiled with IsOptimizing set to true.
-  inline bool is_compiled_optimizable();
-  inline void set_compiled_optimizable(bool value);
-
-  // [has_self_optimization_header]: For FUNCTION kind, tells if it has
-  // a self-optimization header.
-  inline bool has_self_optimization_header();
-  inline void set_self_optimization_header(bool value);
-
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -4293,11 +3801,6 @@
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
 
-  // [has_function_cache]: For kind STUB tells whether there is a function
-  // cache is passed to the stub.
-  inline bool has_function_cache();
-  inline void set_has_function_cache(bool flag);
-
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
@@ -4308,28 +3811,6 @@
   // Find the first map in an IC stub.
   Map* FindFirstMap();
 
-  class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
-  class ExtraICStateKeyedAccessGrowMode:
-      public BitField<KeyedAccessGrowMode, 1, 1> {};  // NOLINT
-
-  static const int kExtraICStateGrowModeShift = 1;
-
-  static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) {
-    return ExtraICStateStrictMode::decode(extra_ic_state);
-  }
-
-  static inline KeyedAccessGrowMode GetKeyedAccessGrowMode(
-      ExtraICState extra_ic_state) {
-    return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state);
-  }
-
-  static inline ExtraICState ComputeExtraICState(
-      KeyedAccessGrowMode grow_mode,
-      StrictModeFlag strict_mode) {
-    return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) |
-        ExtraICStateStrictMode::encode(strict_mode);
-  }
-
   // Flags operations.
   static inline Flags ComputeFlags(
       Kind kind,
@@ -4424,6 +3905,10 @@
   void CodeVerify();
 #endif
 
+  // Returns the isolate/heap this code object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -4431,15 +3916,13 @@
   // Layout description.
   static const int kInstructionSizeOffset = HeapObject::kHeaderSize;
   static const int kRelocationInfoOffset = kInstructionSizeOffset + kIntSize;
-  static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
   static const int kDeoptimizationDataOffset =
-      kHandlerTableOffset + kPointerSize;
-  static const int kTypeFeedbackInfoOffset =
+      kRelocationInfoOffset + kPointerSize;
+  static const int kNextCodeFlushingCandidateOffset =
       kDeoptimizationDataOffset + kPointerSize;
-  static const int kGCMetadataOffset = kTypeFeedbackInfoOffset + kPointerSize;
-  static const int kICAgeOffset =
-      kGCMetadataOffset + kPointerSize;
-  static const int kFlagsOffset = kICAgeOffset + kIntSize;
+  static const int kFlagsOffset =
+      kNextCodeFlushingCandidateOffset + kPointerSize;
+
   static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
   static const int kKindSpecificFlagsSize = 2 * kIntSize;
 
@@ -4461,14 +3944,11 @@
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
   static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
-  static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
 
   static const int kFullCodeFlags = kOptimizableOffset + 1;
   class FullCodeFlagsHasDeoptimizationSupportField:
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
-  class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
-  class FullCodeFlagsHasSelfOptimizationHeader: public BitField<bool, 3, 1> {};
 
   static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
 
@@ -4480,16 +3960,14 @@
   // Flags layout.  BitField<type, shift, size>.
   class ICStateField: public BitField<InlineCacheState, 0, 3> {};
   class TypeField: public BitField<PropertyType, 3, 4> {};
-  class CacheHolderField: public BitField<InlineCacheHolderFlag, 7, 1> {};
-  class KindField: public BitField<Kind, 8, 4> {};
+  class KindField: public BitField<Kind, 7, 4> {};
+  class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
   class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
-  class IsPregeneratedField: public BitField<bool, 14, 1> {};
 
   // Signed field cannot be encoded using the BitField class.
-  static const int kArgumentsCountShift = 15;
+  static const int kArgumentsCountShift = 14;
   static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
 
-  // This constant should be encodable in an ARM instruction.
   static const int kFlagsNotUsedInLookup =
       TypeField::kMask | CacheHolderField::kMask;
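Since the flags word is assembled purely from BitField<type, shift, size> instantiations, a minimal standalone re-implementation shows how encode/decode round-trip; the field position below mirrors KindField but is otherwise illustrative.

    #include <cassert>
    #include <cstdint>

    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    enum Kind { STUB = 0, FUNCTION = 1 };
    typedef BitField<Kind, 7, 4> KindField;  // Kind occupies bits 7..10.

    int main() {
      uint32_t flags = KindField::encode(FUNCTION);
      assert(KindField::decode(flags) == FUNCTION);  // Round-trips.
      return 0;
    }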
 
@@ -4623,12 +4101,8 @@
         (bit_field2() & kElementsKindMask) >> kElementsKindShift);
   }
 
-  // Tells whether the instance has fast elements that are only Smis.
-  inline bool has_fast_smi_only_elements() {
-    return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
-  }
-
   // Tells whether the instance has fast elements.
+  // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
   inline bool has_fast_elements() {
     return elements_kind() == FAST_ELEMENTS;
   }
@@ -4637,10 +4111,6 @@
     return elements_kind() == FAST_DOUBLE_ELEMENTS;
   }
 
-  inline bool has_non_strict_arguments_elements() {
-    return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
-  }
-
   inline bool has_external_array_elements() {
     ElementsKind kind(elements_kind());
     return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4651,14 +4121,6 @@
     return elements_kind() == DICTIONARY_ELEMENTS;
   }
 
-  inline bool has_slow_elements_kind() {
-    return elements_kind() == DICTIONARY_ELEMENTS
-        || elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
-  }
-
-  static bool IsValidElementsTransition(ElementsKind from_kind,
-                                        ElementsKind to_kind);
-
   // Tells whether the map is attached to SharedFunctionInfo
   // (for inobject slack tracking).
   inline void set_attached_to_shared_function_info(bool value);
@@ -4707,7 +4169,6 @@
   //    1 + 2 * i: prototype
   //    2 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
-
   inline FixedArray* unchecked_prototype_transitions();
 
   static const int kProtoTransitionHeaderSize = 1;
@@ -4717,14 +4178,14 @@
   static const int kProtoTransitionMapOffset = 1;
 
   inline int NumberOfProtoTransitions() {
-    FixedArray* cache = prototype_transitions();
+    FixedArray* cache = unchecked_prototype_transitions();
     if (cache->length() == 0) return 0;
     return
         Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
   }
 
   inline void SetNumberOfProtoTransitions(int value) {
-    FixedArray* cache = prototype_transitions();
+    FixedArray* cache = unchecked_prototype_transitions();
     ASSERT(cache->length() != 0);
     cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
                          Smi::FromInt(value));
@@ -4746,15 +4207,35 @@
   // instance descriptors.
   MUST_USE_RESULT MaybeObject* CopyDropTransitions();
 
+  // Returns this map if it already has elements that are fast, otherwise
+  // returns a copy of the map, with all transitions dropped from the
+  // descriptors and the ElementsKind set to FAST_ELEMENTS.
+  MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
+
+  // Returns this map if it already has fast elements that are doubles,
+  // otherwise returns a copy of the map, with all transitions dropped from the
+  // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
+  MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
+
+  // Returns this map if it already has dictionary elements, otherwise returns a
+  // copy of the map, with all transitions dropped from the descriptors and the
+  // ElementsKind set to DICTIONARY_ELEMENTS.
+  MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
+
+  // Returns a new map with all transitions dropped from the descriptors and the
+  // ElementsKind set.
+  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+      ElementsKind elements_kind,
+      bool safe_to_add_transition);
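A hedged sketch of calling one of these helpers; the wrapper is hypothetical and switches only the map, whereas a real caller would also convert the elements backing store (compare SetFastDoubleElementsCapacityAndLength above).

    // Hypothetical caller: move |object| to the double-elements map,
    // propagating allocation failure via the MaybeObject protocol.
    MaybeObject* ToDoubleElements(JSObject* object) {
      Object* new_map;
      { MaybeObject* maybe_map = object->map()->GetFastDoubleElementsMap();
        if (!maybe_map->ToObject(&new_map)) return maybe_map;
      }
      object->set_map(Map::cast(new_map));
      return object;
    }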
+
   // Returns the property index for name (only valid for FAST MODE).
   int PropertyIndexFor(String* name);
 
   // Returns the next free property index (only valid for FAST MODE).
   int NextFreePropertyIndex();
 
-  // Returns the number of properties described in instance_descriptors
-  // filtering out properties with the specified attributes.
-  int NumberOfDescribedProperties(PropertyAttributes filter = NONE);
+  // Returns the number of properties described in instance_descriptors.
+  int NumberOfDescribedProperties();
 
   // Casting.
   static inline Map* cast(Object* obj);
@@ -4768,9 +4249,6 @@
   inline void ClearCodeCache(Heap* heap);
 
   // Update code cache.
-  static void UpdateCodeCache(Handle<Map> map,
-                              Handle<String> name,
-                              Handle<Code> code);
   MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code);
 
   // Returns the found code or undefined if absent.
@@ -4788,21 +4266,12 @@
   // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
   void CreateBackPointers();
 
-  void CreateOneBackPointer(Object* transition_target);
-
   // Set all map transitions from this map to dead maps to null.
   // Also, restore the original prototype on the targets of these
   // transitions, so that we do not process this map again while
   // following back pointers.
   void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
 
-  // Restore a possible back pointer in the prototype field of object.
-  // Return true in that case and false otherwise. Set *keep_entry to
-  // true when a live map transition has been found.
-  bool RestoreOneBackPointer(Object* object,
-                             Object* real_prototype,
-                             bool* keep_entry);
-
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
 
@@ -4812,30 +4281,11 @@
   // The "shared" flags of both this map and |other| are ignored.
   bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
 
-  // Returns the contents of this map's descriptor array for the given string.
-  // May return NULL. |safe_to_add_transition| is set to false and NULL
-  // is returned if adding transitions is not allowed.
-  Object* GetDescriptorContents(String* sentinel_name,
-                                bool* safe_to_add_transitions);
-
-  // Returns the map that this map transitions to if its elements_kind
-  // is changed to |elements_kind|, or NULL if no such map is cached yet.
-  // |safe_to_add_transitions| is set to false if adding transitions is not
-  // allowed.
-  Map* LookupElementsTransitionMap(ElementsKind elements_kind,
-                                   bool* safe_to_add_transition);
-
-  // Adds an entry to this map's descriptor array for a transition to
-  // |transitioned_map| when its elements_kind is changed to |elements_kind|.
-  MUST_USE_RESULT MaybeObject* AddElementsTransition(
-      ElementsKind elements_kind, Map* transitioned_map);
-
-  // Returns the transitioned map for this map with the most generic
-  // elements_kind that's found in |candidates|, or null handle if no match is
-  // found at all.
-  Handle<Map> FindTransitionedMap(MapHandleList* candidates);
-  Map* FindTransitionedMap(MapList* candidates);
-
+  // Returns true if this map and |other| describe equivalent objects.
+  // The "shared" flags of both this map and |other| are ignored.
+  bool EquivalentTo(Map* other) {
+    return EquivalentToForNormalization(other, KEEP_INOBJECT_PROPERTIES);
+  }
 
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
@@ -4852,6 +4302,10 @@
   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);
 
+  // Returns the isolate/heap this map belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
   typedef void (*TraverseCallback)(Map* map, void* data);
 
   void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4860,8 +4314,7 @@
 
   Object* GetPrototypeTransition(Object* prototype);
 
-  MUST_USE_RESULT MaybeObject* PutPrototypeTransition(Object* prototype,
-                                                      Map* map);
+  MaybeObject* PutPrototypeTransition(Object* prototype, Map* map);
 
   static const int kMaxPreAllocatedPropertyFields = 255;
 
@@ -4889,7 +4342,7 @@
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
-  // being continuously allocated.
+  // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
       Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4929,7 +4382,7 @@
   static const int kStringWrapperSafeForDefaultValueOf = 2;
   static const int kAttachedToSharedFunctionInfo = 3;
   // No bits can be used after kElementsKindFirstBit, they are all reserved for
-  // storing ElementKind.
+  // storing the ElementsKind.
   static const int kElementsKindShift = 4;
   static const int kElementsKindBitCount = 4;
 
@@ -4938,9 +4391,6 @@
       ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
   static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
       (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
-  static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
-      static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
-                          Map::kElementsKindShift) - 1;
 
   // Bit positions for bit field 3
   static const int kIsShared = 0;
@@ -4955,7 +4405,6 @@
                               kSize> BodyDescriptor;
 
  private:
-  String* elements_transition_sentinel_name();
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
 };
 
@@ -4986,12 +4435,6 @@
     COMPILATION_TYPE_EVAL = 1
   };
 
-  // Script compilation state.
-  enum CompilationState {
-    COMPILATION_STATE_INITIAL = 0,
-    COMPILATION_STATE_COMPILED = 1
-  };
-
   // [source]: the script source.
   DECL_ACCESSORS(source, Object)
 
@@ -5023,9 +4466,6 @@
   // [compilation]: how the script was compiled.
   DECL_ACCESSORS(compilation_type, Smi)
 
-  // [is_compiled]: determines whether the script has already been compiled.
-  DECL_ACCESSORS(compilation_state, Smi)
-
   // [line_ends]: FixedArray of line ends positions.
   DECL_ACCESSORS(line_ends, Object)
 
@@ -5062,9 +4502,7 @@
   static const int kWrapperOffset = kContextOffset + kPointerSize;
   static const int kTypeOffset = kWrapperOffset + kPointerSize;
   static const int kCompilationTypeOffset = kTypeOffset + kPointerSize;
-  static const int kCompilationStateOffset =
-      kCompilationTypeOffset + kPointerSize;
-  static const int kLineEndsOffset = kCompilationStateOffset + kPointerSize;
+  static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize;
   static const int kIdOffset = kLineEndsOffset + kPointerSize;
   static const int kEvalFromSharedOffset = kIdOffset + kPointerSize;
   static const int kEvalFrominstructionsOffsetOffset =
@@ -5108,10 +4546,7 @@
   V(Math, atan, MathATan)                           \
   V(Math, exp, MathExp)                             \
   V(Math, sqrt, MathSqrt)                           \
-  V(Math, pow, MathPow)                             \
-  V(Math, random, MathRandom)                       \
-  V(Math, max, MathMax)                             \
-  V(Math, min, MathMin)
+  V(Math, pow, MathPow)
 
 
 enum BuiltinFunctionId {
@@ -5137,7 +4572,7 @@
   DECL_ACCESSORS(code, Code)
 
   // [scope_info]: Scope info.
-  DECL_ACCESSORS(scope_info, ScopeInfo)
+  DECL_ACCESSORS(scope_info, SerializedScopeInfo)
 
   // [construct stub]: Code stub for constructing instances of this function.
   DECL_ACCESSORS(construct_stub, Code)
@@ -5325,14 +4760,8 @@
 
   // A counter used to determine when to stress the deoptimizer with a
   // deopt.
-  inline int deopt_counter();
-  inline void set_deopt_counter(int counter);
-
-  inline int profiler_ticks();
-  inline void set_profiler_ticks(int ticks);
-
-  inline int ast_node_count();
-  inline void set_ast_node_count(int count);
+  inline Smi* deopt_counter();
+  inline void set_deopt_counter(Smi* counter);
 
   // Add information on assignments of the form this.x = ...;
   void SetThisPropertyAssignmentsInfo(
@@ -5365,20 +4794,8 @@
   // spending time attempting to optimize it again.
   DECL_BOOLEAN_ACCESSORS(optimization_disabled)
 
-  // Indicates the language mode of the function's code as defined by the
-  // current harmony drafts for the next ES language standard. Possible
-  // values are:
-  // 1. CLASSIC_MODE - Unrestricted syntax and semantics, same as in ES5.
-  // 2. STRICT_MODE - Restricted syntax and semantics, same as in ES5.
-  // 3. EXTENDED_MODE - Only available under the harmony flag, not part of ES5.
-  inline LanguageMode language_mode();
-  inline void set_language_mode(LanguageMode language_mode);
-
-  // Indicates whether the language mode of this function is CLASSIC_MODE.
-  inline bool is_classic_mode();
-
-  // Indicates whether the language mode of this function is EXTENDED_MODE.
-  inline bool is_extended_mode();
+  // Indicates whether the function is a strict mode function.
+  DECL_BOOLEAN_ACCESSORS(strict_mode)
 
   // False if the function definitely does not allocate an arguments object.
   DECL_BOOLEAN_ACCESSORS(uses_arguments)
@@ -5406,15 +4823,6 @@
   // through the API, which does not change this flag).
   DECL_BOOLEAN_ACCESSORS(is_anonymous)
 
-  // Is this a function or top-level/eval code.
-  DECL_BOOLEAN_ACCESSORS(is_function)
-
-  // Indicates that the function cannot be optimized.
-  DECL_BOOLEAN_ACCESSORS(dont_optimize)
-
-  // Indicates that the function cannot be inlined.
-  DECL_BOOLEAN_ACCESSORS(dont_inline)
-
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -5423,8 +4831,9 @@
   void EnableDeoptimizationSupport(Code* recompiled);
 
   // Disable (further) attempted optimization of all functions sharing this
-  // shared function info.
-  void DisableOptimization();
+  // shared function info.  The function is the one we actually tried to
+  // optimize.
+  void DisableOptimization(JSFunction* function);
 
   // Lookup the bailout ID and ASSERT that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
@@ -5451,7 +4860,7 @@
 
   // [source code]: Source code for the function.
   bool HasSourceCode();
-  Handle<Object> GetSourceCode();
+  Object* GetSourceCode();
 
   inline int opt_count();
   inline void set_opt_count(int opt_count);
@@ -5478,13 +4887,6 @@
   void SharedFunctionInfoVerify();
 #endif
 
-  // Helpers to compile the shared code.  Returns true on success, false on
-  // failure (e.g., stack overflow during compilation).
-  static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                             ClearExceptionFlag flag);
-  static bool CompileLazy(Handle<SharedFunctionInfo> shared,
-                          ClearExceptionFlag flag);
-
   // Casting.
   static inline SharedFunctionInfo* cast(Object* obj);
 
@@ -5508,12 +4910,12 @@
       kInferredNameOffset + kPointerSize;
   static const int kThisPropertyAssignmentsOffset =
       kInitialMapOffset + kPointerSize;
-  static const int kProfilerTicksOffset =
+  static const int kDeoptCounterOffset =
       kThisPropertyAssignmentsOffset + kPointerSize;
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
   static const int kLengthOffset =
-      kProfilerTicksOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -5531,11 +4933,8 @@
       kCompilerHintsOffset + kPointerSize;
   static const int kOptCountOffset =
       kThisPropertyAssignmentsCountOffset + kPointerSize;
-  static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
-  static const int kDeoptCounterOffset =
-      kAstNodeCountOffset + kPointerSize;
   // Total size.
-  static const int kSize = kDeoptCounterOffset + kPointerSize;
+  static const int kSize = kOptCountOffset + kPointerSize;
 #else
   // The only reason to use smi fields instead of int fields
   // is to allow iteration without maps decoding during
@@ -5547,7 +4946,7 @@
   // word is not set and thus this word cannot be treated as pointer
   // to HeapObject during old space traversal.
   static const int kLengthOffset =
-      kProfilerTicksOffset + kPointerSize;
+      kDeoptCounterOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -5571,11 +4970,8 @@
   static const int kOptCountOffset =
       kThisPropertyAssignmentsCountOffset + kIntSize;
 
-  static const int kAstNodeCountOffset = kOptCountOffset + kIntSize;
-  static const int kDeoptCounterOffset = kAstNodeCountOffset + kIntSize;
-
   // Total size.
-  static const int kSize = kDeoptCounterOffset + kIntSize;
+  static const int kSize = kOptCountOffset + kIntSize;
 
 #endif
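
Both layout branches above rely on the same offset-chaining idiom: each field offset is defined in terms of the previous one, so removing kAstNodeCountOffset and kDeoptCounterOffset renumbers every later field automatically. A standalone sketch of the idiom with hypothetical field names:

    class ExampleLayout {
     public:
      static const int kPointerSize = sizeof(void*);    // host pointer width
      static const int kHeaderSize = 2 * kPointerSize;  // hypothetical header
      // Each offset chains off the previous one; deleting a field
      // re-derives everything below it at compile time.
      static const int kFirstFieldOffset = kHeaderSize;
      static const int kSecondFieldOffset = kFirstFieldOffset + kPointerSize;
      static const int kSize = kSecondFieldOffset + kPointerSize;
    };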
 
@@ -5615,16 +5011,12 @@
     kCodeAgeShift,
     kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
     kStrictModeFunction,
-    kExtendedModeFunction,
     kUsesArguments,
     kHasDuplicateParameters,
     kNative,
     kBoundFunction,
     kIsAnonymous,
     kNameShouldPrintAsAnonymous,
-    kIsFunction,
-    kDontOptimize,
-    kDontInline,
     kCompilerHintsCount  // Pseudo entry
   };
 
@@ -5645,30 +5037,22 @@
  public:
   // Constants for optimizing codegen for strict mode function and
   // native tests.
-  // Allows to use byte-width instructions.
+  // Allows to use byte-width instructions.
   static const int kStrictModeBitWithinByte =
       (kStrictModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
-  static const int kExtendedModeBitWithinByte =
-      (kExtendedModeFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
-
   static const int kNativeBitWithinByte =
       (kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
-  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
-      (kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
 #elif __BYTE_ORDER == __BIG_ENDIAN
   static const int kStrictModeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
-  static const int kExtendedModeByteOffset = kCompilerHintsOffset +
-      (kCompilerHintsSize - 1) -
-      ((kExtendedModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
   static const int kNativeByteOffset = kCompilerHintsOffset +
       (kCompilerHintsSize - 1) -
       ((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
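
The byte offsets above exist so a flag inside the multi-byte compiler-hints field can be tested with a single one-byte load. A sketch of the arithmetic for the little-endian branch (TestBit and its parameters are illustrative, not V8 API):

    #include <cstdint>

    static bool TestBit(const uint8_t* object_base, int field_offset,
                        int bit_index) {
      int byte_offset = field_offset + bit_index / 8;  // byte holding the bit
      int bit_within_byte = bit_index % 8;             // position in that byte
      return (object_base[byte_offset] & (1 << bit_within_byte)) != 0;
    }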
@@ -5724,14 +5108,6 @@
   // recompiled the next time it is executed.
   void MarkForLazyRecompilation();
 
-  // Helpers to compile this function.  Returns true on success, false on
-  // failure (e.g., stack overflow during compilation).
-  static bool CompileLazy(Handle<JSFunction> function,
-                          ClearExceptionFlag flag);
-  static bool CompileOptimized(Handle<JSFunction> function,
-                               int osr_ast_id,
-                               ClearExceptionFlag flag);
-
   // Tells whether or not the function is already marked for lazy
   // recompilation.
   inline bool IsMarkedForLazyRecompilation();
@@ -5739,8 +5115,7 @@
   // Check whether or not this function is inlineable.
   bool IsInlineable();
 
-  // [literals_or_bindings]: Fixed array holding either
-  // the materialized literals or the bindings of a bound function.
+  // [literals]: Fixed array holding the materialized literals.
   //
   // If the function contains object, regexp or array literals, the
   // literals array prefix contains the object, regexp, and array
@@ -5749,23 +5124,11 @@
   // or array functions.  Performing a dynamic lookup, we might end up
   // using the functions from a new context that we should not have
   // access to.
-  //
-  // On bound functions, the array is a (copy-on-write) fixed-array containing
-  // the function that was bound, bound this-value and any bound
-  // arguments. Bound functions never contain literals.
-  DECL_ACCESSORS(literals_or_bindings, FixedArray)
-
-  inline FixedArray* literals();
-  inline void set_literals(FixedArray* literals);
-
-  inline FixedArray* function_bindings();
-  inline void set_function_bindings(FixedArray* bindings);
+  DECL_ACCESSORS(literals, FixedArray)
 
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
   inline void set_initial_map(Map* value);
-  MUST_USE_RESULT inline MaybeObject* set_initial_map_and_cache_transitions(
-      Map* value);
   inline bool has_initial_map();
 
   // Get and set the prototype property on a JSFunction. If the
@@ -5776,7 +5139,7 @@
   inline bool has_instance_prototype();
   inline Object* prototype();
   inline Object* instance_prototype();
-  MUST_USE_RESULT MaybeObject* SetInstancePrototype(Object* value);
+  Object* SetInstancePrototype(Object* value);
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
 
   // After prototype is removed, it will not be created when accessed, and
@@ -5849,11 +5212,6 @@
   static const int kLiteralsPrefixSize = 1;
   static const int kLiteralGlobalContextIndex = 0;
 
-  // Layout of the bound-function binding array.
-  static const int kBoundFunctionIndex = 0;
-  static const int kBoundThisIndex = 1;
-  static const int kBoundArgumentsStartIndex = 2;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSFunction);
 };
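
The deleted constants above fix the layout of a bound function's bindings array: slot 0 holds the bound target function, slot 1 the bound receiver, and slots 2 onward the bound arguments. A simplified unpacking sketch using placeholder types rather than V8's:

    #include <vector>

    struct Binding {
      void* target;             // slot kBoundFunctionIndex (0)
      void* bound_this;         // slot kBoundThisIndex (1)
      std::vector<void*> args;  // slots kBoundArgumentsStartIndex (2) onward
    };

    static Binding Unpack(const std::vector<void*>& bindings) {
      Binding b;
      b.target = bindings[0];
      b.bound_this = bindings[1];
      b.args.assign(bindings.begin() + 2, bindings.end());
      return b;
    }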
@@ -5898,6 +5256,7 @@
 
 // Forward declaration.
 class JSBuiltinsObject;
+class JSGlobalPropertyCell;
 
 // Common super class for JavaScript global objects and the special
 // builtins global objects.
@@ -5925,11 +5284,6 @@
   }
 
   // Ensure that the global object has a cell for the given property name.
-  static Handle<JSGlobalPropertyCell> EnsurePropertyCell(
-      Handle<GlobalObject> global,
-      Handle<String> name);
-  // TODO(kmillikin): This function can be eliminated once the stub cache is
-  // fully handlified (and the static helper can be written directly).
   MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
 
   // Casting.
@@ -5942,6 +5296,8 @@
   static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
 
  private:
+  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
 };
 
@@ -6020,7 +5376,7 @@
 };
 
 
-// Representation for JS Wrapper objects, String, Number, Boolean, etc.
+// Representation for JS Wrapper objects, String, Number, Boolean, Date, etc.
 class JSValue: public JSObject {
  public:
   // [value]: the object being wrapped.
@@ -6049,106 +5405,6 @@
 };
 
 
-class DateCache;
-
-// Representation for JS date objects.
-class JSDate: public JSObject {
- public:
-  // If one component is NaN, all of them are, indicating a NaN time value.
-  // [value]: the time value.
-  DECL_ACCESSORS(value, Object)
-  // [year]: caches year. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(year, Object)
-  // [month]: caches month. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(month, Object)
-  // [day]: caches day. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(day, Object)
-  // [weekday]: caches day of week. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(weekday, Object)
-  // [hour]: caches hours. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(hour, Object)
-  // [min]: caches minutes. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(min, Object)
-  // [sec]: caches seconds. Either undefined, smi, or NaN.
-  DECL_ACCESSORS(sec, Object)
-  // [cache stamp]: sample of the date cache stamp at the
-  // moment when local fields were cached.
-  DECL_ACCESSORS(cache_stamp, Object)
-
-  // Casting.
-  static inline JSDate* cast(Object* obj);
-
-  // Returns the date field with the specified index.
-  // See FieldIndex for the list of date fields.
-  static MaybeObject* GetField(Object* date, Smi* index);
-
-  void SetValue(Object* value, bool is_value_nan);
-
-
-  // Dispatched behavior.
-#ifdef OBJECT_PRINT
-  inline void JSDatePrint() {
-    JSDatePrint(stdout);
-  }
-  void JSDatePrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void JSDateVerify();
-#endif
-  // The order is important. It must be kept in sync with date macros
-  // in macros.py.
-  enum FieldIndex {
-    kDateValue,
-    kYear,
-    kMonth,
-    kDay,
-    kWeekday,
-    kHour,
-    kMinute,
-    kSecond,
-    kFirstUncachedField,
-    kMillisecond = kFirstUncachedField,
-    kDays,
-    kTimeInDay,
-    kFirstUTCField,
-    kYearUTC = kFirstUTCField,
-    kMonthUTC,
-    kDayUTC,
-    kWeekdayUTC,
-    kHourUTC,
-    kMinuteUTC,
-    kSecondUTC,
-    kMillisecondUTC,
-    kDaysUTC,
-    kTimeInDayUTC,
-    kTimezoneOffset
-  };
-
-  // Layout description.
-  static const int kValueOffset = JSObject::kHeaderSize;
-  static const int kYearOffset = kValueOffset + kPointerSize;
-  static const int kMonthOffset = kYearOffset + kPointerSize;
-  static const int kDayOffset = kMonthOffset + kPointerSize;
-  static const int kWeekdayOffset = kDayOffset + kPointerSize;
-  static const int kHourOffset = kWeekdayOffset  + kPointerSize;
-  static const int kMinOffset = kHourOffset + kPointerSize;
-  static const int kSecOffset = kMinOffset + kPointerSize;
-  static const int kCacheStampOffset = kSecOffset + kPointerSize;
-  static const int kSize = kCacheStampOffset + kPointerSize;
-
- private:
-  inline Object* DoGetField(FieldIndex index);
-
-  Object* GetUTCField(FieldIndex index, double value, DateCache* date_cache);
-
-  // Computes and caches the cacheable fields of the date.
-  inline void SetLocalFields(int64_t local_time_ms, DateCache* date_cache);
-
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JSDate);
-};
-
-
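
The deleted JSDate class caches broken-down date components alongside a stamp sampled from a global DateCache; bumping the global stamp (for example on a time-zone change) invalidates every cached component at once. A rough sketch of that scheme, with simplified stand-in types and an illustrative ComputeYear:

    struct DateCacheSketch { int stamp; };

    struct JSDateSketch {
      double value;     // time value in ms since the epoch
      int year;         // cached component
      int cache_stamp;  // stamp sampled when 'year' was cached

      int Year(DateCacheSketch* cache) {
        if (cache_stamp != cache->stamp) {  // stale: stamp was bumped
          year = ComputeYear(value);
          cache_stamp = cache->stamp;
        }
        return year;
      }

      static int ComputeYear(double ms) {
        // Crude approximation, for illustration only.
        return 1970 + static_cast<int>(ms / 31556952000.0);
      }
    };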
 // Representation of message objects used for error reporting through
 // the API. The messages are formatted in JavaScript so this object is
 // a real JavaScript object. The information used for formatting the
@@ -6384,19 +5640,13 @@
  public:
   // Find cached value for a string key, otherwise return null.
   Object* Lookup(String* src);
-  Object* LookupEval(String* src,
-                     Context* context,
-                     LanguageMode language_mode,
-                     int scope_position);
+  Object* LookupEval(String* src, Context* context, StrictModeFlag strict_mode);
   Object* LookupRegExp(String* source, JSRegExp::Flags flags);
-  MUST_USE_RESULT MaybeObject* Put(String* src, Object* value);
-  MUST_USE_RESULT MaybeObject* PutEval(String* src,
-                                       Context* context,
-                                       SharedFunctionInfo* value,
-                                       int scope_position);
-  MUST_USE_RESULT MaybeObject* PutRegExp(String* src,
-                                         JSRegExp::Flags flags,
-                                         FixedArray* value);
+  MaybeObject* Put(String* src, Object* value);
+  MaybeObject* PutEval(String* src,
+                       Context* context,
+                       SharedFunctionInfo* value);
+  MaybeObject* PutRegExp(String* src, JSRegExp::Flags flags, FixedArray* value);
 
   // Remove given value from cache.
   void Remove(Object* value);
@@ -6508,17 +5758,10 @@
  public:
   DECL_ACCESSORS(cache, Object)
 
-  static void Update(Handle<PolymorphicCodeCache> cache,
-                     MapHandleList* maps,
-                     Code::Flags flags,
-                     Handle<Code> code);
-
-  MUST_USE_RESULT MaybeObject* Update(MapHandleList* maps,
+  MUST_USE_RESULT MaybeObject* Update(MapList* maps,
                                       Code::Flags flags,
                                       Code* code);
-
-  // Returns an undefined value if the entry is not found.
-  Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
+  Object* Lookup(MapList* maps, Code::Flags flags);
 
   static inline PolymorphicCodeCache* cast(Object* obj);
 
@@ -6543,11 +5786,8 @@
 class PolymorphicCodeCacheHashTable
     : public HashTable<CodeCacheHashTableShape, HashTableKey*> {
  public:
-  Object* Lookup(MapHandleList* maps, int code_kind);
-
-  MUST_USE_RESULT MaybeObject* Put(MapHandleList* maps,
-                                   int code_kind,
-                                   Code* code);
+  Object* Lookup(MapList* maps, int code_kind);
+  MUST_USE_RESULT MaybeObject* Put(MapList* maps, int code_kind, Code* code);
 
   static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
 
@@ -6557,73 +5797,6 @@
 };
 
 
-class TypeFeedbackInfo: public Struct {
- public:
-  inline int ic_total_count();
-  inline void set_ic_total_count(int count);
-
-  inline int ic_with_typeinfo_count();
-  inline void set_ic_with_typeinfo_count(int count);
-
-  DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
-
-  static inline TypeFeedbackInfo* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
-  inline void TypeFeedbackInfoPrint() {
-    TypeFeedbackInfoPrint(stdout);
-  }
-  void TypeFeedbackInfoPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void TypeFeedbackInfoVerify();
-#endif
-
-  static const int kIcTotalCountOffset = HeapObject::kHeaderSize;
-  static const int kIcWithTypeinfoCountOffset =
-      kIcTotalCountOffset + kPointerSize;
-  static const int kTypeFeedbackCellsOffset =
-      kIcWithTypeinfoCountOffset + kPointerSize;
-  static const int kSize = kTypeFeedbackCellsOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
-};
-
-
-// Representation of a slow alias as part of a non-strict arguments object.
-// For fast aliases (if HasNonStrictArgumentsElements()):
-// - the parameter map contains an index into the context
-// - all attributes of the element have default values
-// For slow aliases (if HasDictionaryArgumentsElements()):
-// - the parameter map contains no fast alias mapping (i.e. the hole)
-// - this struct (in the slow backing store) contains an index into the context
-// - all attributes are available as part of the property details
-class AliasedArgumentsEntry: public Struct {
- public:
-  inline int aliased_context_slot();
-  inline void set_aliased_context_slot(int count);
-
-  static inline AliasedArgumentsEntry* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
-  inline void AliasedArgumentsEntryPrint() {
-    AliasedArgumentsEntryPrint(stdout);
-  }
-  void AliasedArgumentsEntryPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void AliasedArgumentsEntryVerify();
-#endif
-
-  static const int kAliasedContextSlot = HeapObject::kHeaderSize;
-  static const int kSize = kAliasedContextSlot + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
-};
-
-
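
The comment above distinguishes fast aliases, whose context slot is stored directly in the parameter map, from slow aliases, whose slot lives in this struct inside the dictionary backing store. A simplified lookup sketch with placeholder types:

    struct SlowAliasSketch {
      int aliased_context_slot;  // index into the function's context
    };

    // Prefer the fast parameter-map entry; fall back to the dictionary
    // entry when the map holds the hole sentinel.
    static int LookupSlot(const int* parameter_map, int index, int hole,
                          const SlowAliasSketch* dictionary_entry) {
      int fast = parameter_map[index];
      if (fast != hole) return fast;  // fast alias: slot stored inline
      return dictionary_entry->aliased_context_slot;
    }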
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
@@ -6637,17 +5810,12 @@
   inline bool has_trivial_hash();
 
   // Add a character to the hash and update the array index calculation.
-  inline void AddCharacter(uint32_t c);
+  inline void AddCharacter(uc32 c);
 
   // Adds a character to the hash but does not update the array index
   // calculation.  This can only be called when it has been verified
   // that the input is not an array index.
-  inline void AddCharacterNoIndex(uint32_t c);
-
-  // Add a character above 0xffff as a surrogate pair.  These can get into
-  // the hasher through the routines that take a UTF-8 string and make a symbol.
-  void AddSurrogatePair(uc32 c);
-  void AddSurrogatePairNoIndex(uc32 c);
+  inline void AddCharacterNoIndex(uc32 c);
 
   // Returns the value to store in the hash field of a string with
   // the given length and contents.
@@ -6865,7 +6033,7 @@
   inline String* GetUnderlying();
 
   // Mark the string as an undetectable object. It only applies to
-  // ASCII and two byte string types.
+  // ascii and two byte string types.
   bool MarkAsUndetectable();
 
   // Return a substring.
@@ -6897,6 +6065,8 @@
       RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL,
       int* length_output = 0);
 
+  int Utf8Length();
+
   // Return a 16 bit Unicode representation of the string.
   // The string should be nearly flat, otherwise the performance
   // of this method may be very bad.  Setting robustness_flag to
@@ -6959,10 +6129,13 @@
   // value into an array index.
   static const int kMaxArrayIndexSize = 10;
 
-  // Max ASCII char code.
+  // Max ascii char code.
   static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
   static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
-  static const int kMaxUtf16CodeUnit = 0xffff;
+  static const int kMaxUC16CharCode = 0xffff;
+
+  // Minimum length for a cons string.
+  static const int kMinNonFlatLength = 13;
 
   // Mask constant for checking if a string has a computed hash code
   // and if it is an array index.  The least significant bit indicates
@@ -7134,16 +6307,13 @@
   // Casting.
   static inline SeqString* cast(Object* obj);
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
 };
 
 
-// The AsciiString class captures sequential ASCII string objects.
-// Each character in the AsciiString is an ASCII character.
+// The AsciiString class captures sequential ascii string objects.
+// Each character in the AsciiString is an ascii character.
 class SeqAsciiString: public SeqString {
  public:
   static const bool kHasAsciiEncoding = true;
@@ -7170,8 +6340,12 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
   }
 
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
   // Maximal memory usage for a single sequential ASCII string.
-  static const int kMaxSize = 512 * MB - 1;
+  static const int kMaxSize = 512 * MB;
   // Maximal length of a single sequential ASCII string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -7220,8 +6394,12 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
   }
 
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+
   // Maximal memory usage for a single sequential two-byte string.
-  static const int kMaxSize = 512 * MB - 1;
+  static const int kMaxSize = 512 * MB;
   // Maximal length of a single sequential two-byte string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
@@ -7365,12 +6543,7 @@
 
   // Layout description.
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
-  static const int kShortSize = kResourceOffset + kPointerSize;
-  static const int kResourceDataOffset = kResourceOffset + kPointerSize;
-  static const int kSize = kResourceDataOffset + kPointerSize;
-
-  // Return whether external string is short (data pointer is not cached).
-  inline bool is_short();
+  static const int kSize = kResourceOffset + kPointerSize;
 
   STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
 
@@ -7388,19 +6561,11 @@
   typedef v8::String::ExternalAsciiStringResource Resource;
 
   // The underlying resource.
-  inline const Resource* resource();
-  inline void set_resource(const Resource* buffer);
-
-  // Update the pointer cache to the external character array.
-  // The cached pointer is always valid, as the external character array does
-  // not move during lifetime.  Deserialization is the only exception, after
-  // which the pointer cache has to be refreshed.
-  inline void update_data_cache();
-
-  inline const char* GetChars();
+  inline Resource* resource();
+  inline void set_resource(Resource* buffer);
 
   // Dispatched behavior.
-  inline uint16_t ExternalAsciiStringGet(int index);
+  uint16_t ExternalAsciiStringGet(int index);
 
   // Casting.
   static inline ExternalAsciiString* cast(Object* obj);
@@ -7433,22 +6598,14 @@
   typedef v8::String::ExternalStringResource Resource;
 
   // The underlying string resource.
-  inline const Resource* resource();
-  inline void set_resource(const Resource* buffer);
-
-  // Update the pointer cache to the external character array.
-  // The cached pointer is always valid, as the external character array does
-  // not move during lifetime.  Deserialization is the only exception, after
-  // which the pointer cache has to be refreshed.
-  inline void update_data_cache();
-
-  inline const uint16_t* GetChars();
+  inline Resource* resource();
+  inline void set_resource(Resource* buffer);
 
   // Dispatched behavior.
-  inline uint16_t ExternalTwoByteStringGet(int index);
+  uint16_t ExternalTwoByteStringGet(int index);
 
   // For regexp code.
-  inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+  const uint16_t* ExternalTwoByteStringGetData(unsigned start);
 
   // Casting.
   static inline ExternalTwoByteString* cast(Object* obj);
@@ -7628,6 +6785,10 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
+  // Returns the isolate/heap this cell object belongs to.
+  inline Isolate* isolate();
+  inline Heap* heap();
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
@@ -7639,56 +6800,25 @@
   // [handler]: The handler property.
   DECL_ACCESSORS(handler, Object)
 
-  // [hash]: The hash code property (undefined if not initialized yet).
-  DECL_ACCESSORS(hash, Object)
-
   // Casting.
   static inline JSProxy* cast(Object* obj);
 
   bool HasPropertyWithHandler(String* name);
-  bool HasElementWithHandler(uint32_t index);
-
-  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
-      Object* receiver,
-      String* name);
-  MUST_USE_RESULT MaybeObject* GetElementWithHandler(
-      Object* receiver,
-      uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
       String* name,
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetElementWithHandler(
-      uint32_t index,
-      Object* value,
-      StrictModeFlag strict_mode);
-
-  // If the handler defines an accessor property, invoke its setter
-  // (or throw if only a getter exists) and set *found to true. Otherwise false.
-  MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
-      String* name,
-      Object* value,
-      PropertyAttributes attributes,
-      StrictModeFlag strict_mode,
-      bool* found);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
       String* name,
       DeleteMode mode);
-  MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
-      uint32_t index,
-      DeleteMode mode);
 
   MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
       JSReceiver* receiver,
-      String* name);
-  MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
-      JSReceiver* receiver,
-      uint32_t index);
-
-  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+      String* name,
+      bool* has_exception);
 
   // Turn this into an (empty) JSObject.
   void Fix();
@@ -7696,13 +6826,6 @@
   // Initializes the body after the handler slot.
   inline void InitializeBody(int object_size, Object* value);
 
-  // Invoke a trap by name. If the trap does not exist on this's handler,
-  // but derived_trap is non-NULL, invoke that instead.  May cause GC.
-  Handle<Object> CallTrap(const char* name,
-                          Handle<Object> derived_trap,
-                          int argc,
-                          Handle<Object> args[]);
-
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void JSProxyPrint() {
@@ -7718,8 +6841,7 @@
   // size as a virgin JSObject. This is essential for becoming a JSObject
   // upon freeze.
   static const int kHandlerOffset = HeapObject::kHeaderSize;
-  static const int kHashOffset = kHandlerOffset + kPointerSize;
-  static const int kPaddingOffset = kHashOffset + kPointerSize;
+  static const int kPaddingOffset = kHandlerOffset + kPointerSize;
   static const int kSize = JSObject::kHeaderSize;
   static const int kHeaderSize = kPaddingOffset;
   static const int kPaddingSize = kSize - kPaddingOffset;
@@ -7727,7 +6849,7 @@
   STATIC_CHECK(kPaddingSize >= 0);
 
   typedef FixedBodyDescriptor<kHandlerOffset,
-                              kPaddingOffset,
+                              kHandlerOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
  private:
@@ -7758,7 +6880,7 @@
 #endif
 
   // Layout description.
-  static const int kCallTrapOffset = JSProxy::kPaddingOffset;
+  static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
   static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
   static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
   static const int kSize = JSFunction::kSize;
@@ -7775,69 +6897,18 @@
 };
 
 
-// The JSSet describes EcmaScript Harmony sets
-class JSSet: public JSObject {
- public:
-  // [set]: the backing hash set containing keys.
-  DECL_ACCESSORS(table, Object)
-
-  // Casting.
-  static inline JSSet* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
-  inline void JSSetPrint() {
-    JSSetPrint(stdout);
-  }
-  void JSSetPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void JSSetVerify();
-#endif
-
-  static const int kTableOffset = JSObject::kHeaderSize;
-  static const int kSize = kTableOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
-};
-
-
-// The JSMap describes EcmaScript Harmony maps
-class JSMap: public JSObject {
- public:
-  // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, Object)
-
-  // Casting.
-  static inline JSMap* cast(Object* obj);
-
-#ifdef OBJECT_PRINT
-  inline void JSMapPrint() {
-    JSMapPrint(stdout);
-  }
-  void JSMapPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void JSMapVerify();
-#endif
-
-  static const int kTableOffset = JSObject::kHeaderSize;
-  static const int kSize = kTableOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
-};
-
-
 // The JSWeakMap describes EcmaScript Harmony weak maps
 class JSWeakMap: public JSObject {
  public:
   // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, Object)
+  DECL_ACCESSORS(table, ObjectHashTable)
 
   // [next]: linked list of encountered weak maps during GC.
   DECL_ACCESSORS(next, Object)
 
+  // Unchecked accessors to be used during GC.
+  inline ObjectHashTable* unchecked_table();
+
   // Casting.
   static inline JSWeakMap* cast(Object* obj);
 
@@ -7866,8 +6937,8 @@
 class Foreign: public HeapObject {
  public:
   // [address]: field containing the address.
-  inline Address foreign_address();
-  inline void set_foreign_address(Address value);
+  inline Address address();
+  inline void set_address(Address value);
 
   // Casting.
   static inline Foreign* cast(Object* obj);
@@ -7890,10 +6961,10 @@
 
   // Layout description.
 
-  static const int kForeignAddressOffset = HeapObject::kHeaderSize;
-  static const int kSize = kForeignAddressOffset + kPointerSize;
+  static const int kAddressOffset = HeapObject::kHeaderSize;
+  static const int kSize = kAddressOffset + kPointerSize;
 
-  STATIC_CHECK(kForeignAddressOffset == Internals::kForeignAddressOffset);
+  STATIC_CHECK(kAddressOffset == Internals::kForeignAddressOffset);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Foreign);
@@ -7922,12 +6993,8 @@
   // capacity is non-zero.
   MUST_USE_RESULT MaybeObject* Initialize(int capacity);
 
-  // Initializes the array to a certain length.
-  inline bool AllowsSetElementsLength();
-  MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
-
   // Set the content of the array to the content of storage.
-  MUST_USE_RESULT inline MaybeObject* SetContent(FixedArrayBase* storage);
+  inline void SetContent(FixedArray* storage);
 
   // Casting.
   static inline JSArray* cast(Object* obj);
@@ -8042,59 +7109,6 @@
 };
 
 
-// Support for JavaScript accessors: A pair of a getter and a setter. Each
-// accessor can either be
-//   * a pointer to a JavaScript function or proxy: a real accessor
-//   * undefined: considered an accessor by the spec, too, strangely enough
-//   * the hole: an accessor which has not been set
-//   * a pointer to a map: a transition used to ensure map sharing
-class AccessorPair: public Struct {
- public:
-  DECL_ACCESSORS(getter, Object)
-  DECL_ACCESSORS(setter, Object)
-
-  static inline AccessorPair* cast(Object* obj);
-
-  MUST_USE_RESULT MaybeObject* CopyWithoutTransitions();
-
-  // Note: Returns undefined instead in case of a hole.
-  Object* GetComponent(AccessorComponent component);
-
-  // Set both components, skipping arguments which are a JavaScript null.
-  void SetComponents(Object* getter, Object* setter) {
-    if (!getter->IsNull()) set_getter(getter);
-    if (!setter->IsNull()) set_setter(setter);
-  }
-
-  bool ContainsAccessor() {
-    return IsJSAccessor(getter()) || IsJSAccessor(setter());
-  }
-
-#ifdef OBJECT_PRINT
-  void AccessorPairPrint(FILE* out = stdout);
-#endif
-#ifdef DEBUG
-  void AccessorPairVerify();
-#endif
-
-  static const int kGetterOffset = HeapObject::kHeaderSize;
-  static const int kSetterOffset = kGetterOffset + kPointerSize;
-  static const int kSize = kSetterOffset + kPointerSize;
-
- private:
-  // Strangely enough, in addition to functions and harmony proxies, the spec
-  // requires us to consider undefined as a kind of accessor, too:
-  //    var obj = {};
-  //    Object.defineProperty(obj, "foo", {get: undefined});
-  //    assertTrue("foo" in obj);
-  bool IsJSAccessor(Object* obj) {
-    return obj->IsSpecFunction() || obj->IsUndefined();
-  }
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessorPair);
-};
-
-
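
The deleted AccessorPair never exposes the hole to callers: GetComponent reports an unset component as undefined, and, per the spec quirk noted in IsJSAccessor, an explicitly undefined getter still counts as an accessor. A reduced sketch of those semantics (enum values stand in for V8's sentinel objects):

    enum Component { kGetter, kSetter };
    enum Slot { kHole, kUndefined, kFunction };

    struct AccessorPairSketch {
      Slot getter;
      Slot setter;
      AccessorPairSketch() : getter(kHole), setter(kHole) {}

      // Mirrors GetComponent(): the hole is reported as undefined.
      Slot GetComponent(Component which) const {
        Slot s = (which == kGetter) ? getter : setter;
        return (s == kHole) ? kUndefined : s;
      }

      // A pair "contains an accessor" once either slot has been set,
      // even if it was set to undefined.
      bool ContainsAccessor() const {
        return getter != kHole || setter != kHole;
      }
    };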
 class AccessCheckInfo: public Struct {
  public:
   DECL_ACCESSORS(named_callback, Object)
@@ -8195,8 +7209,8 @@
   static const int kTagOffset          = HeapObject::kHeaderSize;
   static const int kPropertyListOffset = kTagOffset + kPointerSize;
   static const int kHeaderSize         = kPropertyListOffset + kPointerSize;
-
- private:
+ protected:
+  friend class AGCCVersionRequiresThisClassToHaveAFriendSoHereItIs;
   DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
 };
 
@@ -8462,34 +7476,6 @@
 #undef DECL_BOOLEAN_ACCESSORS
 #undef DECL_ACCESSORS
 
-#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V)                            \
-  V(kSymbolTable, "symbol_table", "(Symbols)")                          \
-  V(kExternalStringsTable, "external_strings_table", "(External strings)") \
-  V(kStrongRootList, "strong_root_list", "(Strong roots)")              \
-  V(kSymbol, "symbol", "(Symbol)")                                      \
-  V(kBootstrapper, "bootstrapper", "(Bootstrapper)")                    \
-  V(kTop, "top", "(Isolate)")                                           \
-  V(kRelocatable, "relocatable", "(Relocatable)")                       \
-  V(kDebug, "debug", "(Debugger)")                                      \
-  V(kCompilationCache, "compilationcache", "(Compilation cache)")       \
-  V(kHandleScope, "handlescope", "(Handle scope)")                      \
-  V(kBuiltins, "builtins", "(Builtins)")                                \
-  V(kGlobalHandles, "globalhandles", "(Global handles)")                \
-  V(kThreadManager, "threadmanager", "(Thread manager)")                \
-  V(kExtensions, "Extensions", "(Extensions)")
-
-class VisitorSynchronization : public AllStatic {
- public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
-  enum SyncTag {
-    VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
-    kNumberOfSyncTags
-  };
-#undef DECLARE_ENUM
-
-  static const char* const kTags[kNumberOfSyncTags];
-  static const char* const kTagNames[kNumberOfSyncTags];
-};
 
 // Abstract base class for visiting, and optionally modifying, the
 // pointers contained in Objects. Used in GC and serialization/deserialization.
@@ -8528,16 +7514,11 @@
   // Handy shorthand for visiting a single pointer.
   virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
 
-  // Visit pointer embedded into a code object.
-  virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
-
   // Visits a contiguous arrays of external references (references to the C++
   // heap) in the half-open range [start, end). Any or all of the values
   // may be modified on return.
   virtual void VisitExternalReferences(Address* start, Address* end) {}
 
-  virtual void VisitExternalReference(RelocInfo* rinfo);
-
   inline void VisitExternalReference(Address* p) {
     VisitExternalReferences(p, p + 1);
   }
@@ -8545,10 +7526,13 @@
   // Visits a handle that has an embedder-assigned class ID.
   virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
 
+#ifdef DEBUG
   // Intended for serialization/deserialization checking: insert, or
   // check for the presence of, a tag at this position in the stream.
-  // Also used for marking up GC roots in heap snapshots.
-  virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+  virtual void Synchronize(const char* tag) {}
+#else
+  inline void Synchronize(const char* tag) {}
+#endif
 };
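
A typical ObjectVisitor subclass overrides only the range callback and lets the single-pointer shorthand forward to it, as VisitPointer does above. A minimal sketch, with Object reduced to void so it stands alone:

    #include <cstddef>

    class VisitorSketch {
     public:
      virtual ~VisitorSketch() {}
      virtual void VisitPointers(void** start, void** end) = 0;
      // Shorthand, mirroring ObjectVisitor::VisitPointer above.
      void VisitPointer(void** p) { VisitPointers(p, p + 1); }
    };

    class CountingVisitor : public VisitorSketch {
     public:
      CountingVisitor() : count(0) {}
      size_t count;
      virtual void VisitPointers(void** start, void** end) {
        count += static_cast<size_t>(end - start);  // tally visited slots
      }
    };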
 
 
diff --git a/src/once.cc b/src/once.cc
deleted file mode 100644
index 37fe369..0000000
--- a/src/once.cc
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "once.h"
-
-#ifdef _WIN32
-#include <windows.h>
-#else
-#include <sched.h>
-#endif
-
-#include "atomicops.h"
-#include "checks.h"
-
-namespace v8 {
-namespace internal {
-
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
-  AtomicWord state = Acquire_Load(once);
-  // Fast path. The provided function was already executed.
-  if (state == ONCE_STATE_DONE) {
-    return;
-  }
-
-  // The function execution did not complete yet. The once object can be in one
-  // of the two following states:
-  //   - UNINITIALIZED: We are the first thread calling this function.
-  //   - EXECUTING_FUNCTION: Another thread is already executing the function.
-  //
-  // First, try to change the state from UNINITIALIZED to EXECUTING_FUNCTION
-  // atomically.
-  state = Acquire_CompareAndSwap(
-      once, ONCE_STATE_UNINITIALIZED, ONCE_STATE_EXECUTING_FUNCTION);
-  if (state == ONCE_STATE_UNINITIALIZED) {
-    // We are the first thread to call this function, so we have to call the
-    // function.
-    init_func(arg);
-    Release_Store(once, ONCE_STATE_DONE);
-  } else {
-    // Another thread has already started executing the function. We need to
-    // wait until it completes the initialization.
-    while (state == ONCE_STATE_EXECUTING_FUNCTION) {
-#ifdef _WIN32
-      ::Sleep(0);
-#else
-      sched_yield();
-#endif
-      state = Acquire_Load(once);
-    }
-  }
-}
-
-} }  // namespace v8::internal
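
For comparison, C++11's std::call_once provides the same contract the deleted CallOnceImpl hand-rolls with atomics: the first caller runs the function and every concurrent caller blocks until it finishes. A minimal sketch:

    #include <cstdio>
    #include <mutex>

    static std::once_flag g_once;

    static void Init() { std::printf("initialized exactly once\n"); }

    void InitOnce() {
      // Mirrors the EXECUTING_FUNCTION spin-wait above: no caller returns
      // before the first caller's Init() has completed.
      std::call_once(g_once, Init);
    }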
diff --git a/src/once.h b/src/once.h
deleted file mode 100644
index a44b8fa..0000000
--- a/src/once.h
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// emulates google3/base/once.h
-//
-// This header is intended to be included only by v8's internal code. Users
-// should not use this directly.
-//
-// This is basically a portable version of pthread_once().
-//
-// This header declares:
-// * A type called OnceType.
-// * A macro V8_DECLARE_ONCE() which declares a (global) variable of type
-//   OnceType.
-// * A function CallOnce(OnceType* once, void (*init_func)()).
-//   This function, when invoked multiple times given the same OnceType object,
-//   will invoke init_func on the first call only, and will make sure none of
-//   the calls return before that first call to init_func has finished.
-//
-// Additionally, the following features are supported:
-// * A macro V8_ONCE_INIT which is expanded into the expression used to
-//   initialize an OnceType. This is only useful when clients embed an OnceType
-//   into a structure of their own and want to initialize it statically.
-// * The user can provide a parameter which CallOnce() forwards to the
-//   user-provided function when it is called. Usage example:
-//     CallOnce(&my_once, &MyFunctionExpectingIntArgument, 10);
-// * This implementation guarantees that OnceType is a POD (i.e. no static
-//   initializer generated).
-//
-// This implements a way to perform lazy initialization.  It's more efficient
-// than using mutexes as no lock is needed if initialization has already
-// happened.
-//
-// Example usage:
-//   void Init();
-//   V8_DECLARE_ONCE(once_init);
-//
-//   // Calls Init() exactly once.
-//   void InitOnce() {
-//     CallOnce(&once_init, &Init);
-//   }
-//
-// Note that if CallOnce() is called before main() has begun, it must
-// only be called by the thread that will eventually call main() -- that is,
-// the thread that performs dynamic initialization.  In general this is a safe
-// assumption since people don't usually construct threads before main() starts,
-// but it is technically not guaranteed.  Unfortunately, Win32 provides no way
-// whatsoever to statically-initialize its synchronization primitives, so our
-// only choice is to assume that dynamic initialization is single-threaded.
-
-#ifndef V8_ONCE_H_
-#define V8_ONCE_H_
-
-#include "atomicops.h"
-
-namespace v8 {
-namespace internal {
-
-typedef AtomicWord OnceType;
-
-#define V8_ONCE_INIT 0
-
-#define V8_DECLARE_ONCE(NAME) ::v8::internal::OnceType NAME
-
-enum {
-  ONCE_STATE_UNINITIALIZED = 0,
-  ONCE_STATE_EXECUTING_FUNCTION = 1,
-  ONCE_STATE_DONE = 2
-};
-
-typedef void (*NoArgFunction)();
-typedef void (*PointerArgFunction)(void* arg);
-
-template <typename T>
-struct OneArgFunction {
-  typedef void (*type)(T);
-};
-
-void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg);
-
-inline void CallOnce(OnceType* once, NoArgFunction init_func) {
-  if (Acquire_Load(once) != ONCE_STATE_DONE) {
-    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func), NULL);
-  }
-}
-
-
-template <typename Arg>
-inline void CallOnce(OnceType* once,
-    typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
-  if (Acquire_Load(once) != ONCE_STATE_DONE) {
-    CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
-        static_cast<void*>(arg));
-  }
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_ONCE_H_
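
A usage sketch for the one-argument overload declared above; the extra parameter must be a pointer, matching OneArgFunction<Arg*>. Everything outside the once.h API is hypothetical, and the include names the header this revert deletes:

    #include "once.h"

    V8_DECLARE_ONCE(config_once);
    static int config_value;

    static void InitConfig(int* out) { *out = 42; }

    int GetConfig() {
      // InitConfig runs exactly once; later calls only pay an acquire load.
      v8::internal::CallOnce(&config_once, &InitConfig, &config_value);
      return config_value;
    }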
diff --git a/src/parser.cc b/src/parser.cc
index da68041..90d5c91 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,7 @@
 #include "v8.h"
 
 #include "api.h"
-#include "ast.h"
+#include "ast-inl.h"
 #include "bootstrapper.h"
 #include "char-predicates-inl.h"
 #include "codegen.h"
@@ -258,7 +258,7 @@
           scanner().literal_ascii_string());
     } else {
       return isolate()->factory()->LookupTwoByteSymbol(
-          scanner().literal_utf16_string());
+          scanner().literal_uc16_string());
     }
   }
   return LookupCachedSymbol(symbol_id);
@@ -279,7 +279,7 @@
           scanner().literal_ascii_string());
     } else {
       result = isolate()->factory()->LookupTwoByteSymbol(
-          scanner().literal_utf16_string());
+          scanner().literal_uc16_string());
     }
     symbol_cache_.at(symbol_id) = result;
     return result;
@@ -407,9 +407,9 @@
 }
 
 
-Scope* Parser::NewScope(Scope* parent, ScopeType type) {
+Scope* Parser::NewScope(Scope* parent, Scope::Type type, bool inside_with) {
   Scope* result = new(zone()) Scope(parent, type);
-  result->Initialize();
+  result->Initialize(inside_with);
   return result;
 }
 
@@ -459,53 +459,90 @@
 
 
 // ----------------------------------------------------------------------------
-// FunctionState and BlockState together implement the parser's scope stack.
-// The parser's current scope is in top_scope_.  The BlockState and
-// FunctionState constructors push on the scope stack and the destructors
-// pop.  They are also used to hold the parser's per-function and per-block
-// state.
+// LexicalScope is a support class to facilitate manipulation of the
+// Parser's scope stack. The constructor sets the parser's top scope
+// to the incoming scope, and the destructor resets it.
+//
+// Additionally, it stores transient information used during parsing.
+// These scopes are not kept around after parsing or referenced by syntax
+// trees so they can be stack-allocated and hence used by the pre-parser.
 
-class Parser::BlockState BASE_EMBEDDED {
+class LexicalScope BASE_EMBEDDED {
  public:
-  BlockState(Parser* parser, Scope* scope)
-      : parser_(parser),
-        outer_scope_(parser->top_scope_) {
-    parser->top_scope_ = scope;
+  LexicalScope(Parser* parser, Scope* scope, Isolate* isolate);
+  ~LexicalScope();
+
+  int NextMaterializedLiteralIndex() {
+    int next_index =
+        materialized_literal_count_ + JSFunction::kLiteralsPrefixSize;
+    materialized_literal_count_++;
+    return next_index;
+  }
+  int materialized_literal_count() { return materialized_literal_count_; }
+
+  void SetThisPropertyAssignmentInfo(
+      bool only_simple_this_property_assignments,
+      Handle<FixedArray> this_property_assignments) {
+    only_simple_this_property_assignments_ =
+        only_simple_this_property_assignments;
+    this_property_assignments_ = this_property_assignments;
+  }
+  bool only_simple_this_property_assignments() {
+    return only_simple_this_property_assignments_;
+  }
+  Handle<FixedArray> this_property_assignments() {
+    return this_property_assignments_;
   }
 
-  ~BlockState() { parser_->top_scope_ = outer_scope_; }
+  void AddProperty() { expected_property_count_++; }
+  int expected_property_count() { return expected_property_count_; }
 
  private:
+  // Captures the number of literals that need materialization in the
+  // function.  Includes regexp literals, and boilerplate for object
+  // and array literals.
+  int materialized_literal_count_;
+
+  // Properties count estimation.
+  int expected_property_count_;
+
+  // Keeps track of assignments to properties of this. Used for
+  // optimizing constructors.
+  bool only_simple_this_property_assignments_;
+  Handle<FixedArray> this_property_assignments_;
+
+  // Bookkeeping
   Parser* parser_;
-  Scope* outer_scope_;
+  // Previous values
+  LexicalScope* lexical_scope_parent_;
+  Scope* previous_scope_;
+  int previous_with_nesting_level_;
+  unsigned previous_ast_node_id_;
 };
 
 
-Parser::FunctionState::FunctionState(Parser* parser,
-                                     Scope* scope,
-                                     Isolate* isolate)
-    : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
-      next_handler_index_(0),
-      expected_property_count_(0),
-      only_simple_this_property_assignments_(false),
-      this_property_assignments_(isolate->factory()->empty_fixed_array()),
-      parser_(parser),
-      outer_function_state_(parser->current_function_state_),
-      outer_scope_(parser->top_scope_),
-      saved_ast_node_id_(isolate->ast_node_id()),
-      factory_(isolate) {
+LexicalScope::LexicalScope(Parser* parser, Scope* scope, Isolate* isolate)
+  : materialized_literal_count_(0),
+    expected_property_count_(0),
+    only_simple_this_property_assignments_(false),
+    this_property_assignments_(isolate->factory()->empty_fixed_array()),
+    parser_(parser),
+    lexical_scope_parent_(parser->lexical_scope_),
+    previous_scope_(parser->top_scope_),
+    previous_with_nesting_level_(parser->with_nesting_level_),
+    previous_ast_node_id_(isolate->ast_node_id()) {
   parser->top_scope_ = scope;
-  parser->current_function_state_ = this;
+  parser->lexical_scope_ = this;
+  parser->with_nesting_level_ = 0;
   isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
 }
 
 
-Parser::FunctionState::~FunctionState() {
-  parser_->top_scope_ = outer_scope_;
-  parser_->current_function_state_ = outer_function_state_;
-  if (outer_function_state_ != NULL) {
-    parser_->isolate()->set_ast_node_id(saved_ast_node_id_);
-  }
+LexicalScope::~LexicalScope() {
+  parser_->top_scope_ = previous_scope_;
+  parser_->lexical_scope_ = lexical_scope_parent_;
+  parser_->with_nesting_level_ = previous_with_nesting_level_;
+  parser_->isolate()->set_ast_node_id(previous_ast_node_id_);
 }
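
LexicalScope is a save/restore RAII helper: the constructor captures the parser's current state and installs fresh state, and the destructor restores the old state even on early exits. A stripped-down sketch of the pattern for a single field:

    struct ParserStateSketch { int with_nesting_level; };

    class ScopedNesting {
     public:
      explicit ScopedNesting(ParserStateSketch* parser)
          : parser_(parser), previous_(parser->with_nesting_level) {
        parser->with_nesting_level = 0;  // fresh function: reset, as above
      }
      ~ScopedNesting() { parser_->with_nesting_level = previous_; }

     private:
      ParserStateSketch* parser_;
      int previous_;  // restored on scope exit
    };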
 
 
@@ -533,40 +570,34 @@
 // Implementation of Parser
 
 Parser::Parser(Handle<Script> script,
-               int parser_flags,
+               bool allow_natives_syntax,
                v8::Extension* extension,
                ScriptDataImpl* pre_data)
     : isolate_(script->GetIsolate()),
       symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
       script_(script),
       scanner_(isolate_->unicode_cache()),
-      reusable_preparser_(NULL),
       top_scope_(NULL),
-      current_function_state_(NULL),
+      with_nesting_level_(0),
+      lexical_scope_(NULL),
       target_stack_(NULL),
+      allow_natives_syntax_(allow_natives_syntax),
       extension_(extension),
       pre_data_(pre_data),
       fni_(NULL),
-      allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
-      allow_lazy_((parser_flags & kAllowLazy) != 0),
-      allow_modules_((parser_flags & kAllowModules) != 0),
       stack_overflow_(false),
-      parenthesized_function_(false) {
-  isolate_->set_ast_node_id(0);
-  if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
-    scanner().SetHarmonyScoping(true);
-  }
-  if ((parser_flags & kAllowModules) != 0) {
-    scanner().SetHarmonyModules(true);
-  }
+      parenthesized_function_(false),
+      harmony_block_scoping_(false) {
+  AstNode::ResetIds();
 }
 
 
-FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
+FunctionLiteral* Parser::ParseProgram(Handle<String> source,
+                                      bool in_global_context,
+                                      StrictModeFlag strict_mode) {
   ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
-  Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
   fni_ = new(zone()) FuncNameInferrer(isolate());
 
@@ -576,74 +607,68 @@
     // Notice that the stream is destroyed at the end of the branch block.
     // The last line of the blocks can't be moved outside, even though they're
     // identical calls.
-    ExternalTwoByteStringUtf16CharacterStream stream(
+    ExternalTwoByteStringUC16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(info, source, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   } else {
-    GenericStringUtf16CharacterStream stream(source, 0, source->length());
+    GenericStringUC16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(info, source, &zone_scope);
+    return DoParseProgram(source, in_global_context, strict_mode, &zone_scope);
   }
 }
 
 
-FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
-                                        Handle<String> source,
+FunctionLiteral* Parser::DoParseProgram(Handle<String> source,
+                                        bool in_global_context,
+                                        StrictModeFlag strict_mode,
                                         ZoneScope* zone_scope) {
-  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
   if (pre_data_ != NULL) pre_data_->Initialize();
 
   // Compute the parsing mode.
-  mode_ = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
+  mode_ = FLAG_lazy ? PARSE_LAZILY : PARSE_EAGERLY;
   if (allow_natives_syntax_ || extension_ != NULL) mode_ = PARSE_EAGERLY;
 
+  Scope::Type type =
+    in_global_context
+      ? Scope::GLOBAL_SCOPE
+      : Scope::EVAL_SCOPE;
   Handle<String> no_name = isolate()->factory()->empty_symbol();
 
   FunctionLiteral* result = NULL;
-  { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
-    info->SetGlobalScope(scope);
-    if (info->is_eval()) {
-      Handle<SharedFunctionInfo> shared = info->shared_info();
-      if (!info->is_global() && (shared.is_null() || shared->is_function())) {
-        scope = Scope::DeserializeScopeChain(*info->calling_context(), scope);
-      }
-      if (!scope->is_global_scope() || info->language_mode() != CLASSIC_MODE) {
-        scope = NewScope(scope, EVAL_SCOPE);
-      }
+  { Scope* scope = NewScope(top_scope_, type, inside_with());
+    LexicalScope lexical_scope(this, scope, isolate());
+    if (strict_mode == kStrictMode) {
+      top_scope_->EnableStrictMode();
     }
-    scope->set_start_position(0);
-    scope->set_end_position(source->length());
-    FunctionState function_state(this, scope, isolate());
-    top_scope_->SetLanguageMode(info->language_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16);
     bool ok = true;
     int beg_loc = scanner().location().beg_pos;
-    ParseSourceElements(body, Token::EOS, info->is_eval(), &ok);
-    if (ok && !top_scope_->is_classic_mode()) {
+    ParseSourceElements(body, Token::EOS, &ok);
+    if (ok && top_scope_->is_strict_mode()) {
       CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
     }
 
-    if (ok && is_extended_mode()) {
-      CheckConflictingVarDeclarations(top_scope_, &ok);
+    if (ok && harmony_block_scoping_) {
+      CheckConflictingVarDeclarations(scope, &ok);
     }
 
     if (ok) {
-      result = factory()->NewFunctionLiteral(
+      result = new(zone()) FunctionLiteral(
+          isolate(),
           no_name,
           top_scope_,
           body,
-          function_state.materialized_literal_count(),
-          function_state.expected_property_count(),
-          function_state.handler_count(),
-          function_state.only_simple_this_property_assignments(),
-          function_state.this_property_assignments(),
+          lexical_scope.materialized_literal_count(),
+          lexical_scope.expected_property_count(),
+          lexical_scope.only_simple_this_property_assignments(),
+          lexical_scope.this_property_assignments(),
           0,
-          FunctionLiteral::kNoDuplicateParameters,
+          0,
+          source->length(),
           FunctionLiteral::ANONYMOUS_EXPRESSION,
-          FunctionLiteral::kGlobalOrEval);
-      result->set_ast_properties(factory()->visitor()->ast_properties());
+          false);  // Does not have duplicate parameters.
     } else if (stack_overflow_) {
       isolate()->StackOverflow();
     }
@@ -658,7 +683,6 @@
   return result;
 }
 
-
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
   ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
@@ -669,16 +693,16 @@
   // Initialize parser state.
   source->TryFlatten();
   if (source->IsExternalTwoByteString()) {
-    ExternalTwoByteStringUtf16CharacterStream stream(
+    ExternalTwoByteStringUC16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source),
         shared_info->start_position(),
         shared_info->end_position());
     FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
     return result;
   } else {
-    GenericStringUtf16CharacterStream stream(source,
-                                             shared_info->start_position(),
-                                             shared_info->end_position());
+    GenericStringUC16CharacterStream stream(source,
+                                            shared_info->start_position(),
+                                            shared_info->end_position());
     FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
     return result;
   }
@@ -686,11 +710,10 @@
 
 
 FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
-                                   Utf16CharacterStream* source,
+                                   UC16CharacterStream* source,
                                    ZoneScope* zone_scope) {
   Handle<SharedFunctionInfo> shared_info = info->shared_info();
   scanner_.Initialize(source);
-  ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
@@ -704,17 +727,16 @@
 
   {
     // Parse the function literal.
-    Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
-    info->SetGlobalScope(scope);
+    Scope* scope = NewScope(top_scope_, Scope::GLOBAL_SCOPE, inside_with());
     if (!info->closure().is_null()) {
-      scope = Scope::DeserializeScopeChain(info->closure()->context(), scope);
+      scope = Scope::DeserializeScopeChain(info, scope);
     }
-    FunctionState function_state(this, scope, isolate());
-    ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
-    ASSERT(scope->language_mode() != EXTENDED_MODE ||
-           info->is_extended_mode());
-    ASSERT(info->language_mode() == shared_info->language_mode());
-    scope->SetLanguageMode(shared_info->language_mode());
+    LexicalScope lexical_scope(this, scope, isolate());
+
+    if (shared_info->strict_mode()) {
+      top_scope_->EnableStrictMode();
+    }
+
     FunctionLiteral::Type type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
               ? FunctionLiteral::ANONYMOUS_EXPRESSION
@@ -761,12 +783,6 @@
 }
 
 
-void Parser::ReportMessage(const char* type, Vector<Handle<String> > args) {
-  Scanner::Location source_location = scanner().location();
-  ReportMessageAt(source_location, type, args);
-}
-
-
 void Parser::ReportMessageAt(Scanner::Location source_location,
                              const char* type,
                              Vector<const char*> args) {
@@ -801,6 +817,10 @@
   isolate()->Throw(*result, &location);
 }
 
+void Parser::SetHarmonyBlockScoping(bool block_scoping) {
+  scanner().SetHarmonyBlockScoping(block_scoping);
+  harmony_block_scoping_ = block_scoping;
+}
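
SetHarmonyBlockScoping fans the flag out to two consumers: the scanner, which presumably needs it at tokenization time so that 'let' is scanned as a keyword rather than a plain identifier, and the parser's own harmony_block_scoping_ field, which gates the block-scoping decisions later in this file. A stand-in sketch of the fan-out:

// Illustrative stand-ins; not the V8 classes.
struct Scanner {
  bool harmony_block_scoping;
  Scanner() : harmony_block_scoping(false) {}
};

class Parser {
 public:
  Parser() : harmony_block_scoping_(false) {}

  // One setter updates both the token-level view (scanner) and the
  // grammar-level view (parser) of the flag, mirroring the hunk above.
  void SetHarmonyBlockScoping(bool block_scoping) {
    scanner_.harmony_block_scoping = block_scoping;
    harmony_block_scoping_ = block_scoping;
  }

 private:
  Scanner scanner_;
  bool harmony_block_scoping_;
};

int main() {
  Parser parser;
  parser.SetHarmonyBlockScoping(true);
  return 0;
}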
 
 // Base class containing common code for the different finder classes used by
 // the parser.
@@ -937,18 +957,17 @@
 };
 
 
-// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
+// A ThisNamedPropertyAssignmentFinder finds and marks statements of the form
 // this.x = ...;, where x is a named property. It also determines whether a
 // function contains only assignments of this type.
-class ThisNamedPropertyAssignmentFinder : public ParserFinder {
+class ThisNamedPropertyAssignmentFinder : public ParserFinder {
  public:
-  explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
+  explicit ThisNamedPropertyAssignmentFinder(Isolate* isolate)
       : isolate_(isolate),
         only_simple_this_property_assignments_(true),
-        names_(0),
-        assigned_arguments_(0),
-        assigned_constants_(0) {
-  }
+        names_(NULL),
+        assigned_arguments_(NULL),
+        assigned_constants_(NULL) {}
 
   void Update(Scope* scope, Statement* stat) {
     // Bail out if function already has property assignments that are
@@ -975,17 +994,19 @@
   // Returns a fixed array containing three elements for each assignment of the
   // form this.x = y;
   Handle<FixedArray> GetThisPropertyAssignments() {
-    if (names_.is_empty()) {
+    if (names_ == NULL) {
       return isolate_->factory()->empty_fixed_array();
     }
-    ASSERT_EQ(names_.length(), assigned_arguments_.length());
-    ASSERT_EQ(names_.length(), assigned_constants_.length());
+    ASSERT(names_ != NULL);
+    ASSERT(assigned_arguments_ != NULL);
+    ASSERT_EQ(names_->length(), assigned_arguments_->length());
+    ASSERT_EQ(names_->length(), assigned_constants_->length());
     Handle<FixedArray> assignments =
-        isolate_->factory()->NewFixedArray(names_.length() * 3);
-    for (int i = 0; i < names_.length(); ++i) {
-      assignments->set(i * 3, *names_[i]);
-      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_[i]));
-      assignments->set(i * 3 + 2, *assigned_constants_[i]);
+        isolate_->factory()->NewFixedArray(names_->length() * 3);
+    for (int i = 0; i < names_->length(); i++) {
+      assignments->set(i * 3, *names_->at(i));
+      assignments->set(i * 3 + 1, Smi::FromInt(assigned_arguments_->at(i)));
+      assignments->set(i * 3 + 2, *assigned_constants_->at(i));
     }
     return assignments;
   }
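
GetThisPropertyAssignments flattens the three parallel lists into a single array of length 3n, with the (name, argument index, constant) triple for entry i stored at offsets 3i, 3i+1, and 3i+2. A sketch of the same layout, with std::variant standing in for a FixedArray's tagged slots:

#include <cassert>
#include <string>
#include <variant>
#include <vector>

// std::variant stands in for a FixedArray slot, which can hold a name,
// a small integer, or an arbitrary constant.
typedef std::variant<std::string, int, double> Slot;

// Flatten three parallel lists into one array whose (name,
// argument-index, constant) triple for entry i lives at offsets
// 3*i .. 3*i + 2, mirroring GetThisPropertyAssignments above.
std::vector<Slot> PackTriples(const std::vector<std::string>& names,
                              const std::vector<int>& assigned_arguments,
                              const std::vector<double>& assigned_constants) {
  assert(names.size() == assigned_arguments.size());
  assert(names.size() == assigned_constants.size());
  std::vector<Slot> assignments(names.size() * 3);
  for (size_t i = 0; i < names.size(); i++) {
    assignments[i * 3] = names[i];
    assignments[i * 3 + 1] = assigned_arguments[i];
    assignments[i * 3 + 2] = assigned_constants[i];
  }
  return assignments;
}

int main() {
  std::vector<Slot> packed = PackTriples({"x", "y"}, {0, -1}, {0.0, 42.0});
  assert(packed.size() == 6);
  assert(std::get<std::string>(packed[3]) == "y");  // second triple's name
  return 0;
}
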
@@ -1042,37 +1063,18 @@
     AssignmentFromSomethingElse();
   }
 
-
-
-
-  // We will potentially reorder the property assignments, so they must be
-  // simple enough that the ordering does not matter.
   void AssignmentFromParameter(Handle<String> name, int index) {
-    EnsureInitialized();
-    for (int i = 0; i < names_.length(); ++i) {
-      if (name->Equals(*names_[i])) {
-        assigned_arguments_[i] = index;
-        assigned_constants_[i] = isolate_->factory()->undefined_value();
-        return;
-      }
-    }
-    names_.Add(name);
-    assigned_arguments_.Add(index);
-    assigned_constants_.Add(isolate_->factory()->undefined_value());
+    EnsureAllocation();
+    names_->Add(name);
+    assigned_arguments_->Add(index);
+    assigned_constants_->Add(isolate_->factory()->undefined_value());
   }
 
   void AssignmentFromConstant(Handle<String> name, Handle<Object> value) {
-    EnsureInitialized();
-    for (int i = 0; i < names_.length(); ++i) {
-      if (name->Equals(*names_[i])) {
-        assigned_arguments_[i] = -1;
-        assigned_constants_[i] = value;
-        return;
-      }
-    }
-    names_.Add(name);
-    assigned_arguments_.Add(-1);
-    assigned_constants_.Add(value);
+    EnsureAllocation();
+    names_->Add(name);
+    assigned_arguments_->Add(-1);
+    assigned_constants_->Add(value);
   }
 
   void AssignmentFromSomethingElse() {
@@ -1080,30 +1082,49 @@
     only_simple_this_property_assignments_ = false;
   }
 
-  void EnsureInitialized() {
-    if (names_.capacity() == 0) {
-      ASSERT(assigned_arguments_.capacity() == 0);
-      ASSERT(assigned_constants_.capacity() == 0);
-      names_.Initialize(4);
-      assigned_arguments_.Initialize(4);
-      assigned_constants_.Initialize(4);
+  void EnsureAllocation() {
+    if (names_ == NULL) {
+      ASSERT(assigned_arguments_ == NULL);
+      ASSERT(assigned_constants_ == NULL);
+      Zone* zone = isolate_->zone();
+      names_ = new(zone) ZoneStringList(4);
+      assigned_arguments_ = new(zone) ZoneList<int>(4);
+      assigned_constants_ = new(zone) ZoneObjectList(4);
     }
   }
 
   Isolate* isolate_;
   bool only_simple_this_property_assignments_;
-  ZoneStringList names_;
-  ZoneList<int> assigned_arguments_;
-  ZoneObjectList assigned_constants_;
+  ZoneStringList* names_;
+  ZoneList<int>* assigned_arguments_;
+  ZoneObjectList* assigned_constants_;
 };
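
The reverted finder allocates its three zone lists lazily: all three pointers stay NULL until EnsureAllocation sees the first simple assignment, so the common case of a function with no this.x = ... assignments allocates nothing. A sketch of the idiom, with std::unique_ptr standing in for zone allocation:

#include <memory>
#include <string>
#include <vector>

// Lazy allocation in the style of EnsureAllocation(): the lists stay
// null until the first entry arrives, so a function with no simple
// this.x = ... assignments allocates nothing.
class AssignmentRecorder {
 public:
  void Add(const std::string& name, int argument_index) {
    EnsureAllocation();
    names_->push_back(name);
    assigned_arguments_->push_back(argument_index);
  }
  bool empty() const { return names_ == nullptr; }

 private:
  void EnsureAllocation() {
    if (names_ == nullptr) {
      names_.reset(new std::vector<std::string>());
      assigned_arguments_.reset(new std::vector<int>());
      names_->reserve(4);               // matches the initial capacity above
      assigned_arguments_->reserve(4);
    }
  }

  std::unique_ptr<std::vector<std::string> > names_;
  std::unique_ptr<std::vector<int> > assigned_arguments_;
};

int main() {
  AssignmentRecorder recorder;
  recorder.Add("x", 0);  // the first Add triggers the allocation
  return recorder.empty() ? 1 : 0;
}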
 
 
+Statement* Parser::ParseSourceElement(ZoneStringList* labels,
+                                      bool* ok) {
+  if (peek() == Token::FUNCTION) {
+    // FunctionDeclaration is only allowed in the context of SourceElements
+    // (Ecma 262 5th Edition, clause 14):
+    // SourceElement:
+    //    Statement
+    //    FunctionDeclaration
+    // A common language extension is to allow function declarations in place
+    // of any statement. This language extension is disabled in strict mode.
+    return ParseFunctionDeclaration(ok);
+  } else if (peek() == Token::LET) {
+    return ParseVariableStatement(kSourceElement, ok);
+  } else {
+    return ParseStatement(labels, ok);
+  }
+}
+
+
 void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
                                   int end_token,
-                                  bool is_eval,
                                   bool* ok) {
   // SourceElements ::
-  //   (ModuleElement)* <end_token>
+  //   (Statement)* <end_token>
 
   // Allocate a target stack to use for this set of source
   // elements. This way, all scripts and functions get their own
@@ -1113,7 +1134,7 @@
 
   ASSERT(processor != NULL);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
-  ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
+  ThisNamedPropertyAssignmentFinder this_property_assignment_finder(isolate());
   bool directive_prologue = true;     // Parsing directive prologue.
 
   while (peek() != end_token) {
@@ -1122,7 +1143,7 @@
     }
 
     Scanner::Location token_loc = scanner().peek_location();
-    Statement* stat = ParseModuleElement(NULL, CHECK_OK);
+    Statement* stat = ParseSourceElement(NULL, CHECK_OK);
     if (stat == NULL || stat->IsEmpty()) {
       directive_prologue = false;   // End of directive prologue.
       continue;
@@ -1130,8 +1151,8 @@
 
     if (directive_prologue) {
       // A shot at a directive.
-      ExpressionStatement* e_stat;
-      Literal* literal;
+      ExpressionStatement* e_stat;
+      Literal* literal;
       // Still processing directive prologue?
       if ((e_stat = stat->AsExpressionStatement()) != NULL &&
           (literal = e_stat->expression()->AsLiteral()) != NULL &&
@@ -1139,24 +1160,11 @@
         Handle<String> directive = Handle<String>::cast(literal->handle());
 
         // Check "use strict" directive (ES5 14.1).
-        if (top_scope_->is_classic_mode() &&
+        if (!top_scope_->is_strict_mode() &&
             directive->Equals(isolate()->heap()->use_strict()) &&
             token_loc.end_pos - token_loc.beg_pos ==
               isolate()->heap()->use_strict()->length() + 2) {
-          // TODO(mstarzinger): Global strict eval calls, need their own scope
-          // as specified in ES5 10.4.2(3). The correct fix would be to always
-          // add this scope in DoParseProgram(), but that requires adaptations
-          // all over the code base, so we go with a quick-fix for now.
-          if (is_eval && !top_scope_->is_eval_scope()) {
-            ASSERT(top_scope_->is_global_scope());
-            Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
-            scope->set_start_position(top_scope_->start_position());
-            scope->set_end_position(top_scope_->end_position());
-            top_scope_ = scope;
-          }
-          // TODO(ES6): Fix entering extended mode, once it is specified.
-          top_scope_->SetLanguageMode(FLAG_harmony_scoping
-                                      ? EXTENDED_MODE : STRICT_MODE);
+          top_scope_->EnableStrictMode();
           // "use strict" is the only directive for now.
           directive_prologue = false;
         }
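
The arithmetic in the restored condition is the subtle part: a directive only counts as "use strict" if its source span is exactly the literal's length plus the two quote characters, which rejects spellings that use escape sequences (their cooked value matches, but their source span is longer, and ES5 14.1 requires the exact characters). A self-contained sketch of the check:

#include <cassert>
#include <string>

// ES5 14.1: a Use Strict Directive must consist of the exact
// characters "use strict". Comparing the cooked string value *and*
// the raw source span (literal length plus the two quote characters)
// rejects escaped spellings such as "use\u0020strict", whose cooked
// value matches but whose source span is longer.
bool IsUseStrictDirective(const std::string& cooked_value,
                          int token_beg_pos,
                          int token_end_pos) {
  const std::string kUseStrict = "use strict";
  return cooked_value == kUseStrict &&
         token_end_pos - token_beg_pos ==
             static_cast<int>(kUseStrict.size()) + 2;
}

int main() {
  // "use strict" spans positions [0, 12): 10 characters plus 2 quotes.
  assert(IsUseStrictDirective("use strict", 0, 12));
  // "use\u0020strict" cooks to the same value but spans 17 characters.
  assert(!IsUseStrictDirective("use strict", 0, 17));
  return 0;
}
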
@@ -1180,394 +1188,15 @@
         this_property_assignment_finder.only_simple_this_property_assignments()
         && top_scope_->declarations()->length() == 0;
     if (only_simple_this_property_assignments) {
-      current_function_state_->SetThisPropertyAssignmentInfo(
+      lexical_scope_->SetThisPropertyAssignmentInfo(
           only_simple_this_property_assignments,
           this_property_assignment_finder.GetThisPropertyAssignments());
     }
   }
-
   return 0;
 }
 
 
-Statement* Parser::ParseModuleElement(ZoneStringList* labels,
-                                      bool* ok) {
-  // (Ecma 262 5th Edition, clause 14):
-  // SourceElement:
-  //    Statement
-  //    FunctionDeclaration
-  //
-  // In harmony mode we allow additionally the following productions
-  // ModuleElement:
-  //    LetDeclaration
-  //    ConstDeclaration
-  //    ModuleDeclaration
-  //    ImportDeclaration
-  //    ExportDeclaration
-
-  switch (peek()) {
-    case Token::FUNCTION:
-      return ParseFunctionDeclaration(NULL, ok);
-    case Token::LET:
-    case Token::CONST:
-      return ParseVariableStatement(kModuleElement, NULL, ok);
-    case Token::IMPORT:
-      return ParseImportDeclaration(ok);
-    case Token::EXPORT:
-      return ParseExportDeclaration(ok);
-    default: {
-      Statement* stmt = ParseStatement(labels, CHECK_OK);
-      // Handle 'module' as a context-sensitive keyword.
-      if (FLAG_harmony_modules &&
-          peek() == Token::IDENTIFIER &&
-          !scanner().HasAnyLineTerminatorBeforeNext() &&
-          stmt != NULL) {
-        ExpressionStatement* estmt = stmt->AsExpressionStatement();
-        if (estmt != NULL &&
-            estmt->expression()->AsVariableProxy() != NULL &&
-            estmt->expression()->AsVariableProxy()->name()->Equals(
-                isolate()->heap()->module_symbol()) &&
-            !scanner().literal_contains_escapes()) {
-          return ParseModuleDeclaration(NULL, ok);
-        }
-      }
-      return stmt;
-    }
-  }
-}
-
-
-Block* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
-  // ModuleDeclaration:
-  //    'module' Identifier Module
-
-  // Create new block with one expected declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true);
-  Handle<String> name = ParseIdentifier(CHECK_OK);
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details)
-    PrintF("# Module %s...\n", name->ToAsciiArray());
-#endif
-
-  Module* module = ParseModule(CHECK_OK);
-  VariableProxy* proxy = NewUnresolved(name, LET, module->interface());
-  Declaration* declaration =
-      factory()->NewModuleDeclaration(proxy, module, top_scope_);
-  Declare(declaration, true, CHECK_OK);
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details)
-    PrintF("# Module %s.\n", name->ToAsciiArray());
-
-  if (FLAG_print_interfaces) {
-    PrintF("module %s : ", name->ToAsciiArray());
-    module->interface()->Print();
-  }
-#endif
-
-  // TODO(rossberg): Add initialization statement to block.
-
-  if (names) names->Add(name);
-  return block;
-}
-
-
-Module* Parser::ParseModule(bool* ok) {
-  // Module:
-  //    '{' ModuleElement '}'
-  //    '=' ModulePath ';'
-  //    'at' String ';'
-
-  switch (peek()) {
-    case Token::LBRACE:
-      return ParseModuleLiteral(ok);
-
-    case Token::ASSIGN: {
-      Expect(Token::ASSIGN, CHECK_OK);
-      Module* result = ParseModulePath(CHECK_OK);
-      ExpectSemicolon(CHECK_OK);
-      return result;
-    }
-
-    default: {
-      ExpectContextualKeyword("at", CHECK_OK);
-      Module* result = ParseModuleUrl(CHECK_OK);
-      ExpectSemicolon(CHECK_OK);
-      return result;
-    }
-  }
-}
-
-
-Module* Parser::ParseModuleLiteral(bool* ok) {
-  // Module:
-  //    '{' ModuleElement '}'
-
-  // Construct block expecting 16 statements.
-  Block* body = factory()->NewBlock(NULL, 16, false);
-#ifdef DEBUG
-  if (FLAG_print_interface_details) PrintF("# Literal ");
-#endif
-  Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
-
-  Expect(Token::LBRACE, CHECK_OK);
-  scope->set_start_position(scanner().location().beg_pos);
-  scope->SetLanguageMode(EXTENDED_MODE);
-
-  {
-    BlockState block_state(this, scope);
-    TargetCollector collector;
-    Target target(&this->target_stack_, &collector);
-    Target target_body(&this->target_stack_, body);
-    InitializationBlockFinder block_finder(top_scope_, target_stack_);
-
-    while (peek() != Token::RBRACE) {
-      Statement* stat = ParseModuleElement(NULL, CHECK_OK);
-      if (stat && !stat->IsEmpty()) {
-        body->AddStatement(stat);
-        block_finder.Update(stat);
-      }
-    }
-  }
-
-  Expect(Token::RBRACE, CHECK_OK);
-  scope->set_end_position(scanner().location().end_pos);
-  body->set_block_scope(scope);
-
-  scope->interface()->Freeze(ok);
-  ASSERT(ok);
-  return factory()->NewModuleLiteral(body, scope->interface());
-}
-
-
-Module* Parser::ParseModulePath(bool* ok) {
-  // ModulePath:
-  //    Identifier
-  //    ModulePath '.' Identifier
-
-  Module* result = ParseModuleVariable(CHECK_OK);
-  while (Check(Token::PERIOD)) {
-    Handle<String> name = ParseIdentifierName(CHECK_OK);
-#ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Path .%s ", name->ToAsciiArray());
-#endif
-    Module* member = factory()->NewModulePath(result, name);
-    result->interface()->Add(name, member->interface(), ok);
-    if (!*ok) {
-#ifdef DEBUG
-      if (FLAG_print_interfaces) {
-        PrintF("PATH TYPE ERROR at '%s'\n", name->ToAsciiArray());
-        PrintF("result: ");
-        result->interface()->Print();
-        PrintF("member: ");
-        member->interface()->Print();
-      }
-#endif
-      ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
-      return NULL;
-    }
-    result = member;
-  }
-
-  return result;
-}
-
-
-Module* Parser::ParseModuleVariable(bool* ok) {
-  // ModulePath:
-  //    Identifier
-
-  Handle<String> name = ParseIdentifier(CHECK_OK);
-#ifdef DEBUG
-  if (FLAG_print_interface_details)
-    PrintF("# Module variable %s ", name->ToAsciiArray());
-#endif
-  VariableProxy* proxy = top_scope_->NewUnresolved(
-      factory(), name, scanner().location().beg_pos, Interface::NewModule());
-
-  return factory()->NewModuleVariable(proxy);
-}
-
-
-Module* Parser::ParseModuleUrl(bool* ok) {
-  // Module:
-  //    String
-
-  Expect(Token::STRING, CHECK_OK);
-  Handle<String> symbol = GetSymbol(CHECK_OK);
-
-  // TODO(ES6): Request JS resource from environment...
-
-#ifdef DEBUG
-  if (FLAG_print_interface_details) PrintF("# Url ");
-#endif
-  return factory()->NewModuleUrl(symbol);
-}
-
-
-Module* Parser::ParseModuleSpecifier(bool* ok) {
-  // ModuleSpecifier:
-  //    String
-  //    ModulePath
-
-  if (peek() == Token::STRING) {
-    return ParseModuleUrl(ok);
-  } else {
-    return ParseModulePath(ok);
-  }
-}
-
-
-Block* Parser::ParseImportDeclaration(bool* ok) {
-  // ImportDeclaration:
-  //    'import' IdentifierName (',' IdentifierName)* 'from' ModuleSpecifier ';'
-  //
-  // TODO(ES6): implement destructuring ImportSpecifiers
-
-  Expect(Token::IMPORT, CHECK_OK);
-  ZoneStringList names(1);
-
-  Handle<String> name = ParseIdentifierName(CHECK_OK);
-  names.Add(name);
-  while (peek() == Token::COMMA) {
-    Consume(Token::COMMA);
-    name = ParseIdentifierName(CHECK_OK);
-    names.Add(name);
-  }
-
-  ExpectContextualKeyword("from", CHECK_OK);
-  Module* module = ParseModuleSpecifier(CHECK_OK);
-  ExpectSemicolon(CHECK_OK);
-
-  // Generate a separate declaration for each identifier.
-  // TODO(ES6): once we implement destructuring, make that one declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true);
-  for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Import %s ", names[i]->ToAsciiArray());
-#endif
-    Interface* interface = Interface::NewUnknown();
-    module->interface()->Add(names[i], interface, ok);
-    if (!*ok) {
-#ifdef DEBUG
-      if (FLAG_print_interfaces) {
-        PrintF("IMPORT TYPE ERROR at '%s'\n", names[i]->ToAsciiArray());
-        PrintF("module: ");
-        module->interface()->Print();
-      }
-#endif
-      ReportMessage("invalid_module_path", Vector<Handle<String> >(&name, 1));
-      return NULL;
-    }
-    VariableProxy* proxy = NewUnresolved(names[i], LET, interface);
-    Declaration* declaration =
-        factory()->NewImportDeclaration(proxy, module, top_scope_);
-    Declare(declaration, true, CHECK_OK);
-    // TODO(rossberg): Add initialization statement to block.
-  }
-
-  return block;
-}
-
-
-Statement* Parser::ParseExportDeclaration(bool* ok) {
-  // ExportDeclaration:
-  //    'export' Identifier (',' Identifier)* ';'
-  //    'export' VariableDeclaration
-  //    'export' FunctionDeclaration
-  //    'export' ModuleDeclaration
-  //
-  // TODO(ES6): implement structuring ExportSpecifiers
-
-  Expect(Token::EXPORT, CHECK_OK);
-
-  Statement* result = NULL;
-  ZoneStringList names(1);
-  switch (peek()) {
-    case Token::IDENTIFIER: {
-      Handle<String> name = ParseIdentifier(CHECK_OK);
-      // Handle 'module' as a context-sensitive keyword.
-      if (!name->IsEqualTo(CStrVector("module"))) {
-        names.Add(name);
-        while (peek() == Token::COMMA) {
-          Consume(Token::COMMA);
-          name = ParseIdentifier(CHECK_OK);
-          names.Add(name);
-        }
-        ExpectSemicolon(CHECK_OK);
-        result = factory()->NewEmptyStatement();
-      } else {
-        result = ParseModuleDeclaration(&names, CHECK_OK);
-      }
-      break;
-    }
-
-    case Token::FUNCTION:
-      result = ParseFunctionDeclaration(&names, CHECK_OK);
-      break;
-
-    case Token::VAR:
-    case Token::LET:
-    case Token::CONST:
-      result = ParseVariableStatement(kModuleElement, &names, CHECK_OK);
-      break;
-
-    default:
-      *ok = false;
-      ReportUnexpectedToken(scanner().current_token());
-      return NULL;
-  }
-
-  // Extract declared names into export declarations and interface.
-  Interface* interface = top_scope_->interface();
-  for (int i = 0; i < names.length(); ++i) {
-#ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Export %s ", names[i]->ToAsciiArray());
-#endif
-    Interface* inner = Interface::NewUnknown();
-    interface->Add(names[i], inner, CHECK_OK);
-    VariableProxy* proxy = NewUnresolved(names[i], LET, inner);
-    USE(proxy);
-    // TODO(rossberg): Rethink whether we actually need to store export
-    // declarations (for compilation?).
-    // ExportDeclaration* declaration =
-    //     factory()->NewExportDeclaration(proxy, top_scope_);
-    // top_scope_->AddDeclaration(declaration);
-  }
-
-  ASSERT(result != NULL);
-  return result;
-}
-
-
-Statement* Parser::ParseBlockElement(ZoneStringList* labels,
-                                     bool* ok) {
-  // (Ecma 262 5th Edition, clause 14):
-  // SourceElement:
-  //    Statement
-  //    FunctionDeclaration
-  //
-  // In harmony mode we allow additionally the following productions
-  // BlockElement (aka SourceElement):
-  //    LetDeclaration
-  //    ConstDeclaration
-
-  switch (peek()) {
-    case Token::FUNCTION:
-      return ParseFunctionDeclaration(NULL, ok);
-    case Token::LET:
-    case Token::CONST:
-      return ParseVariableStatement(kModuleElement, NULL, ok);
-    default:
-      return ParseStatement(labels, ok);
-  }
-}
-
-
 Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
   // Statement ::
   //   Block
@@ -1601,14 +1230,13 @@
       return ParseBlock(labels, ok);
 
     case Token::CONST:  // fall through
-    case Token::LET:
     case Token::VAR:
-      stmt = ParseVariableStatement(kStatement, NULL, ok);
+      stmt = ParseVariableStatement(kStatement, ok);
       break;
 
     case Token::SEMICOLON:
       Next();
-      return factory()->NewEmptyStatement();
+      return EmptyStatement();
 
     case Token::IF:
       stmt = ParseIfStatement(labels, ok);
@@ -1656,7 +1284,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = factory()->NewBlock(labels, 1, false);
+      Block* result = new(zone()) Block(isolate(), labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1667,20 +1295,15 @@
     }
 
     case Token::FUNCTION: {
-      // FunctionDeclaration is only allowed in the context of SourceElements
-      // (Ecma 262 5th Edition, clause 14):
-      // SourceElement:
-      //    Statement
-      //    FunctionDeclaration
-      // Common language extension is to allow function declaration in place
-      // of any statement. This language extension is disabled in strict mode.
-      if (!top_scope_->is_classic_mode()) {
+      // In strict mode, FunctionDeclaration is only allowed in the context
+      // of SourceElements.
+      if (top_scope_->is_strict_mode()) {
         ReportMessageAt(scanner().peek_location(), "strict_function",
                         Vector<const char*>::empty());
         *ok = false;
         return NULL;
       }
-      return ParseFunctionDeclaration(NULL, ok);
+      return ParseFunctionDeclaration(ok);
     }
 
     case Token::DEBUGGER:
@@ -1697,24 +1320,15 @@
 }
 
 
-VariableProxy* Parser::NewUnresolved(
-    Handle<String> name, VariableMode mode, Interface* interface) {
+VariableProxy* Parser::Declare(Handle<String> name,
+                               Variable::Mode mode,
+                               FunctionLiteral* fun,
+                               bool resolve,
+                               bool* ok) {
+  Variable* var = NULL;
   // If we are inside a function, a declaration of a var/const variable is a
   // truly local variable, and the scope of the variable is always the function
   // scope.
-  // Let/const variables in harmony mode are always added to the immediately
-  // enclosing scope.
-  return DeclarationScope(mode)->NewUnresolved(
-      factory(), name, scanner().location().beg_pos, interface);
-}
-
-
-void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
-  VariableProxy* proxy = declaration->proxy();
-  Handle<String> name = proxy->name();
-  VariableMode mode = declaration->mode();
-  Scope* declaration_scope = DeclarationScope(mode);
-  Variable* var = NULL;
 
   // If a function scope exists, then we can statically declare this
   // variable and also set its mode. In any case, a Declaration node
@@ -1724,19 +1338,17 @@
   // to the calling function context.
   // Similarly, strict mode eval scope does not leak variable declarations to
   // the caller's scope so we declare all locals, too.
-  // Also for block scoped let/const bindings the variable can be
-  // statically declared.
+
+  Scope* declaration_scope = mode == Variable::LET ? top_scope_
+      : top_scope_->DeclarationScope();
   if (declaration_scope->is_function_scope() ||
-      declaration_scope->is_strict_or_extended_eval_scope() ||
-      declaration_scope->is_block_scope() ||
-      declaration_scope->is_module_scope() ||
-      declaration->AsModuleDeclaration() != NULL) {
+      declaration_scope->is_strict_mode_eval_scope() ||
+      declaration_scope->is_block_scope()) {
     // Declare the variable in the function scope.
     var = declaration_scope->LocalLookup(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(
-          name, mode, declaration->initialization(), proxy->interface());
+      var = declaration_scope->DeclareLocal(name, mode);
     } else {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
@@ -1749,13 +1361,12 @@
       //
       // because the var declaration is hoisted to the function scope where 'x'
       // is already bound.
-      if ((mode != VAR) || (var->mode() != VAR)) {
+      if ((mode != Variable::VAR) || (var->mode() != Variable::VAR)) {
         // We only have vars, consts and lets in declarations.
-        ASSERT(var->mode() == VAR ||
-               var->mode() == CONST ||
-               var->mode() == CONST_HARMONY ||
-               var->mode() == LET);
-        if (is_extended_mode()) {
+        ASSERT(var->mode() == Variable::VAR ||
+               var->mode() == Variable::CONST ||
+               var->mode() == Variable::LET);
+        if (harmony_block_scoping_) {
           // In harmony mode we treat re-declarations as early errors. See
           // ES5 16 for a definition of early errors.
           SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
@@ -1763,10 +1374,10 @@
           Vector<const char*> args(elms, 2);
           ReportMessage("redeclaration", args);
           *ok = false;
-          return;
+          return NULL;
         }
-        const char* type = (var->mode() == VAR)
-            ? "var" : var->is_const_mode() ? "const" : "let";
+        const char* type = (var->mode() == Variable::VAR) ? "var" :
+                           (var->mode() == Variable::CONST) ? "const" : "let";
         Handle<String> type_string =
             isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
         Expression* expression =
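
The restored conflict test is easy to misread: the error path is taken whenever the two declarations of a name are not both 'var', since repeated 'var' declarations are legal (they hoist to one function-scope binding) while any pairing that involves 'const' or 'let' is a redeclaration error. A small sketch of the rule with a stand-in enum:

#include <cassert>

// Stand-in for Variable::Mode.
enum Mode { VAR, CONST, LET };

// The redeclaration rule applied above: two declarations of one name
// in the same scope conflict unless *both* are 'var'.
bool IsConflicting(Mode existing, Mode incoming) {
  return incoming != VAR || existing != VAR;
}

int main() {
  assert(!IsConflicting(VAR, VAR));   // var x; var x;   -> ok
  assert(IsConflicting(LET, VAR));    // let x; var x;   -> error
  assert(IsConflicting(VAR, CONST));  // var x; const x; -> error
  return 0;
}
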
@@ -1793,33 +1404,20 @@
   // semantic issue as long as we keep the source order, but it may be
   // a performance issue since it may lead to repeated
   // Runtime::DeclareContextSlot() calls.
-  declaration_scope->AddDeclaration(declaration);
+  VariableProxy* proxy = declaration_scope->NewUnresolved(
+      name, false, scanner().location().beg_pos);
+  declaration_scope->AddDeclaration(
+      new(zone()) Declaration(proxy, mode, fun, top_scope_));
 
-  if ((mode == CONST || mode == CONST_HARMONY) &&
-      declaration_scope->is_global_scope()) {
-    // For global const variables we bind the proxy to a variable.
+  // For global const variables we bind the proxy to a variable.
+  if (mode == Variable::CONST && declaration_scope->is_global_scope()) {
     ASSERT(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
     var = new(zone()) Variable(declaration_scope,
                                name,
-                               mode,
+                               Variable::CONST,
                                true,
-                               kind,
-                               kNeedsInitialization);
-  } else if (declaration_scope->is_eval_scope() &&
-             declaration_scope->is_classic_mode()) {
-    // For variable declarations in a non-strict eval scope the proxy is bound
-    // to a lookup variable to force a dynamic declaration using the
-    // DeclareContextSlot runtime function.
-    Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(declaration_scope,
-                               name,
-                               mode,
-                               true,
-                               kind,
-                               declaration->initialization());
-    var->AllocateTo(Variable::LOOKUP, -1);
-    resolve = true;
+                               kind);
   }
 
   // If requested and we have a local variable, bind the proxy to the variable
@@ -1846,30 +1444,9 @@
   // initialization code. Thus, inside the 'with' statement, we need
   // both access to the static and the dynamic context chain; the
   // runtime needs to provide both.
-  if (resolve && var != NULL) {
-    proxy->BindTo(var);
+  if (resolve && var != NULL) proxy->BindTo(var);
 
-    if (FLAG_harmony_modules) {
-      bool ok;
-#ifdef DEBUG
-      if (FLAG_print_interface_details)
-        PrintF("# Declare %s\n", var->name()->ToAsciiArray());
-#endif
-      proxy->interface()->Unify(var->interface(), &ok);
-      if (!ok) {
-#ifdef DEBUG
-        if (FLAG_print_interfaces) {
-          PrintF("DECLARE TYPE ERROR\n");
-          PrintF("proxy: ");
-          proxy->interface()->Print();
-          PrintF("var: ");
-          var->interface()->Print();
-        }
-#endif
-        ReportMessage("module_type_error", Vector<Handle<String> >(&name, 1));
-      }
-    }
-  }
+  return proxy;
 }
 
 
@@ -1896,7 +1473,7 @@
   // isn't lazily compiled. The extension structures are only
   // accessible while parsing the first time not when reparsing
   // because of lazy compilation.
-  DeclarationScope(VAR)->ForceEagerCompilation();
+  top_scope_->DeclarationScope()->ForceEagerCompilation();
 
   // Compute the function template for the native function.
   v8::Handle<v8::FunctionTemplate> fun_template =
@@ -1910,7 +1487,7 @@
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared =
       isolate()->factory()->NewSharedFunctionInfo(name, literals, code,
-          Handle<ScopeInfo>(fun->shared()->scope_info()));
+          Handle<SerializedScopeInfo>(fun->shared()->scope_info()));
   shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the shared function info.
@@ -1920,20 +1497,16 @@
 
   // TODO(1240846): It's weird that native function declarations are
   // introduced dynamically when we meet their declarations, whereas
-  // other functions are set up when entering the surrounding scope.
-  VariableProxy* proxy = NewUnresolved(name, VAR);
-  Declaration* declaration =
-      factory()->NewVariableDeclaration(proxy, VAR, top_scope_);
-  Declare(declaration, true, CHECK_OK);
+  // other functions are set up when entering the surrounding scope.
   SharedFunctionInfoLiteral* lit =
-      factory()->NewSharedFunctionInfoLiteral(shared);
-  return factory()->NewExpressionStatement(
-      factory()->NewAssignment(
-          Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition));
+      new(zone()) SharedFunctionInfoLiteral(isolate(), shared);
+  VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK);
+  return new(zone()) ExpressionStatement(new(zone()) Assignment(
+      isolate(), Token::INIT_VAR, var, lit, RelocInfo::kNoPosition));
 }
 
 
-Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseFunctionDeclaration(bool* ok) {
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
@@ -1949,18 +1522,14 @@
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  VariableMode mode = is_extended_mode() ? LET : VAR;
-  VariableProxy* proxy = NewUnresolved(name, mode);
-  Declaration* declaration =
-      factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_);
-  Declare(declaration, true, CHECK_OK);
-  if (names) names->Add(name);
-  return factory()->NewEmptyStatement();
+  Variable::Mode mode = harmony_block_scoping_ ? Variable::LET : Variable::VAR;
+  Declare(name, mode, fun, true, CHECK_OK);
+  return EmptyStatement();
 }
 
 
 Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
-  if (top_scope_->is_extended_mode()) return ParseScopedBlock(labels, ok);
+  if (harmony_block_scoping_) return ParseScopedBlock(labels, ok);
 
   // Block ::
   //   '{' Statement* '}'
@@ -1969,7 +1538,7 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = factory()->NewBlock(labels, 16, false);
+  Block* result = new(zone()) Block(isolate(), labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -1986,26 +1555,27 @@
 
 
 Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
-  // The harmony mode uses block elements instead of statements.
-  //
-  // Block ::
-  //   '{' BlockElement* '}'
-
   // Construct block expecting 16 statements.
-  Block* body = factory()->NewBlock(labels, 16, false);
-  Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
+  Block* body = new(zone()) Block(isolate(), labels, 16, false);
+  Scope* saved_scope = top_scope_;
+  Scope* block_scope = NewScope(top_scope_,
+                                Scope::BLOCK_SCOPE,
+                                inside_with());
+  if (top_scope_->is_strict_mode()) {
+    block_scope->EnableStrictMode();
+  }
+  top_scope_ = block_scope;
 
   // Parse the statements and collect escaping labels.
+  TargetCollector collector;
+  Target target(&this->target_stack_, &collector);
   Expect(Token::LBRACE, CHECK_OK);
-  block_scope->set_start_position(scanner().location().beg_pos);
-  { BlockState block_state(this, block_scope);
-    TargetCollector collector;
-    Target target(&this->target_stack_, &collector);
+  {
     Target target_body(&this->target_stack_, body);
     InitializationBlockFinder block_finder(top_scope_, target_stack_);
 
     while (peek() != Token::RBRACE) {
-      Statement* stat = ParseBlockElement(NULL, CHECK_OK);
+      Statement* stat = ParseSourceElement(NULL, CHECK_OK);
       if (stat && !stat->IsEmpty()) {
         body->AddStatement(stat);
         block_finder.Update(stat);
@@ -2013,7 +1583,8 @@
     }
   }
   Expect(Token::RBRACE, CHECK_OK);
-  block_scope->set_end_position(scanner().location().end_pos);
+  top_scope_ = saved_scope;
+
   block_scope = block_scope->FinalizeBlockScope();
   body->set_block_scope(block_scope);
   return body;
@@ -2021,14 +1592,14 @@
 
 
 Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
-                                      ZoneStringList* names,
                                       bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
   Handle<String> ignore;
-  Block* result =
-      ParseVariableDeclarations(var_context, NULL, names, &ignore, CHECK_OK);
+  Block* result = ParseVariableDeclarations(var_context,
+                                            &ignore,
+                                            CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
 }
@@ -2041,30 +1612,17 @@
 
 
 // If the variable declaration declares exactly one non-const
-// variable, then *out is set to that variable. In all other cases,
-// *out is untouched; in particular, it is the caller's responsibility
+// variable, then *out is set to that variable. In all other cases,
+// *out is untouched; in particular, it is the caller's responsibility
 // to initialize it properly. This mechanism is used for the parsing
 // of 'for-in' loops.
-Block* Parser::ParseVariableDeclarations(
-    VariableDeclarationContext var_context,
-    VariableDeclarationProperties* decl_props,
-    ZoneStringList* names,
-    Handle<String>* out,
-    bool* ok) {
+Block* Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
+                                         Handle<String>* out,
+                                         bool* ok) {
   // VariableDeclarations ::
-  //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
-  //
-  // The ES6 Draft Rev3 specifies the following grammar for const declarations
-  //
-  // ConstDeclaration ::
-  //   const ConstBinding (',' ConstBinding)* ';'
-  // ConstBinding ::
-  //   Identifier '=' AssignmentExpression
-  //
-  // TODO(ES6):
-  // ConstBinding ::
-  //   BindingPattern '=' AssignmentExpression
-  VariableMode mode = VAR;
+  //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
+
+  Variable::Mode mode = Variable::VAR;
   // True if the binding needs initialization. 'let' and 'const' declared
   // bindings are created uninitialized by their declaration nodes and
   // need initialization. 'var' declared bindings are always initialized
@@ -2075,67 +1633,34 @@
   if (peek() == Token::VAR) {
     Consume(Token::VAR);
   } else if (peek() == Token::CONST) {
-    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
-    //
-    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    //
-    // However disallowing const in classic mode will break compatibility with
-    // existing pages. Therefore we keep allowing const with the old
-    // non-harmony semantics in classic mode.
     Consume(Token::CONST);
-    switch (top_scope_->language_mode()) {
-      case CLASSIC_MODE:
-        mode = CONST;
-        init_op = Token::INIT_CONST;
-        break;
-      case STRICT_MODE:
-        ReportMessage("strict_const", Vector<const char*>::empty());
-        *ok = false;
-        return NULL;
-      case EXTENDED_MODE:
-        if (var_context == kStatement) {
-          // In extended mode 'const' declarations are only allowed in source
-          // element positions.
-          ReportMessage("unprotected_const", Vector<const char*>::empty());
-          *ok = false;
-          return NULL;
-        }
-        mode = CONST_HARMONY;
-        init_op = Token::INIT_CONST_HARMONY;
-    }
-    is_const = true;
-    needs_init = true;
-  } else if (peek() == Token::LET) {
-    // ES6 Draft Rev4 section 12.2.1:
-    //
-    // LetDeclaration : let LetBindingList ;
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    if (!is_extended_mode()) {
-      ReportMessage("illegal_let", Vector<const char*>::empty());
+    if (top_scope_->is_strict_mode()) {
+      ReportMessage("strict_const", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
+    mode = Variable::CONST;
+    is_const = true;
+    needs_init = true;
+    init_op = Token::INIT_CONST;
+  } else if (peek() == Token::LET) {
     Consume(Token::LET);
-    if (var_context == kStatement) {
-      // Let declarations are only allowed in source element positions.
+    if (var_context != kSourceElement &&
+        var_context != kForStatement) {
+      ASSERT(var_context == kStatement);
       ReportMessage("unprotected_let", Vector<const char*>::empty());
       *ok = false;
       return NULL;
     }
-    mode = LET;
+    mode = Variable::LET;
     needs_init = true;
     init_op = Token::INIT_LET;
   } else {
     UNREACHABLE();  // by current callers
   }
 
-  Scope* declaration_scope = DeclarationScope(mode);
-
+  Scope* declaration_scope = mode == Variable::LET
+      ? top_scope_ : top_scope_->DeclarationScope();
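
This scope selection is the core of the let/var split in the reverted code: 'let' binds in the current scope, which may be a block scope, while 'var' and 'const' hoist to the nearest enclosing function or global scope via DeclarationScope(). A stand-in sketch of the lookup:

#include <cassert>
#include <cstddef>

// Minimal stand-in for the scope chain; not the V8 Scope class.
struct Scope {
  Scope* outer;
  bool is_function_or_global;

  Scope* DeclarationScope() {
    Scope* scope = this;
    while (!scope->is_function_or_global) scope = scope->outer;
    return scope;
  }
};

// 'let' declares in the current (possibly block) scope; 'var' and
// 'const' hoist to the nearest function or global scope.
Scope* ScopeForDeclaration(bool is_let, Scope* top_scope) {
  return is_let ? top_scope : top_scope->DeclarationScope();
}

int main() {
  Scope global = { NULL, true };
  Scope function = { &global, true };
  Scope block = { &function, false };  // e.g. the body of '{ ... }'
  assert(ScopeForDeclaration(true, &block) == &block);      // let
  assert(ScopeForDeclaration(false, &block) == &function);  // var, const
  return 0;
}
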
   // The scope of a var/const declared variable anywhere inside a function
   // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). Thus we can
   // transform a source-level var/const declaration into a (Function)
@@ -2149,7 +1674,7 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true);
+  Block* block = new(zone()) Block(isolate(), NULL, 1, true);
   int nvars = 0;  // the number of variables declared
   Handle<String> name;
   do {
@@ -2161,7 +1686,7 @@
     if (fni_ != NULL) fni_->PushVariableName(name);
 
     // Strict mode variables may not be named eval or arguments
-    if (!declaration_scope->is_classic_mode() && IsEvalOrArguments(name)) {
+    if (declaration_scope->is_strict_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_var_name", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -2179,13 +1704,8 @@
     // If we have a const declaration, in an inner scope, the proxy is always
     // bound to the declared variable (independent of possibly surrounding with
     // statements).
-    // For let/const declarations in harmony mode, we can also immediately
-    // pre-resolve the proxy because it resides in the same scope as the
-    // declaration.
-    VariableProxy* proxy = NewUnresolved(name, mode);
-    Declaration* declaration =
-        factory()->NewVariableDeclaration(proxy, mode, top_scope_);
-    Declare(declaration, mode != VAR, CHECK_OK);
+    Declare(name, mode, NULL, is_const /* always bound for CONST! */,
+            CHECK_OK);
     nvars++;
     if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
       ReportMessageAt(scanner().location(), "too_many_variables",
@@ -2193,7 +1713,6 @@
       *ok = false;
       return NULL;
     }
-    if (names) names->Add(name);
 
     // Parse initialization expression if present and/or needed. A
     // declaration of the form:
@@ -2225,8 +1744,7 @@
     Scope* initialization_scope = is_const ? declaration_scope : top_scope_;
     Expression* value = NULL;
     int position = -1;
-    // Harmony consts have non-optional initializers.
-    if (peek() == Token::ASSIGN || mode == CONST_HARMONY) {
+    if (peek() == Token::ASSIGN) {
       Expect(Token::ASSIGN, CHECK_OK);
       position = scanner().location().beg_pos;
       value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
@@ -2238,12 +1756,6 @@
       } else {
         fni_->RemoveLastFunction();
       }
-      if (decl_props != NULL) *decl_props = kHasInitializers;
-    }
-
-    // Record the end position of the initializer.
-    if (proxy->var() != NULL) {
-      proxy->var()->set_initializer_position(scanner().location().end_pos);
     }
 
     // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
@@ -2270,11 +1782,12 @@
     // declaration statement has been executed. This is important in
     // browsers where the global object (window) has lots of
     // properties defined in prototype objects.
+
     if (initialization_scope->is_global_scope()) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3);
       // We have at least 1 parameter.
-      arguments->Add(factory()->NewLiteral(name));
+      arguments->Add(NewLiteral(name));
       CallRuntime* initialize;
 
       if (is_const) {
@@ -2285,15 +1798,19 @@
         // and add it to the initialization statement block.
         // Note that the function does different things depending on
         // the number of arguments (1 or 2).
-        initialize = factory()->NewCallRuntime(
-            isolate()->factory()->InitializeConstGlobal_symbol(),
-            Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
-            arguments);
+        initialize =
+            new(zone()) CallRuntime(
+                isolate(),
+                isolate()->factory()->InitializeConstGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
+                arguments);
       } else {
         // Add strict mode.
         // We may want to pass singleton to avoid Literal allocations.
-        LanguageMode language_mode = initialization_scope->language_mode();
-        arguments->Add(factory()->NewNumberLiteral(language_mode));
+        StrictModeFlag flag = initialization_scope->is_strict_mode()
+            ? kStrictMode
+            : kNonStrictMode;
+        arguments->Add(NewNumberLiteral(flag));
 
         // Be careful not to assign a value to the global variable if
         // we're in a with. The initialization value should not
@@ -2308,42 +1825,39 @@
         // and add it to the initialization statement block.
         // Note that the function does different things depending on
         // the number of arguments (2 or 3).
-        initialize = factory()->NewCallRuntime(
-            isolate()->factory()->InitializeVarGlobal_symbol(),
-            Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
-            arguments);
+        initialize =
+            new(zone()) CallRuntime(
+                isolate(),
+                isolate()->factory()->InitializeVarGlobal_symbol(),
+                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
+                arguments);
       }
 
-      block->AddStatement(factory()->NewExpressionStatement(initialize));
-    } else if (needs_init) {
-      // Constant initializations always assign to the declared constant which
-      // is always at the function scope level. This is only relevant for
-      // dynamically looked-up variables and constants (the start context for
-      // constant lookups is always the function context, while it is the top
-      // context for var declared variables). Sigh...
-      // For 'let' and 'const' declared variables in harmony mode the
-      // initialization also always assigns to the declared variable.
-      ASSERT(proxy != NULL);
-      ASSERT(proxy->var() != NULL);
-      ASSERT(value != NULL);
-      Assignment* assignment =
-          factory()->NewAssignment(init_op, proxy, value, position);
-      block->AddStatement(factory()->NewExpressionStatement(assignment));
-      value = NULL;
+      block->AddStatement(new(zone()) ExpressionStatement(initialize));
     }
 
     // Add an assignment node to the initialization statement block if we still
-    // have a pending initialization value.
+    // have a pending initialization value. We must distinguish between
+    // different kinds of declarations: 'var' initializations are simply
+    // assignments (with all the consequences if they are inside a 'with'
+    // statement - they may change a 'with' object property). Constant
+    // initializations always assign to the declared constant which is
+    // always at the function scope level. This is only relevant for
+    // dynamically looked-up variables and constants (the start context
+    // for constant lookups is always the function context, while it is
+    // the top context for var declared variables). Sigh...
+    // For 'let' declared variables the initialization is in the same scope
+    // as the declaration. Thus dynamic lookups are unnecessary even if the
+    // block scope is inside a with.
     if (value != NULL) {
-      ASSERT(mode == VAR);
-      // 'var' initializations are simply assignments (with all the consequences
-      // if they are inside a 'with' statement - they may change a 'with' object
-      // property).
+      bool in_with = mode == Variable::VAR ? inside_with() : false;
       VariableProxy* proxy =
-          initialization_scope->NewUnresolved(factory(), name);
+          initialization_scope->NewUnresolved(name, in_with);
       Assignment* assignment =
-          factory()->NewAssignment(init_op, proxy, value, position);
-      block->AddStatement(factory()->NewExpressionStatement(assignment));
+          new(zone()) Assignment(isolate(), init_op, proxy, value, position);
+      if (block) {
+        block->AddStatement(new(zone()) ExpressionStatement(assignment));
+      }
     }
 
     if (fni_ != NULL) fni_->Leave();
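
The two runtime initializers called above are effectively variadic: per the surrounding comments, InitializeConstGlobal behaves differently with 1 vs. 2 arguments (name, optional value) and InitializeVarGlobal with 2 vs. 3 (name, strict-mode flag, optional value). A rough sketch of how such an argument list is shaped, with std::string standing in for the AST expression nodes and illustrative flag names:

#include <cstddef>
#include <string>
#include <vector>

// Rough sketch of the argument lists described in the comments above;
// this is illustrative, not the V8 code path.
std::vector<std::string> GlobalInitArgs(bool is_const,
                                        bool is_strict,
                                        const std::string& name,
                                        const std::string* value) {
  std::vector<std::string> args;
  args.push_back(name);  // always the first argument
  if (!is_const) {
    // InitializeVarGlobal also receives the strict-mode flag.
    args.push_back(is_strict ? "kStrictMode" : "kNonStrictMode");
  }
  // The initialization value is only appended when one is present.
  if (value != NULL) args.push_back(*value);
  return args;
}

int main() {
  std::string value = "value_expression";
  // var in strict code with an initializer: (name, flag, value).
  return GlobalInitArgs(false, true, "x", &value).size() == 3 ? 0 : 1;
}
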
@@ -2421,18 +1935,9 @@
     return ParseNativeDeclaration(ok);
   }
 
-  // Parsed expression statement, or the context-sensitive 'module' keyword.
-  // Only expect semicolon in the former case.
-  if (!FLAG_harmony_modules ||
-      peek() != Token::IDENTIFIER ||
-      scanner().HasAnyLineTerminatorBeforeNext() ||
-      expr->AsVariableProxy() == NULL ||
-      !expr->AsVariableProxy()->name()->Equals(
-          isolate()->heap()->module_symbol()) ||
-      scanner().literal_contains_escapes()) {
-    ExpectSemicolon(CHECK_OK);
-  }
-  return factory()->NewExpressionStatement(expr);
+  // Parsed expression statement.
+  ExpectSemicolon(CHECK_OK);
+  return new(zone()) ExpressionStatement(expr);
 }
 
 
@@ -2450,9 +1955,10 @@
     Next();
     else_statement = ParseStatement(labels, CHECK_OK);
   } else {
-    else_statement = factory()->NewEmptyStatement();
+    else_statement = EmptyStatement();
   }
-  return factory()->NewIfStatement(condition, then_statement, else_statement);
+  return new(zone()) IfStatement(
+      isolate(), condition, then_statement, else_statement);
 }
 
 
@@ -2482,7 +1988,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return factory()->NewContinueStatement(target);
+  return new(zone()) ContinueStatement(target);
 }
 
 
@@ -2500,8 +2006,7 @@
   // Parse labeled break statements that target themselves into
   // empty statements, e.g. 'l1: l2: l3: break l2;'
   if (!label.is_null() && ContainsLabel(labels, label)) {
-    ExpectSemicolon(CHECK_OK);
-    return factory()->NewEmptyStatement();
+    return EmptyStatement();
   }
   BreakableStatement* target = NULL;
   target = LookupBreakTarget(label, CHECK_OK);
@@ -2518,7 +2023,7 @@
     return NULL;
   }
   ExpectSemicolon(CHECK_OK);
-  return factory()->NewBreakStatement(target);
+  return new(zone()) BreakStatement(target);
 }
 
 
@@ -2538,11 +2043,11 @@
       tok == Token::RBRACE ||
       tok == Token::EOS) {
     ExpectSemicolon(CHECK_OK);
-    result = factory()->NewReturnStatement(GetLiteralUndefined());
+    result = new(zone()) ReturnStatement(GetLiteralUndefined());
   } else {
     Expression* expr = ParseExpression(true, CHECK_OK);
     ExpectSemicolon(CHECK_OK);
-    result = factory()->NewReturnStatement(expr);
+    result = new(zone()) ReturnStatement(expr);
   }
 
   // An ECMAScript program is considered syntactically incorrect if it
@@ -2555,7 +2060,7 @@
       declaration_scope->is_eval_scope()) {
     Handle<String> type = isolate()->factory()->illegal_return_symbol();
     Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
-    return factory()->NewExpressionStatement(throw_error);
+    return new(zone()) ExpressionStatement(throw_error);
   }
   return result;
 }
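
Editor's note: the recovery above is deliberate. Global and eval code may not contain 'return',
but rather than abort the parse, the restored code swaps the statement for one that throws at
run time. A minimal JavaScript illustration (exact message text varies by version):

    eval("return 42;");   // throws SyntaxError: illegal return statement
    // Function bodies are unaffected: (function () { return 42; })() is fine.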
@@ -2567,7 +2072,7 @@
 
   Expect(Token::WITH, CHECK_OK);
 
-  if (!top_scope_->is_classic_mode()) {
+  if (top_scope_->is_strict_mode()) {
     ReportMessage("strict_mode_with", Vector<const char*>::empty());
     *ok = false;
     return NULL;
@@ -2577,15 +2082,11 @@
   Expression* expr = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
 
+  ++with_nesting_level_;
   top_scope_->DeclarationScope()->RecordWithStatement();
-  Scope* with_scope = NewScope(top_scope_, WITH_SCOPE);
-  Statement* stmt;
-  { BlockState block_state(this, with_scope);
-    with_scope->set_start_position(scanner().peek_location().beg_pos);
-    stmt = ParseStatement(labels, CHECK_OK);
-    with_scope->set_end_position(scanner().location().end_pos);
-  }
-  return factory()->NewWithStatement(expr, stmt);
+  Statement* stmt = ParseStatement(labels, CHECK_OK);
+  --with_nesting_level_;
+  return new(zone()) WithStatement(expr, stmt);
 }
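
Editor's note: after the revert, 'with' bodies are tracked by a simple nesting counter instead
of a dedicated WITH_SCOPE, and names used inside may resolve dynamically against the 'with'
object. A sketch of the source forms involved (the strict-mode case is deliberately illegal):

    var o = { x: 1 };
    with (o) { x = 2; }   // 'x' resolves against 'o' at run time; o.x becomes 2
    // In strict mode the statement itself is rejected at parse time:
    // "use strict"; with (o) {}   -> SyntaxError ("strict_mode_with")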
 
 
@@ -2627,7 +2128,7 @@
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
-  SwitchStatement* statement = factory()->NewSwitchStatement(labels);
+  SwitchStatement* statement = new(zone()) SwitchStatement(isolate(), labels);
   Target target(&this->target_stack_, statement);
 
   Expect(Token::SWITCH, CHECK_OK);
@@ -2663,7 +2164,8 @@
   Expression* exception = ParseExpression(true, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
 
-  return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos));
+  return new(zone()) ExpressionStatement(
+      new(zone()) Throw(isolate(), exception, pos));
 }
 
 
@@ -2708,11 +2210,9 @@
     Consume(Token::CATCH);
 
     Expect(Token::LPAREN, CHECK_OK);
-    catch_scope = NewScope(top_scope_, CATCH_SCOPE);
-    catch_scope->set_start_position(scanner().location().beg_pos);
     name = ParseIdentifier(CHECK_OK);
 
-    if (!top_scope_->is_classic_mode() && IsEvalOrArguments(name)) {
+    if (top_scope_->is_strict_mode() && IsEvalOrArguments(name)) {
       ReportMessage("strict_catch_variable", Vector<const char*>::empty());
       *ok = false;
       return NULL;
@@ -2722,16 +2222,22 @@
 
     if (peek() == Token::LBRACE) {
       Target target(&this->target_stack_, &catch_collector);
-      VariableMode mode = is_extended_mode() ? LET : VAR;
-      catch_variable =
-          catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
+      catch_scope = NewScope(top_scope_, Scope::CATCH_SCOPE, inside_with());
+      if (top_scope_->is_strict_mode()) {
+        catch_scope->EnableStrictMode();
+      }
+      Variable::Mode mode = harmony_block_scoping_
+          ? Variable::LET : Variable::VAR;
+      catch_variable = catch_scope->DeclareLocal(name, mode);
 
-      BlockState block_state(this, catch_scope);
+      Scope* saved_scope = top_scope_;
+      top_scope_ = catch_scope;
       catch_block = ParseBlock(NULL, CHECK_OK);
+      top_scope_ = saved_scope;
     } else {
       Expect(Token::LBRACE, CHECK_OK);
     }
-    catch_scope->set_end_position(scanner().location().end_pos);
+
     tok = peek();
   }
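
Editor's note: the catch parameter is declared in its own catch scope (LET under harmony block
scoping, VAR otherwise), so the binding is visible only inside the catch block:

    try { throw new Error("boom"); } catch (e) {
      // 'e' is bound here, in the catch scope
    }
    // typeof e  -> "undefined": the binding does not leak out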
 
@@ -2749,11 +2255,13 @@
   if (catch_block != NULL && finally_block != NULL) {
     // If we have both, create an inner try/catch.
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    int index = current_function_state_->NextHandlerIndex();
-    TryCatchStatement* statement = factory()->NewTryCatchStatement(
-        index, try_block, catch_scope, catch_variable, catch_block);
+    TryCatchStatement* statement =
+        new(zone()) TryCatchStatement(try_block,
+                                      catch_scope,
+                                      catch_variable,
+                                      catch_block);
     statement->set_escaping_targets(try_collector.targets());
-    try_block = factory()->NewBlock(NULL, 1, false);
+    try_block = new(zone()) Block(isolate(), NULL, 1, false);
     try_block->AddStatement(statement);
     catch_block = NULL;  // Clear to indicate it's been handled.
   }
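
Editor's note: as the comment above says, a full try/catch/finally is split during parsing.
The try/catch becomes the sole statement of a synthetic one-statement block, which then serves
as the body of a try/finally. In source terms (names illustrative):

    try { f(); } catch (e) { g(e); } finally { h(); }
    // is treated as if written:
    try {
      try { f(); } catch (e) { g(e); }
    } finally { h(); }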
@@ -2762,13 +2270,14 @@
   if (catch_block != NULL) {
     ASSERT(finally_block == NULL);
     ASSERT(catch_scope != NULL && catch_variable != NULL);
-    int index = current_function_state_->NextHandlerIndex();
-    result = factory()->NewTryCatchStatement(
-        index, try_block, catch_scope, catch_variable, catch_block);
+    result =
+        new(zone()) TryCatchStatement(try_block,
+                                      catch_scope,
+                                      catch_variable,
+                                      catch_block);
   } else {
     ASSERT(finally_block != NULL);
-    int index = current_function_state_->NextHandlerIndex();
-    result = factory()->NewTryFinallyStatement(index, try_block, finally_block);
+    result = new(zone()) TryFinallyStatement(try_block, finally_block);
     // Combine the jump targets of the try block and the possible catch block.
     try_collector.targets()->AddAll(*catch_collector.targets());
   }
@@ -2783,7 +2292,7 @@
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  DoWhileStatement* loop = factory()->NewDoWhileStatement(labels);
+  DoWhileStatement* loop = new(zone()) DoWhileStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2814,7 +2323,7 @@
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  WhileStatement* loop = factory()->NewWhileStatement(labels);
+  WhileStatement* loop = new(zone()) WhileStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2834,23 +2343,17 @@
 
   Statement* init = NULL;
 
-  // Create an in-between scope for let-bound iteration variables.
-  Scope* saved_scope = top_scope_;
-  Scope* for_scope = NewScope(top_scope_, BLOCK_SCOPE);
-  top_scope_ = for_scope;
-
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
-  for_scope->set_start_position(scanner().location().beg_pos);
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST) {
       Handle<String> name;
       Block* variable_statement =
-          ParseVariableDeclarations(kForStatement, NULL, NULL, &name, CHECK_OK);
+          ParseVariableDeclarations(kForStatement, &name, CHECK_OK);
 
       if (peek() == Token::IN && !name.is_null()) {
-        VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
-        ForInStatement* loop = factory()->NewForInStatement(labels);
+        VariableProxy* each = top_scope_->NewUnresolved(name, inside_with());
+        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2859,71 +2362,15 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = factory()->NewBlock(NULL, 2, false);
+        Block* result = new(zone()) Block(isolate(), NULL, 2, false);
         result->AddStatement(variable_statement);
         result->AddStatement(loop);
-        top_scope_ = saved_scope;
-        for_scope->set_end_position(scanner().location().end_pos);
-        for_scope = for_scope->FinalizeBlockScope();
-        ASSERT(for_scope == NULL);
         // Parsed for-in loop w/ variable/const declaration.
         return result;
       } else {
         init = variable_statement;
       }
-    } else if (peek() == Token::LET) {
-      Handle<String> name;
-      VariableDeclarationProperties decl_props = kHasNoInitializers;
-      Block* variable_statement =
-         ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
-                                   CHECK_OK);
-      bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
-      if (peek() == Token::IN && accept_IN) {
-        // Rewrite a for-in statement of the form
-        //
-        //   for (let x in e) b
-        //
-        // into
-        //
-        //   <let x' be a temporary variable>
-        //   for (x' in e) {
-        //     let x;
-        //     x = x';
-        //     b;
-        //   }
 
-        // TODO(keuchel): Move the temporary variable to the block scope, after
-        // implementing stack allocated block scoped variables.
-        Variable* temp = top_scope_->DeclarationScope()->NewTemporary(name);
-        VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
-        VariableProxy* each = top_scope_->NewUnresolved(factory(), name);
-        ForInStatement* loop = factory()->NewForInStatement(labels);
-        Target target(&this->target_stack_, loop);
-
-        Expect(Token::IN, CHECK_OK);
-        Expression* enumerable = ParseExpression(true, CHECK_OK);
-        Expect(Token::RPAREN, CHECK_OK);
-
-        Statement* body = ParseStatement(NULL, CHECK_OK);
-        Block* body_block = factory()->NewBlock(NULL, 3, false);
-        Assignment* assignment = factory()->NewAssignment(
-            Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
-        Statement* assignment_statement =
-            factory()->NewExpressionStatement(assignment);
-        body_block->AddStatement(variable_statement);
-        body_block->AddStatement(assignment_statement);
-        body_block->AddStatement(body);
-        loop->Initialize(temp_proxy, enumerable, body_block);
-        top_scope_ = saved_scope;
-        for_scope->set_end_position(scanner().location().end_pos);
-        for_scope = for_scope->FinalizeBlockScope();
-        body_block->set_block_scope(for_scope);
-        // Parsed for-in loop w/ let declaration.
-        return loop;
-
-      } else {
-        init = variable_statement;
-      }
     } else {
       Expression* expression = ParseExpression(false, CHECK_OK);
       if (peek() == Token::IN) {
@@ -2936,7 +2383,7 @@
               isolate()->factory()->invalid_lhs_in_for_in_symbol();
           expression = NewThrowReferenceError(type);
         }
-        ForInStatement* loop = factory()->NewForInStatement(labels);
+        ForInStatement* loop = new(zone()) ForInStatement(isolate(), labels);
         Target target(&this->target_stack_, loop);
 
         Expect(Token::IN, CHECK_OK);
@@ -2945,21 +2392,17 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         if (loop) loop->Initialize(expression, enumerable, body);
-        top_scope_ = saved_scope;
-        for_scope->set_end_position(scanner().location().end_pos);
-        for_scope = for_scope->FinalizeBlockScope();
-        ASSERT(for_scope == NULL);
         // Parsed for-in loop.
         return loop;
 
       } else {
-        init = factory()->NewExpressionStatement(expression);
+        init = new(zone()) ExpressionStatement(expression);
       }
     }
   }
 
   // Standard 'for' loop
-  ForStatement* loop = factory()->NewForStatement(labels);
+  ForStatement* loop = new(zone()) ForStatement(isolate(), labels);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
@@ -2974,36 +2417,13 @@
   Statement* next = NULL;
   if (peek() != Token::RPAREN) {
     Expression* exp = ParseExpression(true, CHECK_OK);
-    next = factory()->NewExpressionStatement(exp);
+    next = new(zone()) ExpressionStatement(exp);
   }
   Expect(Token::RPAREN, CHECK_OK);
 
   Statement* body = ParseStatement(NULL, CHECK_OK);
-  top_scope_ = saved_scope;
-  for_scope->set_end_position(scanner().location().end_pos);
-  for_scope = for_scope->FinalizeBlockScope();
-  if (for_scope != NULL) {
-    // Rewrite a for statement of the form
-    //
-    //   for (let x = i; c; n) b
-    //
-    // into
-    //
-    //   {
-    //     let x = i;
-    //     for (; c; n) b
-    //   }
-    ASSERT(init != NULL);
-    Block* result = factory()->NewBlock(NULL, 2, false);
-    result->AddStatement(init);
-    result->AddStatement(loop);
-    result->set_block_scope(for_scope);
-    if (loop) loop->Initialize(NULL, cond, next, body);
-    return result;
-  } else {
-    if (loop) loop->Initialize(init, cond, next, body);
-    return loop;
-  }
+  if (loop) loop->Initialize(init, cond, next, body);
+  return loop;
 }
 
 
@@ -3018,8 +2438,8 @@
     Expect(Token::COMMA, CHECK_OK);
     int position = scanner().location().beg_pos;
     Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-    result =
-        factory()->NewBinaryOperation(Token::COMMA, result, right, position);
+    result = new(zone()) BinaryOperation(
+        isolate(), Token::COMMA, result, right, position);
   }
   return result;
 }
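
Editor's note: comma expressions come out of this loop as left-nested BinaryOperation nodes
with Token::COMMA, so evaluation order and the result value follow from ordinary
binary-operator evaluation:

    var r = (f(), g(), h());   // parsed as ((f(), g()), h()); r is h()'s value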
@@ -3050,11 +2470,10 @@
     expression = NewThrowReferenceError(type);
   }
 
-  if (!top_scope_->is_classic_mode()) {
+  if (top_scope_->is_strict_mode()) {
     // Assignment to eval or arguments is disallowed in strict mode.
     CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
   }
-  MarkAsLValue(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   int pos = scanner().location().beg_pos;
@@ -3070,13 +2489,13 @@
       property != NULL &&
       property->obj()->AsVariableProxy() != NULL &&
       property->obj()->AsVariableProxy()->is_this()) {
-    current_function_state_->AddProperty();
+    lexical_scope_->AddProperty();
   }
 
   // If we assign a function literal to a property we pretenure the
   // literal so it can be added as a constant function property.
   if (property != NULL && right->AsFunctionLiteral() != NULL) {
-    right->AsFunctionLiteral()->set_pretenure();
+    right->AsFunctionLiteral()->set_pretenure(true);
   }
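
Editor's note: both bookkeeping bits here feed allocation heuristics. Assignments of the form
'this.<name> = ...' raise the function's expected property count (used to pre-size objects the
function constructs), and a function literal assigned to a property is pretenured so it can
become a constant function property. Roughly (names illustrative):

    function Point(x, y) {
      this.x = x;   // counted via AddProperty()
      this.y = y;
    }
    obj.handler = function () {};   // function literal on a property: pretenured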
 
   if (fni_ != NULL) {
@@ -3094,7 +2513,7 @@
     fni_->Leave();
   }
 
-  return factory()->NewAssignment(op, expression, right, pos);
+  return new(zone()) Assignment(isolate(), op, expression, right, pos);
 }
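
Editor's note: the is_strict_mode() checks restored in this function (and in the prefix,
postfix, and delete paths later in this file) turn a family of strict-mode violations into
parse-time errors, for example:

    "use strict";
    eval = 1;         // SyntaxError ("strict_lhs_assignment")
    arguments = [];   // SyntaxError ("strict_lhs_assignment")
    eval++;           // SyntaxError ("strict_lhs_prefix")
    delete x;         // SyntaxError ("strict_delete"): unqualified identifier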
 
 
@@ -3116,8 +2535,8 @@
   Expect(Token::COLON, CHECK_OK);
   int right_position = scanner().peek_location().beg_pos;
   Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK);
-  return factory()->NewConditional(
-      expression, left, right, left_position, right_position);
+  return new(zone()) Conditional(
+      isolate(), expression, left, right, left_position, right_position);
 }
 
 
@@ -3148,47 +2567,41 @@
 
         switch (op) {
           case Token::ADD:
-            x = factory()->NewNumberLiteral(x_val + y_val);
+            x = NewNumberLiteral(x_val + y_val);
             continue;
           case Token::SUB:
-            x = factory()->NewNumberLiteral(x_val - y_val);
+            x = NewNumberLiteral(x_val - y_val);
             continue;
           case Token::MUL:
-            x = factory()->NewNumberLiteral(x_val * y_val);
+            x = NewNumberLiteral(x_val * y_val);
             continue;
           case Token::DIV:
-            x = factory()->NewNumberLiteral(x_val / y_val);
+            x = NewNumberLiteral(x_val / y_val);
             continue;
-          case Token::BIT_OR: {
-            int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
-            x = factory()->NewNumberLiteral(value);
+          case Token::BIT_OR:
+            x = NewNumberLiteral(DoubleToInt32(x_val) | DoubleToInt32(y_val));
             continue;
-          }
-          case Token::BIT_AND: {
-            int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
-            x = factory()->NewNumberLiteral(value);
+          case Token::BIT_AND:
+            x = NewNumberLiteral(DoubleToInt32(x_val) & DoubleToInt32(y_val));
             continue;
-          }
-          case Token::BIT_XOR: {
-            int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
-            x = factory()->NewNumberLiteral(value);
+          case Token::BIT_XOR:
+            x = NewNumberLiteral(DoubleToInt32(x_val) ^ DoubleToInt32(y_val));
             continue;
-          }
           case Token::SHL: {
             int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
-            x = factory()->NewNumberLiteral(value);
+            x = NewNumberLiteral(value);
             continue;
           }
           case Token::SHR: {
             uint32_t shift = DoubleToInt32(y_val) & 0x1f;
             uint32_t value = DoubleToUint32(x_val) >> shift;
-            x = factory()->NewNumberLiteral(value);
+            x = NewNumberLiteral(value);
             continue;
           }
           case Token::SAR: {
             uint32_t shift = DoubleToInt32(y_val) & 0x1f;
             int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
-            x = factory()->NewNumberLiteral(value);
+            x = NewNumberLiteral(value);
             continue;
           }
           default:
@@ -3207,15 +2620,15 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        x = factory()->NewCompareOperation(cmp, x, y, position);
+        x = NewCompareNode(cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
-          x = factory()->NewUnaryOperation(Token::NOT, x, position);
+          x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
         }
 
       } else {
         // We have a "normal" binary operation.
-        x = factory()->NewBinaryOperation(op, x, y, position);
+        x = new(zone()) BinaryOperation(isolate(), op, x, y, position);
       }
     }
   }
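
Editor's note: these folds fire only when both operands are number literals, and they
reproduce ECMAScript's 32-bit semantics: operands pass through ToInt32/ToUint32 and shift
counts are masked to five bits, so the folded literal equals what run-time evaluation would
produce. For instance:

    1 + 2       // folded to the literal 3
    1 << 33     // folded to 2 (shift count 33 & 0x1f == 1)
    -8 >> 1     // folded to -4 (arithmetic shift on ToInt32)
    -8 >>> 1    // folded to 2147483644 (logical shift on ToUint32)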
@@ -3223,6 +2636,27 @@
 }
 
 
+Expression* Parser::NewCompareNode(Token::Value op,
+                                   Expression* x,
+                                   Expression* y,
+                                   int position) {
+  ASSERT(op != Token::NE && op != Token::NE_STRICT);
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    bool is_strict = (op == Token::EQ_STRICT);
+    Literal* x_literal = x->AsLiteral();
+    if (x_literal != NULL && x_literal->IsNull()) {
+      return new(zone()) CompareToNull(isolate(), is_strict, y);
+    }
+
+    Literal* y_literal = y->AsLiteral();
+    if (y_literal != NULL && y_literal->IsNull()) {
+      return new(zone()) CompareToNull(isolate(), is_strict, x);
+    }
+  }
+  return new(zone()) CompareOperation(isolate(), op, x, y, position);
+}
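
Editor's note: NewCompareNode special-cases (strict) equality against a literal null so later
phases get a dedicated CompareToNull node instead of a generic comparison. In source terms:

    x === null   // CompareToNull, strict: true only for null
    x == null    // CompareToNull, non-strict: true for null and undefined
    x != null    // parsed as NOT(x == null) via the negation rewrite above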
+
+
 Expression* Parser::ParseUnaryExpression(bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
@@ -3248,7 +2682,7 @@
         // Convert the literal to a boolean condition and negate it.
         bool condition = literal->ToBoolean()->IsTrue();
         Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
-        return factory()->NewLiteral(result);
+        return NewLiteral(result);
       } else if (literal->IsNumber()) {
         // Compute some expressions involving only number literals.
         double value = literal->Number();
@@ -3256,9 +2690,9 @@
           case Token::ADD:
             return expression;
           case Token::SUB:
-            return factory()->NewNumberLiteral(-value);
+            return NewNumberLiteral(-value);
           case Token::BIT_NOT:
-            return factory()->NewNumberLiteral(~DoubleToInt32(value));
+            return NewNumberLiteral(~DoubleToInt32(value));
           default:
             break;
         }
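
Editor's note: the matching unary folds, again restricted to literal operands:

    !true   // folded to the literal false
    -5      // folded to the number literal -5
    +5      // unary plus on a number literal is simply dropped
    ~0      // folded to -1 (bitwise NOT on ToInt32)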
@@ -3266,7 +2700,7 @@
     }
 
     // "delete identifier" is a syntax error in strict mode.
-    if (op == Token::DELETE && !top_scope_->is_classic_mode()) {
+    if (op == Token::DELETE && top_scope_->is_strict_mode()) {
       VariableProxy* operand = expression->AsVariableProxy();
       if (operand != NULL && !operand->is_this()) {
         ReportMessage("strict_delete", Vector<const char*>::empty());
@@ -3275,7 +2709,7 @@
       }
     }
 
-    return factory()->NewUnaryOperation(op, expression, position);
+    return new(zone()) UnaryOperation(isolate(), op, expression, position);
 
   } else if (Token::IsCountOp(op)) {
     op = Next();
@@ -3290,17 +2724,17 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (!top_scope_->is_classic_mode()) {
+    if (top_scope_->is_strict_mode()) {
       // Prefix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
-    MarkAsLValue(expression);
 
     int position = scanner().location().beg_pos;
-    return factory()->NewCountOperation(op,
-                                        true /* prefix */,
-                                        expression,
-                                        position);
+    return new(zone()) CountOperation(isolate(),
+                                      op,
+                                      true /* prefix */,
+                                      expression,
+                                      position);
 
   } else {
     return ParsePostfixExpression(ok);
@@ -3325,19 +2759,19 @@
       expression = NewThrowReferenceError(type);
     }
 
-    if (!top_scope_->is_classic_mode()) {
+    if (top_scope_->is_strict_mode()) {
       // Postfix expression operand in strict mode may not be eval or arguments.
       CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
     }
-    MarkAsLValue(expression);
 
     Token::Value next = Next();
     int position = scanner().location().beg_pos;
     expression =
-        factory()->NewCountOperation(next,
-                                     false /* postfix */,
-                                     expression,
-                                     position);
+        new(zone()) CountOperation(isolate(),
+                                   next,
+                                   false /* postfix */,
+                                   expression,
+                                   position);
   }
   return expression;
 }
@@ -3360,40 +2794,37 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = factory()->NewProperty(result, index, pos);
+        result = new(zone()) Property(isolate(), result, index, pos);
         Expect(Token::RBRACK, CHECK_OK);
         break;
       }
 
       case Token::LPAREN: {
-        int pos;
-        if (scanner().current_token() == Token::IDENTIFIER) {
-          // For call of an identifier we want to report position of
-          // the identifier as position of the call in the stack trace.
-          pos = scanner().location().beg_pos;
-        } else {
-          // For other kinds of calls we record position of the parenthesis as
-          // position of the call.  Note that this is extremely important for
-          // expressions of the form function(){...}() for which call position
-          // should not point to the closing brace otherwise it will intersect
-          // with positions recorded for function literal and confuse debugger.
-          pos = scanner().peek_location().beg_pos;
-        }
+        int pos = scanner().location().beg_pos;
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
 
         // Keep track of eval() calls since they disable all local variable
         // optimizations.
         // The calls that need special treatment are the
-        // direct eval calls. These calls are all of the form eval(...), with
-        // no explicit receiver.
+        // direct (i.e. not aliased) eval calls. These calls are all of the
+        // form eval(...) with no explicit receiver object where eval is not
+        // declared in the current scope chain.
         // These calls are marked as potentially direct eval calls. Whether
         // they are actually direct calls to eval is determined at run time.
+        // TODO(994): In ES5, it doesn't matter if the "eval" var is declared
+        // in the local scope chain. It only matters that it's called "eval",
+        // is called without a receiver, and refers to the original eval
+        // function.
         VariableProxy* callee = result->AsVariableProxy();
         if (callee != NULL &&
             callee->IsVariable(isolate()->factory()->eval_symbol())) {
-          top_scope_->DeclarationScope()->RecordEvalCall();
+          Handle<String> name = callee->name();
+          Variable* var = top_scope_->Lookup(name);
+          if (var == NULL) {
+            top_scope_->DeclarationScope()->RecordEvalCall();
+          }
         }
-        result = factory()->NewCall(result, args, pos);
+        result = NewCall(result, args, pos);
         break;
       }
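
Editor's note: the restored Lookup makes the marking slightly narrower than ES5 requires
(hence the TODO): a call is recorded as potentially direct only when no binding named 'eval'
shadows the builtin in the scope chain. The run-time distinction it feeds:

    eval("var a = 1");    // direct eval: 'a' lands in the caller's scope
    var alias = eval;
    alias("var b = 1");   // indirect eval: 'b' lands in the global scope (ES5)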
 
@@ -3401,8 +2832,10 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result =
-            factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+        result = new(zone()) Property(isolate(),
+                                      result,
+                                      NewLiteral(name),
+                                      pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -3438,8 +2871,10 @@
 
   if (!stack->is_empty()) {
     int last = stack->pop();
-    result = factory()->NewCallNew(
-        result, new(zone()) ZoneList<Expression*>(0), last);
+    result = new(zone()) CallNew(isolate(),
+                                 result,
+                                 new(zone()) ZoneList<Expression*>(0),
+                                 last);
   }
   return result;
 }
@@ -3491,7 +2926,7 @@
         Consume(Token::LBRACK);
         int pos = scanner().location().beg_pos;
         Expression* index = ParseExpression(true, CHECK_OK);
-        result = factory()->NewProperty(result, index, pos);
+        result = new(zone()) Property(isolate(), result, index, pos);
         if (fni_ != NULL) {
           if (index->IsPropertyName()) {
             fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
@@ -3507,8 +2942,10 @@
         Consume(Token::PERIOD);
         int pos = scanner().location().beg_pos;
         Handle<String> name = ParseIdentifierName(CHECK_OK);
-        result =
-            factory()->NewProperty(result, factory()->NewLiteral(name), pos);
+        result = new(zone()) Property(isolate(),
+                                      result,
+                                      NewLiteral(name),
+                                      pos);
         if (fni_ != NULL) fni_->PushLiteralName(name);
         break;
       }
@@ -3517,7 +2954,7 @@
         // Consume one of the new prefixes (already parsed).
         ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
         int last = stack->pop();
-        result = factory()->NewCallNew(result, args, last);
+        result = new(zone()) CallNew(isolate(), result, args, last);
         break;
       }
       default:
@@ -3536,7 +2973,7 @@
 
   Expect(Token::DEBUGGER, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
-  return factory()->NewDebuggerStatement();
+  return new(zone()) DebuggerStatement();
 }
 
 
@@ -3562,9 +2999,9 @@
       return ReportMessage("unexpected_reserved",
                            Vector<const char*>::empty());
     case Token::FUTURE_STRICT_RESERVED_WORD:
-      return ReportMessage(top_scope_->is_classic_mode() ?
-                               "unexpected_token_identifier" :
-                               "unexpected_strict_reserved",
+      return ReportMessage(top_scope_->is_strict_mode() ?
+                               "unexpected_strict_reserved" :
+                               "unexpected_token_identifier",
                            Vector<const char*>::empty());
     default:
       const char* name = Token::String(token);
@@ -3601,37 +3038,35 @@
   switch (peek()) {
     case Token::THIS: {
       Consume(Token::THIS);
-      result = factory()->NewVariableProxy(top_scope_->receiver());
+      result = new(zone()) VariableProxy(isolate(), top_scope_->receiver());
       break;
     }
 
     case Token::NULL_LITERAL:
       Consume(Token::NULL_LITERAL);
-      result = factory()->NewLiteral(isolate()->factory()->null_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->null_value());
       break;
 
     case Token::TRUE_LITERAL:
       Consume(Token::TRUE_LITERAL);
-      result = factory()->NewLiteral(isolate()->factory()->true_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->true_value());
       break;
 
     case Token::FALSE_LITERAL:
       Consume(Token::FALSE_LITERAL);
-      result = factory()->NewLiteral(isolate()->factory()->false_value());
+      result = new(zone()) Literal(
+          isolate(), isolate()->factory()->false_value());
       break;
 
     case Token::IDENTIFIER:
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       Handle<String> name = ParseIdentifier(CHECK_OK);
       if (fni_ != NULL) fni_->PushVariableName(name);
-      // The name may refer to a module instance object, so its type is unknown.
-#ifdef DEBUG
-      if (FLAG_print_interface_details)
-        PrintF("# Variable %s ", name->ToAsciiArray());
-#endif
-      Interface* interface = Interface::NewUnknown();
-      result = top_scope_->NewUnresolved(
-          factory(), name, scanner().location().beg_pos, interface);
+      result = top_scope_->NewUnresolved(name,
+                                         inside_with(),
+                                         scanner().location().beg_pos);
       break;
     }
 
@@ -3641,14 +3076,14 @@
       double value = StringToDouble(isolate()->unicode_cache(),
                                     scanner().literal_ascii_string(),
                                     ALLOW_HEX | ALLOW_OCTALS);
-      result = factory()->NewNumberLiteral(value);
+      result = NewNumberLiteral(value);
       break;
     }
 
     case Token::STRING: {
       Consume(Token::STRING);
       Handle<String> symbol = GetSymbol(CHECK_OK);
-      result = factory()->NewLiteral(symbol);
+      result = NewLiteral(symbol);
       if (fni_ != NULL) fni_->PushLiteralName(symbol);
       break;
     }
@@ -3746,14 +3181,11 @@
   Expect(Token::RBRACK, CHECK_OK);
 
   // Update the scope information before the pre-parsing bailout.
-  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
+  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
 
-  // Allocate a fixed array to hold all the object literals.
-  Handle<FixedArray> object_literals =
+  // Allocate a fixed array with all the literals.
+  Handle<FixedArray> literals =
       isolate()->factory()->NewFixedArray(values->length(), TENURED);
-  Handle<FixedDoubleArray> double_literals;
-  ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
-  bool has_only_undefined_values = true;
 
   // Fill in the literals.
   bool is_simple = true;
@@ -3765,85 +3197,21 @@
     }
     Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
     if (boilerplate_value->IsUndefined()) {
-      object_literals->set_the_hole(i);
-      if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-        double_literals->set_the_hole(i);
-      }
+      literals->set_the_hole(i);
       is_simple = false;
     } else {
-      // Examine each literal element, and adjust the ElementsKind if the
-      // literal element is not of a type that can be stored in the current
-      // ElementsKind.  Start with FAST_SMI_ONLY_ELEMENTS, and transition to
-      // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary.  Always remember
-      // the tagged value, no matter what the ElementsKind is in case we
-      // ultimately end up in FAST_ELEMENTS.
-      has_only_undefined_values = false;
-      object_literals->set(i, *boilerplate_value);
-      if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-        // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
-        // FAST_ELEMENTS is required.
-        if (!boilerplate_value->IsSmi()) {
-          if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
-            // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
-            // avoid over-allocating in TENURED space.
-            double_literals = isolate()->factory()->NewFixedDoubleArray(
-                values->length(), TENURED);
-            // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the
-            // FAST_DOUBLE_ELEMENTS array so that they are in sync.
-            for (int j = 0; j < i; ++j) {
-              Object* smi_value = object_literals->get(j);
-              if (smi_value->IsTheHole()) {
-                double_literals->set_the_hole(j);
-              } else {
-                double_literals->set(j, Smi::cast(smi_value)->value());
-              }
-            }
-            double_literals->set(i, boilerplate_value->Number());
-            elements_kind = FAST_DOUBLE_ELEMENTS;
-          } else {
-            elements_kind = FAST_ELEMENTS;
-          }
-        }
-      } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
-        // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays
-        // until the first value is seen that can't be stored as a double.
-        if (boilerplate_value->IsNumber()) {
-          double_literals->set(i, boilerplate_value->Number());
-        } else {
-          elements_kind = FAST_ELEMENTS;
-        }
-      }
+      literals->set(i, *boilerplate_value);
     }
   }
 
-  // Very small array literals that don't have a concrete hint about their type
-  // from a constant value should default to the slow case to avoid lots of
-  // elements transitions on really small objects.
-  if (has_only_undefined_values && values->length() <= 2) {
-    elements_kind = FAST_ELEMENTS;
-  }
-
   // Simple and shallow arrays can be lazily copied, we transform the
   // elements array to a copy-on-write array.
-  if (is_simple && depth == 1 && values->length() > 0 &&
-      elements_kind != FAST_DOUBLE_ELEMENTS) {
-    object_literals->set_map(isolate()->heap()->fixed_cow_array_map());
+  if (is_simple && depth == 1 && values->length() > 0) {
+    literals->set_map(isolate()->heap()->fixed_cow_array_map());
   }
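
Editor's note: with the element-kind tracking gone, every simple shallow array literal again
shares one copy-on-write boilerplate: each evaluation starts from the same elements array and
only copies when a result is mutated. Illustration (names hypothetical):

    function make() { return [1, 2, 3]; }   // simple, depth 1: COW boilerplate
    var a = make(), b = make();
    b[0] = 9;   // 'b' copies its elements; 'a' still sees [1, 2, 3]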
 
-  Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
-      ? Handle<FixedArrayBase>(double_literals)
-      : Handle<FixedArrayBase>(object_literals);
-
-  // Remember both the literal's constant values as well as the ElementsKind
-  // in a 2-element FixedArray.
-  Handle<FixedArray> literals =
-      isolate()->factory()->NewFixedArray(2, TENURED);
-
-  literals->set(0, Smi::FromInt(elements_kind));
-  literals->set(1, *element_values);
-
-  return factory()->NewArrayLiteral(
-      literals, values, literal_index, is_simple, depth);
+  return new(zone()) ArrayLiteral(
+      isolate(), literals, values, literal_index, is_simple, depth);
 }
 
 
@@ -3915,13 +3283,19 @@
   return isolate()->factory()->undefined_value();
 }
 
+// Defined in ast.cc
+bool IsEqualString(void* first, void* second);
+bool IsEqualNumber(void* first, void* second);
+
+
 // Validation per 11.1.5 Object Initialiser
 class ObjectLiteralPropertyChecker {
  public:
-  ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) :
-    props_(Literal::Match),
+  ObjectLiteralPropertyChecker(Parser* parser, bool strict) :
+    props(&IsEqualString),
+    elems(&IsEqualNumber),
     parser_(parser),
-    language_mode_(language_mode) {
+    strict_(strict) {
   }
 
   void CheckProperty(
@@ -3948,9 +3322,10 @@
     }
   }
 
-  HashMap props_;
+  HashMap props;
+  HashMap elems;
   Parser* parser_;
-  LanguageMode language_mode_;
+  bool strict_;
 };
 
 
@@ -3958,14 +3333,49 @@
     ObjectLiteral::Property* property,
     Scanner::Location loc,
     bool* ok) {
+
   ASSERT(property != NULL);
-  Literal* literal = property->key();
-  HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true);
+
+  Literal* lit = property->key();
+  Handle<Object> handle = lit->handle();
+
+  uint32_t hash;
+  HashMap* map;
+  void* key;
+
+  if (handle->IsSymbol()) {
+    Handle<String> name(String::cast(*handle));
+    if (name->AsArrayIndex(&hash)) {
+      Handle<Object> key_handle = FACTORY->NewNumberFromUint(hash);
+      key = key_handle.location();
+      map = &elems;
+    } else {
+      key = handle.location();
+      hash = name->Hash();
+      map = &props;
+    }
+  } else if (handle->ToArrayIndex(&hash)) {
+    key = handle.location();
+    map = &elems;
+  } else {
+    ASSERT(handle->IsNumber());
+    double num = handle->Number();
+    char arr[100];
+    Vector<char> buffer(arr, ARRAY_SIZE(arr));
+    const char* str = DoubleToCString(num, buffer);
+    Handle<String> name = FACTORY->NewStringFromAscii(CStrVector(str));
+    key = name.location();
+    hash = name->Hash();
+    map = &props;
+  }
+
+  // Lookup property previously defined, if any.
+  HashMap::Entry* entry = map->Lookup(key, hash, true);
   intptr_t prev = reinterpret_cast<intptr_t> (entry->value);
   intptr_t curr = GetPropertyKind(property);
 
-  // Duplicate data properties are illegal in strict or extended mode.
-  if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) {
+  // Duplicate data properties are illegal in strict mode.
+  if (strict_ && (curr & prev & kData) != 0) {
     parser_->ReportMessageAt(loc, "strict_duplicate_property",
                              Vector<const char*>::empty());
     *ok = false;
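
Editor's note: the restored checker canonicalizes keys before the duplicate lookup: symbol
keys that parse as array indices go into the element map, and non-index numeric keys are
stringified, so numeric and string spellings of the same key collide as intended. Strict-mode
examples:

    "use strict";
    var o = { a: 1, a: 2 };         // SyntaxError ("strict_duplicate_property")
    var p = { 100: 1, "100": 2 };   // SyntaxError: both keys land in the element map
    var q = { get a() {}, set a(v) {} };   // OK: accessor pair, not two data properties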
@@ -4076,9 +3486,11 @@
                              RelocInfo::kNoPosition,
                              FunctionLiteral::ANONYMOUS_EXPRESSION,
                              CHECK_OK);
-    // Allow any number of parameters for compatibilty with JSC.
+    // Allow any number of parameters for compatibility with JSC.
     // Specification only allows zero parameters for get and one for set.
-    return factory()->NewObjectLiteralProperty(is_getter, value);
+    ObjectLiteral::Property* property =
+        new(zone()) ObjectLiteral::Property(is_getter, value);
+    return property;
   } else {
     ReportUnexpectedToken(next);
     *ok = false;
@@ -4099,7 +3511,7 @@
   int number_of_boilerplate_properties = 0;
   bool has_function = false;
 
-  ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode());
+  ObjectLiteralPropertyChecker checker(this, top_scope_->is_strict_mode());
 
   Expect(Token::LBRACE, CHECK_OK);
 
@@ -4143,7 +3555,7 @@
         }
         // Failed to parse as get/set property, so it's just a property
         // called "get" or "set".
-        key = factory()->NewLiteral(id);
+        key = NewLiteral(id);
         break;
       }
       case Token::STRING: {
@@ -4152,10 +3564,10 @@
         if (fni_ != NULL) fni_->PushLiteralName(string);
         uint32_t index;
         if (!string.is_null() && string->AsArrayIndex(&index)) {
-          key = factory()->NewNumberLiteral(index);
+          key = NewNumberLiteral(index);
           break;
         }
-        key = factory()->NewLiteral(string);
+        key = NewLiteral(string);
         break;
       }
       case Token::NUMBER: {
@@ -4164,14 +3576,14 @@
         double value = StringToDouble(isolate()->unicode_cache(),
                                       scanner().literal_ascii_string(),
                                       ALLOW_HEX | ALLOW_OCTALS);
-        key = factory()->NewNumberLiteral(value);
+        key = NewNumberLiteral(value);
         break;
       }
       default:
         if (Token::IsKeyword(next)) {
           Consume(next);
           Handle<String> string = GetSymbol(CHECK_OK);
-          key = factory()->NewLiteral(string);
+          key = NewLiteral(string);
         } else {
           // Unexpected token.
           Token::Value next = Next();
@@ -4185,15 +3597,13 @@
     Expression* value = ParseAssignmentExpression(true, CHECK_OK);
 
     ObjectLiteral::Property* property =
-        new(zone()) ObjectLiteral::Property(key, value, isolate());
+        new(zone()) ObjectLiteral::Property(key, value);
 
-    // Mark top-level object literals that contain function literals and
-    // pretenure the literal so it can be added as a constant function
-    // property.
-    if (top_scope_->DeclarationScope()->is_global_scope() &&
-        value->AsFunctionLiteral() != NULL) {
+    // Mark object literals that contain function literals and pretenure the
+    // literal so it can be added as a constant function property.
+    if (value->AsFunctionLiteral() != NULL) {
       has_function = true;
-      value->AsFunctionLiteral()->set_pretenure();
+      value->AsFunctionLiteral()->set_pretenure(true);
     }
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -4213,7 +3623,7 @@
   Expect(Token::RBRACE, CHECK_OK);
 
   // Computation of literal_index must happen before pre parse bailout.
-  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
+  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
 
   Handle<FixedArray> constant_properties = isolate()->factory()->NewFixedArray(
       number_of_boilerplate_properties * 2, TENURED);
@@ -4226,13 +3636,14 @@
                                        &is_simple,
                                        &fast_elements,
                                        &depth);
-  return factory()->NewObjectLiteral(constant_properties,
-                                     properties,
-                                     literal_index,
-                                     is_simple,
-                                     fast_elements,
-                                     depth,
-                                     has_function);
+  return new(zone()) ObjectLiteral(isolate(),
+                                   constant_properties,
+                                   properties,
+                                   literal_index,
+                                   is_simple,
+                                   fast_elements,
+                                   depth,
+                                   has_function);
 }
 
 
@@ -4244,14 +3655,15 @@
     return NULL;
   }
 
-  int literal_index = current_function_state_->NextMaterializedLiteralIndex();
+  int literal_index = lexical_scope_->NextMaterializedLiteralIndex();
 
   Handle<String> js_pattern = NextLiteralString(TENURED);
   scanner().ScanRegExpFlags();
   Handle<String> js_flags = NextLiteralString(TENURED);
   Next();
 
-  return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index);
+  return new(zone()) RegExpLiteral(
+      isolate(), js_pattern, js_flags, literal_index);
 }
 
 
@@ -4279,98 +3691,6 @@
 }
 
 
-class SingletonLogger : public ParserRecorder {
- public:
-  SingletonLogger() : has_error_(false), start_(-1), end_(-1) { }
-  virtual ~SingletonLogger() { }
-
-  void Reset() { has_error_ = false; }
-
-  virtual void LogFunction(int start,
-                           int end,
-                           int literals,
-                           int properties,
-                           LanguageMode mode) {
-    ASSERT(!has_error_);
-    start_ = start;
-    end_ = end;
-    literals_ = literals;
-    properties_ = properties;
-    mode_ = mode;
-  };
-
-  // Logs a symbol creation of a literal or identifier.
-  virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
-  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
-
-  // Logs an error message and marks the log as containing an error.
-  // Further logging will be ignored, and ExtractData will return a vector
-  // representing the error only.
-  virtual void LogMessage(int start,
-                          int end,
-                          const char* message,
-                          const char* argument_opt) {
-    has_error_ = true;
-    start_ = start;
-    end_ = end;
-    message_ = message;
-    argument_opt_ = argument_opt;
-  }
-
-  virtual int function_position() { return 0; }
-
-  virtual int symbol_position() { return 0; }
-
-  virtual int symbol_ids() { return -1; }
-
-  virtual Vector<unsigned> ExtractData() {
-    UNREACHABLE();
-    return Vector<unsigned>();
-  }
-
-  virtual void PauseRecording() { }
-
-  virtual void ResumeRecording() { }
-
-  bool has_error() { return has_error_; }
-
-  int start() { return start_; }
-  int end() { return end_; }
-  int literals() {
-    ASSERT(!has_error_);
-    return literals_;
-  }
-  int properties() {
-    ASSERT(!has_error_);
-    return properties_;
-  }
-  LanguageMode language_mode() {
-    ASSERT(!has_error_);
-    return mode_;
-  }
-  const char* message() {
-    ASSERT(has_error_);
-    return message_;
-  }
-  const char* argument_opt() {
-    ASSERT(has_error_);
-    return argument_opt_;
-  }
-
- private:
-  bool has_error_;
-  int start_;
-  int end_;
-  // For function entries.
-  int literals_;
-  int properties_;
-  LanguageMode mode_;
-  // For error messages.
-  const char* message_;
-  const char* argument_opt_;
-};
-
-
 FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
                                               bool name_is_strict_reserved,
                                               int function_token_position,
@@ -4393,26 +3713,26 @@
   // Function declarations are function scoped in normal mode, so they are
   // hoisted. In harmony block scoping mode they are block scoped, so they
   // are not hoisted.
-  Scope* scope = (type == FunctionLiteral::DECLARATION && !is_extended_mode())
-      ? NewScope(top_scope_->DeclarationScope(), FUNCTION_SCOPE)
-      : NewScope(top_scope_, FUNCTION_SCOPE);
-  ZoneList<Statement*>* body = NULL;
-  int materialized_literal_count = -1;
-  int expected_property_count = -1;
-  int handler_count = 0;
+  Scope* scope = (type == FunctionLiteral::DECLARATION &&
+                  !harmony_block_scoping_)
+      ? NewScope(top_scope_->DeclarationScope(), Scope::FUNCTION_SCOPE, false)
+      : NewScope(top_scope_, Scope::FUNCTION_SCOPE, inside_with());
+  ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(8);
+  int materialized_literal_count;
+  int expected_property_count;
+  int start_pos;
+  int end_pos;
   bool only_simple_this_property_assignments;
   Handle<FixedArray> this_property_assignments;
-  FunctionLiteral::ParameterFlag duplicate_parameters =
-      FunctionLiteral::kNoDuplicateParameters;
-  AstProperties ast_properties;
+  bool has_duplicate_parameters = false;
   // Parse function body.
-  { FunctionState function_state(this, scope, isolate());
+  { LexicalScope lexical_scope(this, scope, isolate());
     top_scope_->SetScopeName(function_name);
 
     //  FormalParameterList ::
     //    '(' (Identifier)*[','] ')'
     Expect(Token::LPAREN, CHECK_OK);
-    scope->set_start_position(scanner().location().beg_pos);
+    start_pos = scanner().location().beg_pos;
     Scanner::Location name_loc = Scanner::Location::invalid();
     Scanner::Location dupe_loc = Scanner::Location::invalid();
     Scanner::Location reserved_loc = Scanner::Location::invalid();
@@ -4429,14 +3749,17 @@
         name_loc = scanner().location();
       }
       if (!dupe_loc.IsValid() && top_scope_->IsDeclared(param_name)) {
-        duplicate_parameters = FunctionLiteral::kHasDuplicateParameters;
+        has_duplicate_parameters = true;
         dupe_loc = scanner().location();
       }
       if (!reserved_loc.IsValid() && is_strict_reserved) {
         reserved_loc = scanner().location();
       }
 
-      top_scope_->DeclareParameter(param_name, VAR);
+      top_scope_->DeclareParameter(param_name,
+                                   harmony_block_scoping_
+                                   ? Variable::LET
+                                   : Variable::VAR);
       num_parameters++;
       if (num_parameters > kMaxNumFunctionParameters) {
         ReportMessageAt(scanner().location(), "too_many_parameters",
@@ -4457,130 +3780,71 @@
     // NOTE: We create a proxy and resolve it here so that in the
     // future we can change the AST to only refer to VariableProxies
     // instead of Variables and Proxies as is the case now.
-    Variable* fvar = NULL;
-    Token::Value fvar_init_op = Token::INIT_CONST;
     if (type == FunctionLiteral::NAMED_EXPRESSION) {
-      VariableMode fvar_mode;
-      if (is_extended_mode()) {
-        fvar_mode = CONST_HARMONY;
-        fvar_init_op = Token::INIT_CONST_HARMONY;
-      } else {
-        fvar_mode = CONST;
-      }
-      fvar =
-          top_scope_->DeclareFunctionVar(function_name, fvar_mode, factory());
+      Variable* fvar = top_scope_->DeclareFunctionVar(function_name);
+      VariableProxy* fproxy =
+          top_scope_->NewUnresolved(function_name, inside_with());
+      fproxy->BindTo(fvar);
+      body->Add(new(zone()) ExpressionStatement(
+          new(zone()) Assignment(isolate(),
+                                 Token::INIT_CONST,
+                                 fproxy,
+                                 new(zone()) ThisFunction(isolate()),
+                                 RelocInfo::kNoPosition)));
     }
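
Editor's note: for a named function expression, the name is bound eagerly again: an INIT_CONST
assignment of ThisFunction is prepended to the body, so the name is visible (and const-like)
inside the function without leaking outside. Behaviorally:

    var f = function g() { return typeof g; };
    f();        // "function": 'g' is bound inside the body
    typeof g;   // "undefined": the name does not escape the expression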
 
-    // Determine whether the function will be lazily compiled.
-    // The heuristics are:
-    // - It must not have been prohibited by the caller to Parse (some callers
-    //   need a full AST).
-    // - The outer scope must be trivial (only global variables in scope).
-    // - The function mustn't be a function expression with an open parenthesis
-    //   before; we consider that a hint that the function will be called
-    //   immediately, and it would be a waste of time to make it lazily
-    //   compiled.
-    // These are all things we can know at this point, without looking at the
-    // function itself.
+    // Determine if the function will be lazily compiled. The mode can only
+    // be PARSE_LAZILY if the --lazy flag is true.  We will not lazily
+    // compile if we do not have preparser data for the function.
     bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
                                top_scope_->outer_scope()->is_global_scope() &&
                                top_scope_->HasTrivialOuterContext() &&
-                               !parenthesized_function_);
+                               !parenthesized_function_ &&
+                               pre_data() != NULL);
     parenthesized_function_ = false;  // The bit was set for this function only.
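
Editor's note: the laziness heuristic is re-tightened here: besides requiring a trivial outer
context and no preceding '(', a body is now only skipped when preparser data exists for it
(the SingletonLogger fallback above is removed). The parenthesis bit encodes the usual IIFE
hint (names hypothetical):

    (function init() { /* '(' before 'function': compiled eagerly */ })();
    function maybeLater() { /* body may be skipped using preparser data */ }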
 
     if (is_lazily_compiled) {
       int function_block_pos = scanner().location().beg_pos;
-      FunctionEntry entry;
-      if (pre_data_ != NULL) {
-        // If we have pre_data_, we use it to skip parsing the function body.
-        // the preparser data contains the information we need to construct the
-        // lazy function.
-        entry = pre_data()->GetFunctionEntry(function_block_pos);
-        if (entry.is_valid()) {
-          if (entry.end_pos() <= function_block_pos) {
-            // End position greater than end of stream is safe, and hard
-            // to check.
-            ReportInvalidPreparseData(function_name, CHECK_OK);
-          }
-          scanner().SeekForward(entry.end_pos() - 1);
-
-          scope->set_end_position(entry.end_pos());
-          Expect(Token::RBRACE, CHECK_OK);
-          isolate()->counters()->total_preparse_skipped()->Increment(
-              scope->end_position() - function_block_pos);
-          materialized_literal_count = entry.literal_count();
-          expected_property_count = entry.property_count();
-          top_scope_->SetLanguageMode(entry.language_mode());
-          only_simple_this_property_assignments = false;
-          this_property_assignments = isolate()->factory()->empty_fixed_array();
-        } else {
-          is_lazily_compiled = false;
-        }
+      FunctionEntry entry = pre_data()->GetFunctionEntry(function_block_pos);
+      if (!entry.is_valid()) {
+        // There is no preparser data for the function, so we will not lazily
+        // compile after all.
+        is_lazily_compiled = false;
       } else {
-        // With no preparser data, we partially parse the function, without
-        // building an AST. This gathers the data needed to build a lazy
-        // function.
-        SingletonLogger logger;
-        preparser::PreParser::PreParseResult result =
-            LazyParseFunctionLiteral(&logger);
-        if (result == preparser::PreParser::kPreParseStackOverflow) {
-          // Propagate stack overflow.
-          stack_overflow_ = true;
-          *ok = false;
-          return NULL;
+        end_pos = entry.end_pos();
+        if (end_pos <= function_block_pos) {
+          // End position greater than end of stream is safe, and hard to check.
+          ReportInvalidPreparseData(function_name, CHECK_OK);
         }
-        if (logger.has_error()) {
-          const char* arg = logger.argument_opt();
-          Vector<const char*> args;
-          if (arg != NULL) {
-            args = Vector<const char*>(&arg, 1);
-          }
-          ReportMessageAt(Scanner::Location(logger.start(), logger.end()),
-                          logger.message(), args);
-          *ok = false;
-          return NULL;
-        }
-        scope->set_end_position(logger.end());
-        Expect(Token::RBRACE, CHECK_OK);
         isolate()->counters()->total_preparse_skipped()->Increment(
-            scope->end_position() - function_block_pos);
-        materialized_literal_count = logger.literals();
-        expected_property_count = logger.properties();
-        top_scope_->SetLanguageMode(logger.language_mode());
+            end_pos - function_block_pos);
+        // Seek to position just before terminal '}'.
+        scanner().SeekForward(end_pos - 1);
+        materialized_literal_count = entry.literal_count();
+        expected_property_count = entry.property_count();
+        if (entry.strict_mode()) top_scope_->EnableStrictMode();
         only_simple_this_property_assignments = false;
         this_property_assignments = isolate()->factory()->empty_fixed_array();
+        Expect(Token::RBRACE, CHECK_OK);
       }
     }
 
     if (!is_lazily_compiled) {
-      body = new(zone()) ZoneList<Statement*>(8);
-      if (fvar != NULL) {
-        VariableProxy* fproxy =
-            top_scope_->NewUnresolved(factory(), function_name);
-        fproxy->BindTo(fvar);
-        body->Add(factory()->NewExpressionStatement(
-            factory()->NewAssignment(fvar_init_op,
-                                     fproxy,
-                                     factory()->NewThisFunction(),
-                                     RelocInfo::kNoPosition)));
-      }
-      ParseSourceElements(body, Token::RBRACE, false, CHECK_OK);
+      ParseSourceElements(body, Token::RBRACE, CHECK_OK);
 
-      materialized_literal_count = function_state.materialized_literal_count();
-      expected_property_count = function_state.expected_property_count();
-      handler_count = function_state.handler_count();
+      materialized_literal_count = lexical_scope.materialized_literal_count();
+      expected_property_count = lexical_scope.expected_property_count();
       only_simple_this_property_assignments =
-          function_state.only_simple_this_property_assignments();
-      this_property_assignments = function_state.this_property_assignments();
+          lexical_scope.only_simple_this_property_assignments();
+      this_property_assignments = lexical_scope.this_property_assignments();
 
       Expect(Token::RBRACE, CHECK_OK);
-      scope->set_end_position(scanner().location().end_pos);
+      end_pos = scanner().location().end_pos;
     }
 
     // Validate strict mode.
-    if (!top_scope_->is_classic_mode()) {
+    if (top_scope_->is_strict_mode()) {
       if (IsEvalOrArguments(function_name)) {
-        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -4603,7 +3867,6 @@
         return NULL;
       }
       if (name_is_strict_reserved) {
-        int start_pos = scope->start_position();
         int position = function_token_position != RelocInfo::kNoPosition
             ? function_token_position
             : (start_pos > 0 ? start_pos - 1 : start_pos);
@@ -4619,60 +3882,35 @@
         *ok = false;
         return NULL;
       }
-      CheckOctalLiteral(scope->start_position(),
-                        scope->end_position(),
-                        CHECK_OK);
+      CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
     }
-    ast_properties = *factory()->visitor()->ast_properties();
   }
 
-  if (is_extended_mode()) {
+  if (harmony_block_scoping_) {
     CheckConflictingVarDeclarations(scope, CHECK_OK);
   }
 
   FunctionLiteral* function_literal =
-      factory()->NewFunctionLiteral(function_name,
-                                    scope,
-                                    body,
-                                    materialized_literal_count,
-                                    expected_property_count,
-                                    handler_count,
-                                    only_simple_this_property_assignments,
-                                    this_property_assignments,
-                                    num_parameters,
-                                    duplicate_parameters,
-                                    type,
-                                    FunctionLiteral::kIsFunction);
+      new(zone()) FunctionLiteral(isolate(),
+                                  function_name,
+                                  scope,
+                                  body,
+                                  materialized_literal_count,
+                                  expected_property_count,
+                                  only_simple_this_property_assignments,
+                                  this_property_assignments,
+                                  num_parameters,
+                                  start_pos,
+                                  end_pos,
+                                  type,
+                                  has_duplicate_parameters);
   function_literal->set_function_token_position(function_token_position);
-  function_literal->set_ast_properties(&ast_properties);
 
   if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
 }
 
 
-preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
-    SingletonLogger* logger) {
-  HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
-  ASSERT_EQ(Token::LBRACE, scanner().current_token());
-
-  if (reusable_preparser_ == NULL) {
-    intptr_t stack_limit = isolate()->stack_guard()->real_climit();
-    bool do_allow_lazy = true;
-    reusable_preparser_ = new preparser::PreParser(&scanner_,
-                                                   NULL,
-                                                   stack_limit,
-                                                   do_allow_lazy,
-                                                   allow_natives_syntax_,
-                                                   allow_modules_);
-  }
-  preparser::PreParser::PreParseResult result =
-      reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
-                                                logger);
-  return result;
-}
-
-
 Expression* Parser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
@@ -4715,7 +3953,7 @@
   }
 
   // We have a valid intrinsics call or a call to a builtin.
-  return factory()->NewCallRuntime(name, function, args);
+  return new(zone()) CallRuntime(isolate(), name, function, args);
 }
 
 
@@ -4770,32 +4008,25 @@
 }
 
 
-void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
-  Expect(Token::IDENTIFIER, ok);
-  if (!*ok) return;
-  Handle<String> symbol = GetSymbol(ok);
-  if (!*ok) return;
-  if (!symbol->IsEqualTo(CStrVector(keyword))) {
-    *ok = false;
-    ReportUnexpectedToken(scanner().current_token());
-  }
-}
-
-
 Literal* Parser::GetLiteralUndefined() {
-  return factory()->NewLiteral(isolate()->factory()->undefined_value());
+  return NewLiteral(isolate()->factory()->undefined_value());
 }
 
 
 Literal* Parser::GetLiteralTheHole() {
-  return factory()->NewLiteral(isolate()->factory()->the_hole_value());
+  return NewLiteral(isolate()->factory()->the_hole_value());
+}
+
+
+Literal* Parser::GetLiteralNumber(double value) {
+  return NewNumberLiteral(value);
 }
 
 
 // Parses an identifier that is valid for the current scope, in particular it
 // fails on strict mode future reserved keywords in a strict scope.
 Handle<String> Parser::ParseIdentifier(bool* ok) {
-  if (!top_scope_->is_classic_mode()) {
+  if (top_scope_->is_strict_mode()) {
     Expect(Token::IDENTIFIER, ok);
   } else if (!Check(Token::IDENTIFIER)) {
     Expect(Token::FUTURE_STRICT_RESERVED_WORD, ok);
@@ -4833,21 +4064,12 @@
 }
 
 
-void Parser::MarkAsLValue(Expression* expression) {
-  VariableProxy* proxy = expression != NULL
-      ? expression->AsVariableProxy()
-      : NULL;
-
-  if (proxy != NULL) proxy->MarkAsLValue();
-}
-
-
 // Checks LHS expression for assignment and prefix/postfix increment/decrement
 // in strict mode.
 void Parser::CheckStrictModeLValue(Expression* expression,
                                    const char* error,
                                    bool* ok) {
-  ASSERT(!top_scope_->is_classic_mode());
+  ASSERT(top_scope_->is_strict_mode());
   VariableProxy* lhs = expression != NULL
       ? expression->AsVariableProxy()
       : NULL;
@@ -4966,6 +4188,11 @@
 }
 
 
+Literal* Parser::NewNumberLiteral(double number) {
+  return NewLiteral(isolate()->factory()->NewNumber(number, TENURED));
+}
+
+
 Expression* Parser::NewThrowReferenceError(Handle<String> type) {
   return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
                        type, HandleVector<Object>(NULL, 0));
@@ -5005,15 +4232,19 @@
       elements->set(i, *element);
     }
   }
-  Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
-      elements, FAST_ELEMENTS, TENURED);
+  Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
+                                                                       TENURED);
 
   ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
-  args->Add(factory()->NewLiteral(type));
-  args->Add(factory()->NewLiteral(array));
-  CallRuntime* call_constructor =
-      factory()->NewCallRuntime(constructor, NULL, args);
-  return factory()->NewThrow(call_constructor, scanner().location().beg_pos);
+  args->Add(NewLiteral(type));
+  args->Add(NewLiteral(array));
+  CallRuntime* call_constructor = new(zone()) CallRuntime(isolate(),
+                                                          constructor,
+                                                          NULL,
+                                                          args);
+  return new(zone()) Throw(isolate(),
+                           call_constructor,
+                           scanner().location().beg_pos);
 }
 
 // ----------------------------------------------------------------------------
@@ -5890,21 +5121,19 @@
 
 
 // Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
-                                  int flags,
-                                  ParserRecorder* recorder) {
+static ScriptDataImpl* DoPreParse(UC16CharacterStream* source,
+                                  bool allow_lazy,
+                                  ParserRecorder* recorder,
+                                  bool harmony_block_scoping) {
   Isolate* isolate = Isolate::Current();
-  HistogramTimerScope timer(isolate->counters()->pre_parse());
-  Scanner scanner(isolate->unicode_cache());
-  scanner.SetHarmonyScoping(FLAG_harmony_scoping);
+  JavaScriptScanner scanner(isolate->unicode_cache());
+  scanner.SetHarmonyBlockScoping(harmony_block_scoping);
   scanner.Initialize(source);
   intptr_t stack_limit = isolate->stack_guard()->real_climit();
-  preparser::PreParser::PreParseResult result =
-      preparser::PreParser::PreParseProgram(&scanner,
-                                            recorder,
-                                            flags,
-                                            stack_limit);
-  if (result == preparser::PreParser::kPreParseStackOverflow) {
+  if (!preparser::PreParser::PreParseProgram(&scanner,
+                                             recorder,
+                                             allow_lazy,
+                                             stack_limit)) {
     isolate->StackOverflow();
     return NULL;
   }
@@ -5918,38 +5147,27 @@
 
 // Preparse, but only collect data that is immediately useful,
 // even if the preparser data is only used once.
-ScriptDataImpl* ParserApi::PartialPreParse(Handle<String> source,
+ScriptDataImpl* ParserApi::PartialPreParse(UC16CharacterStream* source,
                                            v8::Extension* extension,
-                                           int flags) {
+                                           bool harmony_block_scoping) {
   bool allow_lazy = FLAG_lazy && (extension == NULL);
   if (!allow_lazy) {
     // Partial preparsing is only about lazily compiled functions.
     // If we don't allow lazy compilation, the log data will be empty.
     return NULL;
   }
-  flags |= kAllowLazy;
   PartialParserRecorder recorder;
-  int source_length = source->length();
-  if (source->IsExternalTwoByteString()) {
-    ExternalTwoByteStringUtf16CharacterStream stream(
-        Handle<ExternalTwoByteString>::cast(source), 0, source_length);
-    return DoPreParse(&stream, flags, &recorder);
-  } else {
-    GenericStringUtf16CharacterStream stream(source, 0, source_length);
-    return DoPreParse(&stream, flags, &recorder);
-  }
+  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
 }
 
 
-ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
+ScriptDataImpl* ParserApi::PreParse(UC16CharacterStream* source,
                                     v8::Extension* extension,
-                                    int flags) {
+                                    bool harmony_block_scoping) {
   Handle<Script> no_script;
-  if (FLAG_lazy && (extension == NULL)) {
-    flags |= kAllowLazy;
-  }
+  bool allow_lazy = FLAG_lazy && (extension == NULL);
   CompleteParserRecorder recorder;
-  return DoPreParse(source, flags, &recorder);
+  return DoPreParse(source, allow_lazy, &recorder, harmony_block_scoping);
 }
 
 
@@ -5975,33 +5193,29 @@
 }
 
 
-bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
+bool ParserApi::Parse(CompilationInfo* info) {
   ASSERT(info->function() == NULL);
   FunctionLiteral* result = NULL;
   Handle<Script> script = info->script();
-  ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
-  if (!info->is_native() && FLAG_harmony_scoping) {
-    // Harmony scoping is requested.
-    parsing_flags |= EXTENDED_MODE;
-  }
-  if (!info->is_native() && FLAG_harmony_modules) {
-    parsing_flags |= kAllowModules;
-  }
-  if (FLAG_allow_natives_syntax || info->is_native()) {
-    // We require %identifier(..) syntax.
-    parsing_flags |= kAllowNativesSyntax;
-  }
+  bool harmony_block_scoping = !info->is_native() &&
+                               FLAG_harmony_block_scoping;
   if (info->is_lazy()) {
-    ASSERT(!info->is_eval());
-    Parser parser(script, parsing_flags, NULL, NULL);
-    if (info->shared_info()->is_function()) {
-      result = parser.ParseLazy(info);
-    } else {
-      result = parser.ParseProgram(info);
-    }
+    bool allow_natives_syntax =
+        FLAG_allow_natives_syntax ||
+        info->is_native();
+    Parser parser(script, allow_natives_syntax, NULL, NULL);
+    parser.SetHarmonyBlockScoping(harmony_block_scoping);
+    result = parser.ParseLazy(info);
   } else {
+    // Whether we allow %identifier(..) syntax.
+    bool allow_natives_syntax =
+        info->is_native() || FLAG_allow_natives_syntax;
     ScriptDataImpl* pre_data = info->pre_parse_data();
-    Parser parser(script, parsing_flags, info->extension(), pre_data);
+    Parser parser(script,
+                  allow_natives_syntax,
+                  info->extension(),
+                  pre_data);
+    parser.SetHarmonyBlockScoping(harmony_block_scoping);
     if (pre_data != NULL && pre_data->has_error()) {
       Scanner::Location loc = pre_data->MessageLocation();
       const char* message = pre_data->BuildMessage();
@@ -6014,7 +5228,10 @@
       DeleteArray(args.start());
       ASSERT(info->isolate()->has_pending_exception());
     } else {
-      result = parser.ParseProgram(info);
+      Handle<String> source = Handle<String>(String::cast(script->source()));
+      result = parser.ParseProgram(source,
+                                   info->is_global(),
+                                   info->StrictMode());
     }
   }
   info->SetFunction(result);
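
The hunk above restores the 3.6-era entry points: parsing options travel as individual booleans plus a SetHarmonyBlockScoping() setter instead of a packed flags word, and ParseProgram takes the source, global-context flag, and strict-mode flag directly. A condensed sketch of the restored call sequence, using only the CompilationInfo accessors and Parser signatures visible in this hunk (ParseWith36Api itself is illustrative, not part of the patch):

    // Illustrative only: mirrors ParserApi::Parse as restored above.
    bool ParseWith36Api(CompilationInfo* info) {
      Handle<Script> script = info->script();
      bool harmony_block_scoping =
          !info->is_native() && FLAG_harmony_block_scoping;
      bool allow_natives_syntax =
          info->is_native() || FLAG_allow_natives_syntax;
      Parser parser(script, allow_natives_syntax,
                    info->extension(), info->pre_parse_data());
      parser.SetHarmonyBlockScoping(harmony_block_scoping);
      FunctionLiteral* result = NULL;
      if (info->is_lazy()) {
        result = parser.ParseLazy(info);
      } else {
        Handle<String> source(String::cast(script->source()));
        result = parser.ParseProgram(source, info->is_global(),
                                     info->StrictMode());
      }
      info->SetFunction(result);
      return result != NULL;  // NULL signals a parse failure.
    }
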
diff --git a/src/parser.h b/src/parser.h
index b4d8825..3312f2f 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,7 +33,6 @@
 #include "preparse-data-format.h"
 #include "preparse-data.h"
 #include "scopes.h"
-#include "preparser.h"
 
 namespace v8 {
 namespace internal {
@@ -43,6 +42,7 @@
 class ParserLog;
 class PositionStack;
 class Target;
+class LexicalScope;
 
 template <typename T> class ZoneListWrapper;
 
@@ -67,36 +67,26 @@
 
 class FunctionEntry BASE_EMBEDDED {
  public:
-  enum {
-    kStartPositionIndex,
-    kEndPositionIndex,
-    kLiteralCountIndex,
-    kPropertyCountIndex,
-    kLanguageModeIndex,
-    kSize
-  };
+  explicit FunctionEntry(Vector<unsigned> backing) : backing_(backing) { }
+  FunctionEntry() : backing_(Vector<unsigned>::empty()) { }
 
-  explicit FunctionEntry(Vector<unsigned> backing)
-    : backing_(backing) { }
+  int start_pos() { return backing_[kStartPosOffset]; }
+  int end_pos() { return backing_[kEndPosOffset]; }
+  int literal_count() { return backing_[kLiteralCountOffset]; }
+  int property_count() { return backing_[kPropertyCountOffset]; }
+  bool strict_mode() { return backing_[kStrictModeOffset] != 0; }
 
-  FunctionEntry() : backing_() { }
+  bool is_valid() { return backing_.length() > 0; }
 
-  int start_pos() { return backing_[kStartPositionIndex]; }
-  int end_pos() { return backing_[kEndPositionIndex]; }
-  int literal_count() { return backing_[kLiteralCountIndex]; }
-  int property_count() { return backing_[kPropertyCountIndex]; }
-  LanguageMode language_mode() {
-    ASSERT(backing_[kLanguageModeIndex] == CLASSIC_MODE ||
-           backing_[kLanguageModeIndex] == STRICT_MODE ||
-           backing_[kLanguageModeIndex] == EXTENDED_MODE);
-    return static_cast<LanguageMode>(backing_[kLanguageModeIndex]);
-  }
-
-  bool is_valid() { return !backing_.is_empty(); }
+  static const int kSize = 5;
 
  private:
   Vector<unsigned> backing_;
-  bool owns_data_;
+  static const int kStartPosOffset = 0;
+  static const int kEndPosOffset = 1;
+  static const int kLiteralCountOffset = 2;
+  static const int kPropertyCountOffset = 3;
+  static const int kStrictModeOffset = 4;
 };
 
 
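With this revert, a preparse function record is again a fixed run of FunctionEntry::kSize == 5 unsigned words -- start position, end position, literal count, property count, strict-mode flag -- rather than the enum-indexed layout it replaces. A minimal sketch of slicing one record out of a preparse store (GetEntry is illustrative; Vector::SubVector is the existing V8 helper):

    // Illustrative only: fetch the index-th five-word record from the
    // function-entry area of the preparse data. Returns an invalid
    // (empty-backed) entry when the store is too short.
    FunctionEntry GetEntry(Vector<unsigned> store, int index) {
      int base = index * FunctionEntry::kSize;
      if (base + FunctionEntry::kSize > store.length()) {
        return FunctionEntry();
      }
      return FunctionEntry(store.SubVector(base, base + FunctionEntry::kSize));
    }
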
@@ -108,7 +98,7 @@
 
   // Create an empty ScriptDataImpl that is guaranteed to not satisfy
   // a SanityCheck.
-  ScriptDataImpl() : owns_store_(false) { }
+  ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
 
   virtual ~ScriptDataImpl();
   virtual int Length();
@@ -169,24 +159,24 @@
   // Parses the source code represented by the compilation info and sets its
   // function literal.  Returns false (and deallocates any allocated AST
   // nodes) if parsing failed.
-  static bool Parse(CompilationInfo* info, int flags);
+  static bool Parse(CompilationInfo* info);
 
   // Generic preparser generating full preparse data.
-  static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
+  static ScriptDataImpl* PreParse(UC16CharacterStream* source,
                                   v8::Extension* extension,
-                                  int flags);
+                                  bool harmony_block_scoping);
 
   // Preparser that only does preprocessing that makes sense if only used
   // immediately after.
-  static ScriptDataImpl* PartialPreParse(Handle<String> source,
+  static ScriptDataImpl* PartialPreParse(UC16CharacterStream* source,
                                          v8::Extension* extension,
-                                         int flags);
+                                         bool harmony_block_scoping);
 };
 
 // ----------------------------------------------------------------------------
 // REGEXP PARSING
 
-// A BufferedZoneList is an automatically growing list, just like (and backed
+// A BuffferedZoneList is an automatically growing list, just like (and backed
 // by) a ZoneList, that is optimized for the case of adding and removing
 // a single element. The last element added is stored outside the backing list,
 // and if no more than one element is ever added, the ZoneList isn't even
@@ -425,22 +415,19 @@
 // ----------------------------------------------------------------------------
 // JAVASCRIPT PARSING
 
-// Forward declaration.
-class SingletonLogger;
-
 class Parser {
  public:
   Parser(Handle<Script> script,
-         int parsing_flags,  // Combination of ParsingFlags
+         bool allow_natives_syntax,
          v8::Extension* extension,
          ScriptDataImpl* pre_data);
-  virtual ~Parser() {
-    delete reusable_preparser_;
-    reusable_preparser_ = NULL;
-  }
+  virtual ~Parser() { }
 
   // Returns NULL if parsing failed.
-  FunctionLiteral* ParseProgram(CompilationInfo* info);
+  FunctionLiteral* ParseProgram(Handle<String> source,
+                                bool in_global_context,
+                                StrictModeFlag strict_mode);
+
   FunctionLiteral* ParseLazy(CompilationInfo* info);
 
   void ReportMessageAt(Scanner::Location loc,
@@ -449,6 +436,7 @@
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
                        Vector<Handle<String> > args);
+  void SetHarmonyBlockScoping(bool block_scoping);
 
  private:
   // Limit on number of function parameters is chosen arbitrarily.
@@ -457,120 +445,38 @@
   // should be checked.
   static const int kMaxNumFunctionParameters = 32766;
   static const int kMaxNumFunctionLocals = 32767;
-
+  FunctionLiteral* ParseLazy(CompilationInfo* info,
+                             UC16CharacterStream* source,
+                             ZoneScope* zone_scope);
   enum Mode {
     PARSE_LAZILY,
     PARSE_EAGERLY
   };
 
   enum VariableDeclarationContext {
-    kModuleElement,
-    kBlockElement,
+    kSourceElement,
     kStatement,
     kForStatement
   };
 
-  // If a list of variable declarations includes any initializers.
-  enum VariableDeclarationProperties {
-    kHasInitializers,
-    kHasNoInitializers
-  };
-
-  class BlockState;
-
-  class FunctionState BASE_EMBEDDED {
-   public:
-    FunctionState(Parser* parser,
-                  Scope* scope,
-                  Isolate* isolate);
-    ~FunctionState();
-
-    int NextMaterializedLiteralIndex() {
-      return next_materialized_literal_index_++;
-    }
-    int materialized_literal_count() {
-      return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
-    }
-
-    int NextHandlerIndex() { return next_handler_index_++; }
-    int handler_count() { return next_handler_index_; }
-
-    void SetThisPropertyAssignmentInfo(
-        bool only_simple_this_property_assignments,
-        Handle<FixedArray> this_property_assignments) {
-      only_simple_this_property_assignments_ =
-          only_simple_this_property_assignments;
-      this_property_assignments_ = this_property_assignments;
-    }
-    bool only_simple_this_property_assignments() {
-      return only_simple_this_property_assignments_;
-    }
-    Handle<FixedArray> this_property_assignments() {
-      return this_property_assignments_;
-    }
-
-    void AddProperty() { expected_property_count_++; }
-    int expected_property_count() { return expected_property_count_; }
-
-    AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
-
-   private:
-    // Used to assign an index to each literal that needs materialization in
-    // the function.  Includes regexp literals, and boilerplate for object and
-    // array literals.
-    int next_materialized_literal_index_;
-
-    // Used to assign a per-function index to try and catch handlers.
-    int next_handler_index_;
-
-    // Properties count estimation.
-    int expected_property_count_;
-
-    // Keeps track of assignments to properties of this. Used for
-    // optimizing constructors.
-    bool only_simple_this_property_assignments_;
-    Handle<FixedArray> this_property_assignments_;
-
-    Parser* parser_;
-    FunctionState* outer_function_state_;
-    Scope* outer_scope_;
-    int saved_ast_node_id_;
-    AstNodeFactory<AstConstructionVisitor> factory_;
-  };
-
-
-
-
-  FunctionLiteral* ParseLazy(CompilationInfo* info,
-                             Utf16CharacterStream* source,
-                             ZoneScope* zone_scope);
-
   Isolate* isolate() { return isolate_; }
   Zone* zone() { return isolate_->zone(); }
 
   // Called by ParseProgram after setting up the scanner.
-  FunctionLiteral* DoParseProgram(CompilationInfo* info,
-                                  Handle<String> source,
+  FunctionLiteral* DoParseProgram(Handle<String> source,
+                                  bool in_global_context,
+                                  StrictModeFlag strict_mode,
                                   ZoneScope* zone_scope);
 
   // Report syntax error
   void ReportUnexpectedToken(Token::Value token);
   void ReportInvalidPreparseData(Handle<String> name, bool* ok);
   void ReportMessage(const char* message, Vector<const char*> args);
-  void ReportMessage(const char* message, Vector<Handle<String> > args);
 
-  bool inside_with() const { return top_scope_->inside_with(); }
-  Scanner& scanner()  { return scanner_; }
+  bool inside_with() const { return with_nesting_level_ > 0; }
+  JavaScriptScanner& scanner()  { return scanner_; }
   Mode mode() const { return mode_; }
   ScriptDataImpl* pre_data() const { return pre_data_; }
-  bool is_extended_mode() {
-    ASSERT(top_scope_ != NULL);
-    return top_scope_->is_extended_mode();
-  }
-  Scope* DeclarationScope(VariableMode mode) {
-    return (mode == LET || mode == CONST_HARMONY)
-        ? top_scope_ : top_scope_->DeclarationScope();
-  }
 
   // Check if the given string is 'eval' or 'arguments'.
   bool IsEvalOrArguments(Handle<String> string);
@@ -580,28 +486,16 @@
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
   void* ParseSourceElements(ZoneList<Statement*>* processor,
-                            int end_token, bool is_eval, bool* ok);
-  Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
-  Block* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
-  Module* ParseModule(bool* ok);
-  Module* ParseModuleLiteral(bool* ok);
-  Module* ParseModulePath(bool* ok);
-  Module* ParseModuleVariable(bool* ok);
-  Module* ParseModuleUrl(bool* ok);
-  Module* ParseModuleSpecifier(bool* ok);
-  Block* ParseImportDeclaration(bool* ok);
-  Statement* ParseExportDeclaration(bool* ok);
-  Statement* ParseBlockElement(ZoneStringList* labels, bool* ok);
+                            int end_token, bool* ok);
+  Statement* ParseSourceElement(ZoneStringList* labels, bool* ok);
   Statement* ParseStatement(ZoneStringList* labels, bool* ok);
-  Statement* ParseFunctionDeclaration(ZoneStringList* names, bool* ok);
+  Statement* ParseFunctionDeclaration(bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
   Block* ParseBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
   Block* ParseVariableStatement(VariableDeclarationContext var_context,
-                                ZoneStringList* names,
                                 bool* ok);
   Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                   VariableDeclarationProperties* decl_props,
-                                   ZoneStringList* names,
                                    Handle<String>* out,
                                    bool* ok);
   Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
@@ -621,9 +515,6 @@
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
 
-  // Support for hamony block scoped bindings.
-  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
-
   Expression* ParseExpression(bool accept_IN, bool* ok);
   Expression* ParseAssignmentExpression(bool accept_IN, bool* ok);
   Expression* ParseConditionalExpression(bool accept_IN, bool* ok);
@@ -642,6 +533,11 @@
   ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
   Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
 
+  Expression* NewCompareNode(Token::Value op,
+                             Expression* x,
+                             Expression* y,
+                             int position);
+
   // Populate the constant properties fixed array for a materialized object
   // literal.
   void BuildObjectLiteralConstantProperties(
@@ -704,7 +600,6 @@
   void Expect(Token::Value token, bool* ok);
   bool Check(Token::Value token);
   void ExpectSemicolon(bool* ok);
-  void ExpectContextualKeyword(const char* keyword, bool* ok);
 
   Handle<String> LiteralString(PretenureFlag tenured) {
     if (scanner().is_literal_ascii()) {
@@ -712,7 +607,7 @@
           scanner().literal_ascii_string(), tenured);
     } else {
       return isolate_->factory()->NewStringFromTwoByte(
-            scanner().literal_utf16_string(), tenured);
+            scanner().literal_uc16_string(), tenured);
     }
   }
 
@@ -722,7 +617,7 @@
           scanner().next_literal_ascii_string(), tenured);
     } else {
       return isolate_->factory()->NewStringFromTwoByte(
-          scanner().next_literal_utf16_string(), tenured);
+          scanner().next_literal_uc16_string(), tenured);
     }
   }
 
@@ -731,6 +626,7 @@
   // Get odd-ball literals.
   Literal* GetLiteralUndefined();
   Literal* GetLiteralTheHole();
+  Literal* GetLiteralNumber(double value);
 
   Handle<String> ParseIdentifier(bool* ok);
   Handle<String> ParseIdentifierOrStrictReservedWord(
@@ -740,11 +636,6 @@
                                                bool* is_set,
                                                bool* ok);
 
-  // Determine if the expression is a variable proxy and mark it as being used
-  // in an assignment or with a increment/decrement operator. This is currently
-  // used on for the statically checking assignments to harmony const bindings.
-  void MarkAsLValue(Expression* expression);
-
   // Strict mode validation of LValue expressions
   void CheckStrictModeLValue(Expression* expression,
                              const char* error,
@@ -765,10 +656,10 @@
   void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
 
   // Parser support
-  VariableProxy* NewUnresolved(Handle<String> name,
-                               VariableMode mode,
-                               Interface* interface = Interface::NewValue());
-  void Declare(Declaration* declaration, bool resolve, bool* ok);
+  VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
+                         FunctionLiteral* fun,
+                         bool resolve,
+                         bool* ok);
 
   bool TargetStackContainsLabel(Handle<String> label);
   BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
@@ -778,12 +669,30 @@
 
   // Factory methods.
 
-  Scope* NewScope(Scope* parent, ScopeType type);
+  Statement* EmptyStatement() {
+    static v8::internal::EmptyStatement empty;
+    return &empty;
+  }
+
+  Scope* NewScope(Scope* parent, Scope::Type type, bool inside_with);
 
   Handle<String> LookupSymbol(int symbol_id);
 
   Handle<String> LookupCachedSymbol(int symbol_id);
 
+  Expression* NewCall(Expression* expression,
+                      ZoneList<Expression*>* arguments,
+                      int pos) {
+    return new(zone()) Call(isolate(), expression, arguments, pos);
+  }
+
+  inline Literal* NewLiteral(Handle<Object> handle) {
+    return new(zone()) Literal(isolate(), handle);
+  }
+
+  // Create a number literal.
+  Literal* NewNumberLiteral(double value);
+
   // Generate AST node that throw a ReferenceError with the given type.
   Expression* NewThrowReferenceError(Handle<String> type);
 
@@ -803,39 +712,33 @@
                             Handle<String> type,
                             Vector< Handle<Object> > arguments);
 
-  preparser::PreParser::PreParseResult LazyParseFunctionLiteral(
-       SingletonLogger* logger);
-
-  AstNodeFactory<AstConstructionVisitor>* factory() {
-    return current_function_state_->factory();
-  }
-
   Isolate* isolate_;
   ZoneList<Handle<String> > symbol_cache_;
 
   Handle<Script> script_;
-  Scanner scanner_;
-  preparser::PreParser* reusable_preparser_;
+  JavaScriptScanner scanner_;
+
   Scope* top_scope_;
-  FunctionState* current_function_state_;
+  int with_nesting_level_;
+
+  LexicalScope* lexical_scope_;
+  Mode mode_;
+
   Target* target_stack_;  // for break, continue statements
+  bool allow_natives_syntax_;
   v8::Extension* extension_;
+  bool is_pre_parsing_;
   ScriptDataImpl* pre_data_;
   FuncNameInferrer* fni_;
-
-  Mode mode_;
-  bool allow_natives_syntax_;
-  bool allow_lazy_;
-  bool allow_modules_;
   bool stack_overflow_;
   // If true, the next (and immediately following) function literal is
   // preceded by a parenthesis.
   // Heuristically that means that the function will be called immediately,
   // so never lazily compile it.
   bool parenthesized_function_;
+  bool harmony_block_scoping_;
 
-  friend class BlockState;
-  friend class FunctionState;
+  friend class LexicalScope;
 };
 
 
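The member changes above also swap scope-derived tracking (top_scope_->inside_with()) for an explicit with_nesting_level_ counter read through inside_with(). A hypothetical RAII helper (not in the patch) shows how such a counter stays balanced across nested 'with' bodies:

    // Hypothetical, for illustration only: bumps the counter for the
    // dynamic extent of one 'with' body, including early exits.
    class WithNesting {
     public:
      explicit WithNesting(int* level) : level_(level) { ++*level_; }
      ~WithNesting() { --*level_; }
     private:
      int* level_;
    };
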
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index fa6fce0..a72f5da 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -61,7 +61,7 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::SetUp() {
+void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -114,7 +114,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, ie, not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -290,7 +290,7 @@
       }
       LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
-      // Entry not describing executable data. Skip to end of line to set up
+      // Entry not describing executable data. Skip to end of line to setup
       // reading the next entry.
       do {
         c = getc(fp);
@@ -355,17 +355,6 @@
 }
 
 
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_READONLY | PAGE_GUARD)) {
-    return false;
-  }
-  return true;
-}
-
-
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() : thread_(kNoThread) {}
@@ -376,9 +365,16 @@
 
 
 Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+    : data_(new PlatformData),
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -621,14 +617,12 @@
 
 class SamplerThread : public Thread {
  public:
-  static const int kSamplerThreadStackSize = 64 * KB;
-
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -639,7 +633,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -725,15 +719,14 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SamplerThread* instance_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
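Across all of the platform files, this revert trades the lazily initialized sampler mutex for one built during static initialization; both idioms appear verbatim in the hunks above and below:

    // 3.8 side (removed): plain old data, no static constructor; the OS
    // mutex is created on the first Pointer() call.
    //   static LazyMutex mutex_ = LAZY_MUTEX_INITIALIZER;
    //   ScopedLock lock(mutex_.Pointer());
    //
    // 3.6 side (restored): OS::CreateMutex() runs as a static initializer,
    // before main(), so the lock must not be used earlier than that.
    //   static Mutex* mutex_ = OS::CreateMutex();
    //   ScopedLock lock(mutex_);
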
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 2a9e174..685ec3c 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -79,7 +79,7 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::SetUp() {
+void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -128,7 +128,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, ie, not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -333,132 +333,44 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
   size_ = size;
 }
 
 
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
+  return address_ != MAP_FAILED;
 }
 
 
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
 
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, size);
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
+  return mmap(address, size, PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
@@ -470,8 +382,15 @@
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -716,14 +635,12 @@
     FULL_INTERVAL
   };
 
-  static const int kSignalSenderStackSize = 64 * KB;
-
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Install a signal handler.
@@ -743,7 +660,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -836,16 +753,15 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
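
The VirtualMemory revert here (and in platform-linux.cc below) folds the Reserve/Commit/Uncommit region helpers back into the member functions, but the underlying POSIX idiom is identical on both sides of the hunk: reserve address space with PROT_NONE plus MAP_NORESERVE, then commit ranges in place with MAP_FIXED. A standalone sketch of that idiom (the BSD MAP_ANON spelling is assumed; the Linux file uses MAP_ANONYMOUS):

    #include <stddef.h>
    #include <sys/mman.h>

    // Reserve: inaccessible and not backed by swap; only claims the range.
    static void* Reserve(size_t size) {
      void* p = mmap(NULL, size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
      return (p == MAP_FAILED) ? NULL : p;
    }

    // Commit: remap a sub-range of the reservation with real protections.
    static bool Commit(void* address, size_t size, bool executable) {
      int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
      return mmap(address, size, prot,
                  MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0) != MAP_FAILED;
    }
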
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 08f4495..032cdaa 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,7 +78,31 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::SetUp() {
+static void* GetRandomMmapAddr() {
+  Isolate* isolate = Isolate::UncheckedCurrent();
+  // Note that the current isolate isn't set up in a call path via
+  // CpuFeatures::Probe. We don't care about randomization in this case because
+  // the code page is immediately freed.
+  if (isolate != NULL) {
+#ifdef V8_TARGET_ARCH_X64
+    uint64_t rnd1 = V8::RandomPrivate(isolate);
+    uint64_t rnd2 = V8::RandomPrivate(isolate);
+    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    raw_addr &= V8_UINT64_C(0x3ffffffff000);
+#else
+    uint32_t raw_addr = V8::RandomPrivate(isolate);
+    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
+    raw_addr &= 0x3ffff000;
+    raw_addr += 0x20000000;
+#endif
+    return reinterpret_cast<void*>(raw_addr);
+  }
+  return NULL;
+}
+
+
+void OS::Setup() {
   // Seed the random number generator. We preserve microsecond resolution.
   uint64_t seed = Ticks() ^ (getpid() << 16);
   srandom(static_cast<unsigned int>(seed));
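
The re-added GetRandomMmapAddr picks hints the kernel can honor directly; working the masks above through:

    // ia32: raw &= 0x3ffff000 keeps a 4K-aligned value in [0, 0x3ffff000],
    //       then raw += 0x20000000 shifts it into [0x20000000, 0x60000000).
    // x64:  raw = (rnd1 << 32) ^ rnd2 packs two 32-bit draws, and
    //       raw &= 0x3ffffffff000 keeps bits 12..45: 4K-aligned and safely
    //       below the canonical 47-bit user-space ceiling.
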
@@ -187,15 +211,15 @@
 // pair r0, r1 is loaded with 0.0. If -mfloat-abi=hard is pased to GCC then
 // calling this will return 1.0 and otherwise 0.0.
 static void ArmUsingHardFloatHelper() {
-  asm("mov r0, #0":::"r0");
+  asm("mov r0, #0");
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
   // Load 0x3ff00000 into r1 using instructions available in both ARM
   // and Thumb mode.
-  asm("mov r1, #3":::"r1");
-  asm("mov r2, #255":::"r2");
-  asm("lsl r1, r1, #8":::"r1");
-  asm("orr r1, r1, r2":::"r1");
-  asm("lsl r1, r1, #20":::"r1");
+  asm("mov r1, #3");
+  asm("mov r2, #255");
+  asm("lsl r1, r1, #8");
+  asm("orr r1, r1, r2");
+  asm("lsl r1, r1, #20");
   // For vmov d0, r0, r1 use ARM mode.
 #ifdef __thumb__
   asm volatile(
@@ -209,12 +233,12 @@
     "    adr r3, 2f+1    \n\t"
     "    bx  r3          \n\t"
     "    .THUMB          \n"
-    "2:                  \n\t":::"r3");
+    "2:                  \n\t");
 #else
   asm("vmov d0, r0, r1");
 #endif  // __thumb__
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-  asm("mov r1, #0":::"r1");
+  asm("mov r1, #0");
 }
 
 
@@ -326,7 +350,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, ie, not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low and and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -357,9 +381,9 @@
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
+  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
+  void* addr = GetRandomMmapAddr();
   void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (mbase == MAP_FAILED) {
     LOG(i::Isolate::Current(),
@@ -388,9 +412,6 @@
 
 void OS::Abort() {
   // Redirect to std abort to signal abnormal program termination.
-  if (FLAG_break_on_abort) {
-    DebugBreak();
-  }
   abort();
 }
 
@@ -432,12 +453,7 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -452,18 +468,13 @@
     return NULL;
   }
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
+  if (memory_) munmap(memory_, size_);
   fclose(file_);
 }
 
@@ -515,7 +526,7 @@
       }
       LOG(isolate, SharedLibraryEvent(lib_name, start, end));
     } else {
-      // Entry not describing executable data. Skip to end of line to set up
+      // Entry not describing executable data. Skip to end of line to setup
       // reading the next entry.
       do {
         c = getc(fp);
@@ -542,14 +553,10 @@
   // kernel log.
   int size = sysconf(_SC_PAGESIZE);
   FILE* f = fopen(kGCFakeMmap, "w+");
-  void* addr = mmap(OS::GetRandomMmapAddr(),
-                    size,
-                    PROT_READ | PROT_EXEC,
-                    MAP_PRIVATE,
-                    fileno(f),
-                    0);
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
   ASSERT(addr != MAP_FAILED);
-  OS::Free(addr, size);
+  munmap(addr, size);
   fclose(f);
 }
 
@@ -591,132 +598,44 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
+  address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
   size_ = size;
 }
 
 
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
+  return address_ != MAP_FAILED;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, size);
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
+  return mmap(address, size, PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
@@ -729,8 +648,15 @@
 
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData()),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -770,8 +696,7 @@
     pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
     attr_ptr = &attr;
   }
-  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
-  CHECK_EQ(0, result);
+  pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
   ASSERT(data_->thread_ != kNoThread);
 }
 
@@ -930,7 +855,7 @@
 }
 
 
-#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__))
+#if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__) || defined(__i386__))
 // Android runs a fairly new Linux kernel, so signal info is there,
 // but the C library doesn't have the structs defined.
 
@@ -952,38 +877,7 @@
   __sigset_t uc_sigmask;
 } ucontext_t;
 enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
-
-#elif !defined(__GLIBC__) && defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-struct sigcontext {
-  uint32_t regmask;
-  uint32_t status;
-  uint64_t pc;
-  uint64_t gregs[32];
-  uint64_t fpregs[32];
-  uint32_t acx;
-  uint32_t fpc_csr;
-  uint32_t fpc_eir;
-  uint32_t used_math;
-  uint32_t dsp;
-  uint64_t mdhi;
-  uint64_t mdlo;
-  uint32_t hi1;
-  uint32_t lo1;
-  uint32_t hi2;
-  uint32_t lo2;
-  uint32_t hi3;
-  uint32_t lo3;
-};
-typedef uint32_t __sigset_t;
-typedef struct sigcontext mcontext_t;
-typedef struct ucontext {
-  uint32_t uc_flags;
-  struct ucontext* uc_link;
-  stack_t uc_stack;
-  mcontext_t uc_mcontext;
-  __sigset_t uc_sigmask;
-} ucontext_t;
+enum X86Registers {REG_EIP = 14, REG_ESP = 7, REG_EBP = 6};
 
 #endif
 
@@ -999,6 +893,7 @@
 
 
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+#ifndef V8_HOST_ARCH_MIPS
   USE(info);
   if (signal != SIGPROF) return;
   Isolate* isolate = Isolate::UncheckedCurrent();
@@ -1040,14 +935,15 @@
   sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
   sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
   sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif  // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#endif
 #elif V8_HOST_ARCH_MIPS
-  sample->pc = reinterpret_cast<Address>(mcontext.pc);
-  sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
-  sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif  // V8_HOST_ARCH_*
+  sample.pc = reinterpret_cast<Address>(mcontext.pc);
+  sample.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+  sample.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif
   sampler->SampleStack(sample);
   sampler->Tick(sample);
+#endif
 }
 
 
@@ -1069,10 +965,8 @@
     FULL_INTERVAL
   };
 
-  static const int kSignalSenderStackSize = 64 * KB;
-
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         vm_tgid_(getpid()),
         interval_(interval) {}
 
@@ -1093,7 +987,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -1106,7 +1000,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -1187,9 +1081,6 @@
     // occuring during signal delivery.
     useconds_t interval = interval_ * 1000 - 100;
     if (full_or_half == HALF_INTERVAL) interval /= 2;
-#if defined(ANDROID)
-    usleep(interval);
-#else
     int result = usleep(interval);
 #ifdef DEBUG
     if (result != 0 && errno != EINTR) {
@@ -1199,9 +1090,8 @@
               errno);
       ASSERT(result == 0 || errno == EINTR);
     }
-#endif  // DEBUG
+#endif
     USE(result);
-#endif  // ANDROID
   }
 
   const int vm_tgid_;
@@ -1209,17 +1099,16 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
 
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
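
The sampler hunks above extend the hand-rolled bionic ucontext_t to __i386__ and define glibc-compatible register indices (REG_EIP = 14, REG_ESP = 7, REG_EBP = 6). Assuming an mcontext_t that exposes a gregs array, as the glibc layout those indices come from does, the handler's x86 path would read a tick sample like the ARM and MIPS branches shown in the hunk:

    // Sketch under the stated assumption: pc/sp/fp extraction on i386.
    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
    mcontext_t& mcontext = ucontext->uc_mcontext;
    sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
    sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
    sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
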
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index bfcaab0..6be941a 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -75,7 +75,7 @@
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on MacOSX since a pthread_t is
+// 0 is never a valid thread id on MacOSX since a ptread_t is
 // a pointer.
 static const pthread_t kNoThread = (pthread_t) 0;
 
@@ -93,9 +93,13 @@
 static Mutex* limit_mutex = NULL;
 
 
-void OS::SetUp() {
-  // Seed the random number generator. We preserve microsecond resolution.
-  uint64_t seed = Ticks() ^ (getpid() << 16);
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly will cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
@@ -103,7 +107,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -144,12 +148,9 @@
                    bool is_executable) {
   const size_t msize = RoundUp(requested, getpagesize());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(OS::GetRandomMmapAddr(),
-                     msize,
-                     prot,
+  void* mbase = mmap(NULL, msize, prot,
                      MAP_PRIVATE | MAP_ANON,
-                     kMmapFd,
-                     kMmapFdOffset);
+                     kMmapFd, kMmapFdOffset);
   if (mbase == MAP_FAILED) {
     LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
@@ -206,12 +207,7 @@
   int size = ftell(file);
 
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
@@ -226,18 +222,13 @@
     return NULL;
   }
   void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-          size,
-          PROT_READ | PROT_WRITE,
-          MAP_SHARED,
-          fileno(file),
-          0);
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
   return new PosixMemoryMappedFile(file, memory, size);
 }
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
+  if (memory_) munmap(memory_, size_);
   fclose(file_);
 }
 
@@ -343,108 +334,33 @@
 }
 
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
+  size_ = size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
+  return address_ != MAP_FAILED;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
-                                 size_t size,
-                                 bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address,
-                         size,
-                         prot,
+  if (MAP_FAILED == mmap(address, size, prot,
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
+                         kMmapFd, kMmapFdOffset)) {
     return false;
   }
 
@@ -454,22 +370,9 @@
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
-  return mmap(address,
-              size,
-              PROT_NONE,
+  return mmap(address, size, PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
@@ -479,11 +382,17 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
 Thread::Thread(const Options& options)
     : data_(new PlatformData),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -736,17 +645,14 @@
   thread_act_t profiled_thread_;
 };
 
-
 class SamplerThread : public Thread {
  public:
-  static const int kSamplerThreadStackSize = 64 * KB;
-
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -757,7 +663,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -854,17 +760,16 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SamplerThread* instance_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 #undef REGISTER_FIELD
 
 
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
 SamplerThread* SamplerThread::instance_ = NULL;
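
For reference, the VirtualMemory(size, alignment) constructor deleted above obtains an aligned reservation by over-reserving and trimming. A sketch of that technique, assuming POSIX mmap/munmap and an alignment that is a power of two and a multiple of the page size:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

// Reserve `size` bytes at an `alignment`-aligned base: reserve
// size + alignment bytes with PROT_NONE, then unmap the misaligned
// prefix and the unused suffix. Returns NULL on failure.
void* ReserveAligned(size_t size, size_t alignment) {
  size_t request = size + alignment;
  void* raw = mmap(NULL, request, PROT_NONE,
                   MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
  if (raw == MAP_FAILED) return NULL;
  uintptr_t base = reinterpret_cast<uintptr_t>(raw);
  uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  if (aligned != base)  // drop the misaligned prefix
    munmap(raw, aligned - base);
  size_t slack = request - (aligned - base) - size;
  if (slack != 0)       // drop the unused suffix
    munmap(reinterpret_cast<void*>(aligned + size), slack);
  return reinterpret_cast<void*>(aligned);
}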
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index e05345c..8c2a863 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -55,32 +55,8 @@
 }
 
 
-double fast_sin(double x) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
-double fast_cos(double x) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
-double fast_tan(double x) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
-double fast_log(double x) {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-
 // Initialize OS class early in the V8 startup.
-void OS::SetUp() {
+void OS::Setup() {
   // Seed the random number generator.
   UNIMPLEMENTED();
 }
@@ -319,12 +295,6 @@
 }
 
 
-bool VirtualMemory::Guard(void* address) {
-  UNIMPLEMENTED();
-  return false;
-}
-
-
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index b79cb71..973329b 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,107 +25,87 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Platform specific code for OpenBSD and NetBSD goes here. For the POSIX
-// comaptible parts the implementation is in platform-posix.cc.
+// Platform specific code for OpenBSD goes here. For the POSIX compatible parts
+// the implementation is in platform-posix.cc.
 
 #include <pthread.h>
 #include <semaphore.h>
 #include <signal.h>
 #include <sys/time.h>
 #include <sys/resource.h>
-#include <sys/syscall.h>
 #include <sys/types.h>
 #include <stdlib.h>
 
 #include <sys/types.h>  // mmap & munmap
 #include <sys/mman.h>   // mmap & munmap
 #include <sys/stat.h>   // open
-#include <fcntl.h>      // open
-#include <unistd.h>     // sysconf
+#include <sys/fcntl.h>  // open
+#include <unistd.h>     // getpagesize
 #include <execinfo.h>   // backtrace, backtrace_symbols
 #include <strings.h>    // index
 #include <errno.h>
 #include <stdarg.h>
+#include <limits.h>
 
 #undef MAP_TYPE
 
 #include "v8.h"
+#include "v8threads.h"
 
 #include "platform.h"
-#include "v8threads.h"
 #include "vm-state-inl.h"
 
 
 namespace v8 {
 namespace internal {
 
-// 0 is never a valid thread id on Linux and OpenBSD since tids and pids share a
-// name space and pid 0 is reserved (see man 2 kill).
+// 0 is never a valid thread id on OpenBSD since tids and pids share a
+// name space and pid 0 is used to kill the group (see man 2 kill).
 static const pthread_t kNoThread = (pthread_t) 0;
 
 
 double ceiling(double x) {
-  return ceil(x);
+    // Correct as on OS X
+    if (-1.0 < x && x < 0.0) {
+        return -0.0;
+    } else {
+        return ceil(x);
+    }
 }
 
 
 static Mutex* limit_mutex = NULL;
 
 
-static void* GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc).
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
-void OS::SetUp() {
-  // Seed the random number generator. We preserve microsecond resolution.
-  uint64_t seed = Ticks() ^ (getpid() << 16);
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly can cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
   srandom(static_cast<unsigned int>(seed));
   limit_mutex = CreateMutex();
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  __asm__ __volatile__("" : : : "memory");
+  *ptr = value;
+}
+
+
 uint64_t OS::CpuFeaturesImpliedByPlatform() {
-  return 0;
+  return 0;  // OpenBSD runs on anything.
 }
 
 
 int OS::ActivationFrameAlignment() {
-  // With gcc 4.4 the tree vectorization optimizer can generate code
-  // that requires 16 byte alignment such as movdqa on x86.
+  // 16 byte alignment on OpenBSD
   return 16;
 }
 
 
-void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-  __asm__ __volatile__("" : : : "memory");
-  // An x86 store acts as a release barrier.
-  *ptr = value;
-}
-
-
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -146,7 +126,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -170,20 +150,19 @@
 
 
 size_t OS::AllocateAlignment() {
-  return sysconf(_SC_PAGESIZE);
+  return getpagesize();
 }
 
 
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+                   bool executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
   if (mbase == MAP_FAILED) {
-    LOG(i::Isolate::Current(),
-        StringEvent("OS::Allocate", "mmap failed"));
+    LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed"));
     return NULL;
   }
   *allocated = msize;
@@ -192,9 +171,9 @@
 }
 
 
-void OS::Free(void* address, const size_t size) {
+void OS::Free(void* buf, const size_t length) {
   // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
+  int result = munmap(buf, length);
   USE(result);
   ASSERT(result == 0);
 }
@@ -213,7 +192,13 @@
 
 
 void OS::DebugBreak() {
+#if (defined(__arm__) || defined(__thumb__))
+# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
+  asm("bkpt 0");
+# endif
+#else
   asm("int $3");
+#endif
 }
 
 
@@ -260,95 +245,61 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
+  if (memory_) munmap(memory_, size_);
   fclose(file_);
 }
 
 
-void OS::LogSharedLibraryAddresses() {
-  // This function assumes that the layout of the file is as follows:
-  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
-  // If we encounter an unexpected situation we abort scanning further entries.
-  FILE* fp = fopen("/proc/self/maps", "r");
-  if (fp == NULL) return;
-
-  // Allocate enough room to be able to store a full file name.
-  const int kLibNameLen = FILENAME_MAX + 1;
-  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
-  i::Isolate* isolate = ISOLATE;
-  // This loop will terminate once the scanning hits an EOF.
-  while (true) {
-    uintptr_t start, end;
-    char attr_r, attr_w, attr_x, attr_p;
-    // Parse the addresses and permission bits at the beginning of the line.
-    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
-    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
-    int c;
-    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
-      // Found a read-only executable entry. Skip characters until we reach
-      // the beginning of the filename or the end of the line.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n') && (c != '/'));
-      if (c == EOF) break;  // EOF: Was unexpected, just exit.
-
-      // Process the filename if found.
-      if (c == '/') {
-        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
-
-        // Read to the end of the line. Exit if the read fails.
-        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
-        // Drop the newline character read by fgets. We do not need to check
-        // for a zero-length string because we know that we at least read the
-        // '/' character.
-        lib_name[strlen(lib_name) - 1] = '\0';
-      } else {
-        // No library name found, just record the raw address range.
-        snprintf(lib_name, kLibNameLen,
-                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
-      }
-      LOG(isolate, SharedLibraryEvent(lib_name, start, end));
-    } else {
-      // Entry not describing executable data. Skip to end of line to set up
-      // reading the next entry.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n'));
-      if (c == EOF) break;
-    }
-  }
-  free(lib_name);
-  fclose(fp);
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
 }
 
 
-static const char kGCFakeMmap[] = "/tmp/__v8_gc__";
+void OS::LogSharedLibraryAddresses() {
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    int result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    result = read(fd, addr_buffer + 2, 1);
+    if (result < 1) break;
+    if (addr_buffer[2] != '-') break;
+    result = read(fd, addr_buffer + 2, 8);
+    if (result < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    int bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      result = read(fd, buffer + bytes_read, 1);
+      if (result < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line.  Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
+  }
+  close(fd);
+}
 
 
 void OS::SignalCodeMovingGC() {
-  // Support for ll_prof.py.
-  //
-  // The Linux profiler built into the kernel logs all mmap's with
-  // PROT_EXEC so that analysis tools can properly attribute ticks. We
-  // do a mmap with a name known by ll_prof.py and immediately munmap
-  // it. This injects a GC marker into the stream of events generated
-  // by the kernel and allows us to synchronize V8 code log and the
-  // kernel log.
-  int size = sysconf(_SC_PAGESIZE);
-  FILE* f = fopen(kGCFakeMmap, "w+");
-  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
-                    fileno(f), 0);
-  ASSERT(addr != MAP_FAILED);
-  OS::Free(addr, size);
-  fclose(f);
 }
 
 
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  // backtrace is a glibc extension.
   int frames_size = frames.length();
   ScopedVector<void*> addresses(frames_size);
 
@@ -380,146 +331,64 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
   size_ = size;
 }
 
 
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
+  return address_ != MAP_FAILED;
 }
 
 
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
 
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, size);
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
+  return mmap(address, size, PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
 class Thread::PlatformData : public Malloced {
  public:
-  PlatformData() : thread_(kNoThread) {}
-
   pthread_t thread_;  // Thread handle for pthread.
 };
 
+
 Thread::Thread(const Options& options)
-    : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+    : data_(new PlatformData),
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -533,11 +402,6 @@
   // This is also initialized by the first argument to pthread_create() but we
   // don't know which thread will run first (the original thread or the new
   // one) so we initialize it here too.
-#ifdef PR_SET_NAME
-  prctl(PR_SET_NAME,
-        reinterpret_cast<unsigned long>(thread->name()),  // NOLINT
-        0, 0, 0);
-#endif
   thread->data()->thread_ = pthread_self();
   ASSERT(thread->data()->thread_ != kNoThread);
   thread->Run();
@@ -613,7 +477,6 @@
     ASSERT(result == 0);
     result = pthread_mutex_init(&mutex_, &attrs);
     ASSERT(result == 0);
-    USE(result);
   }
 
   virtual ~OpenBSDMutex() { pthread_mutex_destroy(&mutex_); }
@@ -670,14 +533,6 @@
 }
 
 
-#ifndef TIMEVAL_TO_TIMESPEC
-#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
-    (ts)->tv_sec = (tv)->tv_sec;                                    \
-    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
-} while (false)
-#endif
-
-
 bool OpenBSDSemaphore::Wait(int timeout) {
   const long kOneSecondMicros = 1000000;  // NOLINT
 
@@ -711,15 +566,29 @@
   }
 }
 
+
 Semaphore* OS::CreateSemaphore(int count) {
   return new OpenBSDSemaphore(count);
 }
 
 
 static pthread_t GetThreadID() {
-  return pthread_self();
+  pthread_t thread_id = pthread_self();
+  return thread_id;
 }
 
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() : vm_tid_(GetThreadID()) {}
+
+  pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+  pthread_t vm_tid_;
+};
+
+
 static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
   USE(info);
   if (signal != SIGPROF) return;
@@ -741,20 +610,8 @@
   if (sample == NULL) sample = &sample_obj;
 
   // Extracting the sample from the context is extremely machine dependent.
-  sample->state = isolate->current_vm_state();
   ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#ifdef __NetBSD__
-  mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
-  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
-  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
-  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
-  sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
-  sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
-  sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif  // V8_HOST_ARCH
-#else  // OpenBSD
+  sample->state = isolate->current_vm_state();
 #if V8_HOST_ARCH_IA32
   sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
   sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
@@ -763,24 +620,16 @@
   sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
   sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
   sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif  // V8_HOST_ARCH
-#endif  // __NetBSD__
+#elif V8_HOST_ARCH_ARM
+  sample->pc = reinterpret_cast<Address>(ucontext->sc_r15);
+  sample->sp = reinterpret_cast<Address>(ucontext->sc_r13);
+  sample->fp = reinterpret_cast<Address>(ucontext->sc_r11);
+#endif
   sampler->SampleStack(sample);
   sampler->Tick(sample);
 }
 
 
-class Sampler::PlatformData : public Malloced {
- public:
-  PlatformData() : vm_tid_(GetThreadID()) {}
-
-  pthread_t vm_tid() const { return vm_tid_; }
-
- private:
-  pthread_t vm_tid_;
-};
-
-
 class SignalSender : public Thread {
  public:
   enum SleepInterval {
@@ -788,35 +637,23 @@
     FULL_INTERVAL
   };
 
-  static const int kSignalSenderStackSize = 64 * KB;
-
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
-        vm_tgid_(getpid()),
+      : Thread("SignalSender"),
         interval_(interval) {}
 
-  static void InstallSignalHandler() {
-    struct sigaction sa;
-    sa.sa_sigaction = ProfilerSignalHandler;
-    sigemptyset(&sa.sa_mask);
-    sa.sa_flags = SA_RESTART | SA_SIGINFO;
-    signal_handler_installed_ =
-        (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-  }
-
-  static void RestoreSignalHandler() {
-    if (signal_handler_installed_) {
-      sigaction(SIGPROF, &old_signal_handler_, 0);
-      signal_handler_installed_ = false;
-    }
-  }
-
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
-      // Start a thread that will send SIGPROF signal to VM threads,
-      // when CPU profiling will be enabled.
+      // Install a signal handler.
+      struct sigaction sa;
+      sa.sa_sigaction = ProfilerSignalHandler;
+      sigemptyset(&sa.sa_mask);
+      sa.sa_flags = SA_RESTART | SA_SIGINFO;
+      signal_handler_installed_ =
+          (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+      // Start a thread that sends SIGPROF signal to VM threads.
       instance_ = new SignalSender(sampler->interval());
       instance_->Start();
     } else {
@@ -825,13 +662,18 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
       delete instance_;
       instance_ = NULL;
-      RestoreSignalHandler();
+
+      // Restore the old signal handler.
+      if (signal_handler_installed_) {
+        sigaction(SIGPROF, &old_signal_handler_, 0);
+        signal_handler_installed_ = false;
+      }
     }
   }
 
@@ -843,11 +685,6 @@
       bool cpu_profiling_enabled =
           (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
       bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
-      if (cpu_profiling_enabled && !signal_handler_installed_) {
-        InstallSignalHandler();
-      } else if (!cpu_profiling_enabled && signal_handler_installed_) {
-        RestoreSignalHandler();
-      }
       // When CPU profiling is enabled both JavaScript and C++ code is
       // profiled. We must not suspend.
       if (!cpu_profiling_enabled) {
@@ -914,22 +751,19 @@
     USE(result);
   }
 
-  const int vm_tgid_;
   const int interval_;
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
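
The OpenBSD hunks above fold SIGPROF handler installation back into AddActiveSampler/RemoveActiveSampler instead of toggling it with profiling state. A stripped-down sketch of the install/restore dance itself (illustrative names; the real code also coordinates with the sender thread and extracts registers from the ucontext):

#include <signal.h>

static struct sigaction old_handler;
static bool handler_installed = false;

static void ProfilerSignal(int sig, siginfo_t* info, void* context) {
  // The real handler casts `context` to ucontext_t* and reads pc/sp/fp.
  (void) sig; (void) info; (void) context;
}

void InstallProfilerHandler() {
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignal;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;  // restart syscalls, pass siginfo
  handler_installed = (sigaction(SIGPROF, &sa, &old_handler) == 0);
}

void RestoreProfilerHandler() {
  if (handler_installed) {
    sigaction(SIGPROF, &old_handler, NULL);
    handler_installed = false;
  }
}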
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index a729b66..52cf029 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -46,14 +46,13 @@
 
 #undef MAP_TYPE
 
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#if defined(ANDROID)
 #define LOG_TAG "v8"
-#include <android/log.h>
+#include <utils/Log.h>  // LOG_PRI_VA
 #endif
 
 #include "v8.h"
 
-#include "codegen.h"
 #include "platform.h"
 
 namespace v8 {
@@ -71,12 +70,6 @@
 }
 
 
-intptr_t OS::CommitPageSize() {
-  static intptr_t page_size = getpagesize();
-  return page_size;
-}
-
-
 #ifndef __CYGWIN__
 // Get rid of writable permission on code allocations.
 void OS::ProtectCode(void* address, const size_t size) {
@@ -91,34 +84,6 @@
 #endif  // __CYGWIN__
 
 
-void* OS::GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-#ifdef V8_TARGET_ARCH_X64
-    uint64_t rnd1 = V8::RandomPrivate(isolate);
-    uint64_t rnd2 = V8::RandomPrivate(isolate);
-    uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
-    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    uint32_t raw_addr = V8::RandomPrivate(isolate);
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
-    // 10.6 and 10.7.
-    raw_addr &= 0x3ffff000;
-    raw_addr += 0x20000000;
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
-
-
 // ----------------------------------------------------------------------------
 // Math functions
 
@@ -127,27 +92,6 @@
 }
 
 
-#define UNARY_MATH_FUNCTION(name, generator)             \
-static UnaryMathFunction fast_##name##_function = NULL;  \
-V8_DECLARE_ONCE(fast_##name##_init_once);                \
-void init_fast_##name##_function() {                     \
-  fast_##name##_function = generator;                    \
-}                                                        \
-double fast_##name(double x) {                           \
-  CallOnce(&fast_##name##_init_once,                     \
-           &init_fast_##name##_function);                \
-  return (*fast_##name##_function)(x);                   \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef MATH_FUNCTION
-
-
 double OS::nan_value() {
   // NAN from math.h is defined in C99 and not in POSIX.
   return NAN;
@@ -238,7 +182,7 @@
 
 void OS::VPrint(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vprintf(format, args);
 #endif
@@ -255,7 +199,7 @@
 
 void OS::VFPrint(FILE* out, const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vfprintf(out, format, args);
 #endif
@@ -272,7 +216,7 @@
 
 void OS::VPrintError(const char* format, va_list args) {
 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+  LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
 #else
   vfprintf(stderr, format, args);
 #endif
@@ -305,14 +249,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex.Pointer());
+    ScopedLock lock(memcopy_function_mutex);
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
@@ -483,7 +427,7 @@
 }
 
 
-bool Socket::SetUp() {
+bool Socket::Setup() {
   // Nothing to do on POSIX.
   return true;
 }
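
The platform-posix.cc hunks above swap V8's CallOnce for double-checked locking around memcopy_function. The same once-only initialization can be sketched with plain pthread_once; the names below are illustrative and DefaultMemCopy merely wraps memcpy:

#include <pthread.h>
#include <stddef.h>
#include <string.h>

typedef void (*MemCopyFn)(void* dest, const void* src, size_t size);

static void DefaultMemCopy(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);  // stand-in for the generated fast path
}

static MemCopyFn g_memcopy = NULL;
static pthread_once_t g_memcopy_once = PTHREAD_ONCE_INIT;

static void InitMemCopy() {
  g_memcopy = DefaultMemCopy;  // runs exactly once, in exactly one thread
}

void MemCopy(void* dest, const void* src, size_t size) {
  pthread_once(&g_memcopy_once, InitMemCopy);  // also publishes g_memcopy
  (*g_memcopy)(dest, src, size);
}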
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 50ad353..035d394 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -53,7 +53,6 @@
 #include "v8.h"
 
 #include "platform.h"
-#include "v8threads.h"
 #include "vm-state-inl.h"
 
 
@@ -90,7 +89,7 @@
 
 
 static Mutex* limit_mutex = NULL;
-void OS::SetUp() {
+void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly will cause an overflow and the seed to be
@@ -140,7 +139,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -323,132 +322,43 @@
 static const int kMmapFdOffset = 0;
 
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = ReserveRegion(size);
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
   size_ = size;
 }
 
 
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
+  return address_ != MAP_FAILED;
 }
 
 
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
 
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, size);
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
 }
 
 
@@ -459,11 +369,17 @@
   pthread_t thread_;  // Thread handle for pthread.
 };
 
-
 Thread::Thread(const Options& options)
     : data_(new PlatformData()),
-      stack_size_(options.stack_size()) {
-  set_name(options.name());
+      stack_size_(options.stack_size) {
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : data_(new PlatformData()),
+      stack_size_(0) {
+  set_name(name);
 }
 
 
@@ -710,10 +626,8 @@
     FULL_INTERVAL
   };
 
-  static const int kSignalSenderStackSize = 64 * KB;
-
   explicit SignalSender(int interval)
-      : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+      : Thread("SignalSender"),
         interval_(interval) {}
 
   static void InstallSignalHandler() {
@@ -733,7 +647,7 @@
   }
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
@@ -746,7 +660,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -840,16 +754,15 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SignalSender* instance_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SignalSender);
 };
 
-LazyMutex SignalSender::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
 SignalSender* SignalSender::instance_ = NULL;
 struct sigaction SignalSender::old_signal_handler_;
 bool SignalSender::signal_handler_installed_ = false;
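
Both sides of the solaris VirtualMemory hunk rely on the same reserve/commit protocol: reserve address space with PROT_NONE | MAP_NORESERVE, commit a subrange by re-mapping it MAP_FIXED with real protections, and uncommit by mapping it back to PROT_NONE. A self-contained sketch, mirroring the kMmapFd/kMmapFdOffset pattern above:

#include <stddef.h>
#include <sys/mman.h>

static const int kFd = -1;      // anonymous mapping
static const int kFdOffset = 0;

void* Reserve(size_t size) {
  void* p = mmap(NULL, size, PROT_NONE,
                 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, kFd, kFdOffset);
  return p == MAP_FAILED ? NULL : p;
}

bool Commit(void* address, size_t size, bool executable) {
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  // MAP_FIXED replaces the PROT_NONE reservation in place.
  return mmap(address, size, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
              kFd, kFdOffset) != MAP_FAILED;
}

bool Uncommit(void* address, size_t size) {
  // Back to PROT_NONE: the backing store is dropped, the range is kept.
  return mmap(address, size, PROT_NONE,
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
              kFd, kFdOffset) != MAP_FAILED;
}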
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 2801b71..97788e2 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,7 +32,6 @@
 
 #include "v8.h"
 
-#include "codegen.h"
 #include "platform.h"
 #include "vm-state-inl.h"
 
@@ -59,26 +58,21 @@
 }
 
 
-int fopen_s(FILE** pFile, const char* filename, const char* mode) {
-  *pFile = fopen(filename, mode);
-  return *pFile != NULL ? 0 : 1;
-}
-
-
-#ifndef __MINGW64_VERSION_MAJOR
-
 // Not sure this is the correct interpretation of _mkgmtime
 time_t _mkgmtime(tm* timeptr) {
   return mktime(timeptr);
 }
 
 
+int fopen_s(FILE** pFile, const char* filename, const char* mode) {
+  *pFile = fopen(filename, mode);
+  return *pFile != NULL ? 0 : 1;
+}
+
+
 #define _TRUNCATE 0
 #define STRUNCATE 80
 
-#endif  // __MINGW64_VERSION_MAJOR
-
-
 int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
                  const char* format, va_list argptr) {
   ASSERT(count == _TRUNCATE);
@@ -113,16 +107,11 @@
 }
 
 
-#ifndef __MINGW64_VERSION_MAJOR
-
 inline void MemoryBarrier() {
   int barrier = 0;
   __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
 }
 
-#endif  // __MINGW64_VERSION_MAJOR
-
-
 #endif  // __MINGW32__
 
 // Generate a pseudo-random number in the range 0-2^31-1. Usually
@@ -149,14 +138,14 @@
 
 #if defined(V8_TARGET_ARCH_IA32)
 static OS::MemCopyFunction memcopy_function = NULL;
-static LazyMutex memcopy_function_mutex = LAZY_MUTEX_INITIALIZER;
+static Mutex* memcopy_function_mutex = OS::CreateMutex();
 // Defined in codegen-ia32.cc.
 OS::MemCopyFunction CreateMemCopyFunction();
 
 // Copy memory area to disjoint memory area.
 void OS::MemCopy(void* dest, const void* src, size_t size) {
   if (memcopy_function == NULL) {
-    ScopedLock lock(memcopy_function_mutex.Pointer());
+    ScopedLock lock(memcopy_function_mutex);
     if (memcopy_function == NULL) {
       OS::MemCopyFunction temp = CreateMemCopyFunction();
       MemoryBarrier();
@@ -175,16 +164,19 @@
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
 static ModuloFunction modulo_function = NULL;
-V8_DECLARE_ONCE(modulo_function_init_once);
+static Mutex* modulo_function_mutex = OS::CreateMutex();
 // Defined in codegen-x64.cc.
 ModuloFunction CreateModuloFunction();
 
-void init_modulo_function() {
-  modulo_function = CreateModuloFunction();
-}
-
 double modulo(double x, double y) {
-  CallOnce(&modulo_function_init_once, &init_modulo_function);
+  if (modulo_function == NULL) {
+    ScopedLock lock(modulo_function_mutex);
+    if (modulo_function == NULL) {
+      ModuloFunction temp = CreateModuloFunction();
+      MemoryBarrier();
+      modulo_function = temp;
+    }
+  }
   // Note: here we rely on dependent reads being ordered. This is true
   // on all architectures we currently support.
   return (*modulo_function)(x, y);
@@ -204,31 +196,9 @@
 
 #endif  // _WIN64
 
-
-#define UNARY_MATH_FUNCTION(name, generator)             \
-static UnaryMathFunction fast_##name##_function = NULL;  \
-V8_DECLARE_ONCE(fast_##name##_init_once);                \
-void init_fast_##name##_function() {                     \
-  fast_##name##_function = generator;                    \
-}                                                        \
-double fast_##name(double x) {                           \
-  CallOnce(&fast_##name##_init_once,                     \
-           &init_fast_##name##_function);                \
-  return (*fast_##name##_function)(x);                   \
-}
-
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
-UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
-
-#undef MATH_FUNCTION
-
-
 // ----------------------------------------------------------------------------
 // The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
+// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
 // timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
 // January 1, 1970.
 
@@ -558,7 +528,7 @@
 }
 
 
-void OS::SetUp() {
+void OS::Setup() {
   // Seed the random number generator.
   // Convert the current time to a 64-bit integer first, before converting it
   // to an unsigned. Going directly can cause an overflow and the seed to be
@@ -806,7 +776,7 @@
 
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
-// and verification).  The estimate is conservative, i.e., not all addresses in
+// and verification).  The estimate is conservative, i.e., not all addresses in
 // 'allocated' space are actually allocated to our heap.  The range is
 // [lowest, highest), inclusive on the low end and exclusive on the high end.
 static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
@@ -861,62 +831,43 @@
 }
 
 
-static void* GetRandomAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in OS::Allocate
-    // Try not to map pages into the default range that windows loads DLLs
-    // Use a multiple of 64k to prevent committing unused memory.
-    // Note: This does not guarantee RWX regions will be within the
-    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
-    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-    uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
-        | kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-    return reinterpret_cast<void *>(address);
-  }
-  return NULL;
-}
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For exectutable pages try and randomize the allocation address
-    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
-      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
-    }
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
+  // The address range used to randomize RWX allocations in OS::Allocate.
+  // Try not to map pages into the default range where Windows loads DLLs.
+  // Use a multiple of 64k to prevent committing unused memory.
+  // Note: this does not guarantee that RWX regions will be within the
+  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+
   // VirtualAlloc rounds allocated size to page size automatically.
   size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
+  intptr_t address = 0;
 
   // Windows XP SP2 allows Data Execution Prevention (DEP).
   int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
 
-  LPVOID mbase = RandomizedVirtualAlloc(msize,
-                                        MEM_COMMIT | MEM_RESERVE,
-                                        prot);
+  // For executable pages, try to randomize the allocation address.
+  if (prot == PAGE_EXECUTE_READWRITE &&
+      msize >= static_cast<size_t>(Page::kPageSize)) {
+    address = (V8::RandomPrivate(Isolate::Current()) << kPageSizeBits)
+      | kAllocationRandomAddressMin;
+    address &= kAllocationRandomAddressMax;
+  }
+
+  LPVOID mbase = VirtualAlloc(reinterpret_cast<void *>(address),
+                              msize,
+                              MEM_COMMIT | MEM_RESERVE,
+                              prot);
+  if (mbase == NULL && address != 0)
+    mbase = VirtualAlloc(NULL, msize, MEM_COMMIT | MEM_RESERVE, prot);
 
   if (mbase == NULL) {
     LOG(ISOLATE, StringEvent("OS::Allocate", "VirtualAlloc failed"));
@@ -938,11 +889,6 @@
 }
 
 
-intptr_t OS::CommitPageSize() {
-  return 4096;
-}
-
-
 void OS::ProtectCode(void* address, const size_t size) {
   DWORD old_protect;
   VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
@@ -961,11 +907,15 @@
 
 
 void OS::Abort() {
-  if (IsDebuggerPresent() || FLAG_break_on_abort) {
-    DebugBreak();
-  } else {
+  if (!IsDebuggerPresent()) {
+#ifdef _MSC_VER
     // Make the MSVCRT do a silent abort.
-    raise(SIGABRT);
+    _set_abort_behavior(0, _WRITE_ABORT_MSG);
+    _set_abort_behavior(0, _CALL_REPORTFAULT);
+#endif  // _MSC_VER
+    abort();
+  } else {
+    DebugBreak();
   }
 }
 
@@ -1447,108 +1397,38 @@
 }
 
 
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size);
-  if (address == NULL) return;
-  Address base = RoundUp(static_cast<Address>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  ASSERT(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    ASSERT(base == static_cast<Address>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address_, size_);
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
 bool VirtualMemory::IsReserved() {
   return address_ != NULL;
 }
 
 
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+  }
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  if (CommitRegion(address, size, is_executable)) {
-    UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
-    return true;
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
+    return false;
   }
-  return false;
+
+  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+  return true;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
-  return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_READONLY | PAGE_GUARD)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+  return VirtualFree(address, size, MEM_DECOMMIT) != false;
 }
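
The restored VirtualMemory methods map directly onto the Win32 reserve/commit model: MEM_RESERVE claims address space with no backing storage, MEM_COMMIT backs a sub-range, MEM_DECOMMIT returns the backing, and MEM_RELEASE (with size 0) drops the whole reservation. A self-contained sketch of that lifecycle, assuming a 4 KB commit granularity:

#include <windows.h>
#include <cassert>

int main() {
  const size_t kRegion = 1 << 20;  // 1 MB reservation
  const size_t kPage = 4096;       // assumed commit granularity

  // Reserve address space only; touching it now would fault.
  void* base = VirtualAlloc(NULL, kRegion, MEM_RESERVE, PAGE_NOACCESS);
  assert(base != NULL);

  // Commit one writable page inside the reservation.
  void* page = VirtualAlloc(base, kPage, MEM_COMMIT, PAGE_READWRITE);
  assert(page != NULL);
  static_cast<char*>(page)[0] = 42;  // now safe to use

  // Decommit the page, then release the entire reservation.
  assert(VirtualFree(page, kPage, MEM_DECOMMIT) != 0);
  assert(VirtualFree(base, 0, MEM_RELEASE) != 0);  // size must be 0 here
  return 0;
}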
 
 
@@ -1573,7 +1453,6 @@
  public:
   explicit PlatformData(HANDLE thread) : thread_(thread) {}
   HANDLE thread_;
-  unsigned thread_id_;
 };
 
 
@@ -1581,9 +1460,16 @@
 // handle until it is started.
 
 Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size()) {
+    : stack_size_(options.stack_size) {
   data_ = new PlatformData(kNoThread);
-  set_name(options.name());
+  set_name(options.name);
+}
+
+
+Thread::Thread(const char* name)
+    : stack_size_(0) {
+  data_ = new PlatformData(kNoThread);
+  set_name(name);
 }
 
 
@@ -1610,15 +1496,13 @@
                      ThreadEntry,
                      this,
                      0,
-                     &data_->thread_id_));
+                     NULL));
 }
 
 
 // Wait for thread to terminate.
 void Thread::Join() {
-  if (data_->thread_id_ != GetCurrentThreadId()) {
-    WaitForSingleObject(data_->thread_, INFINITE);
-  }
+  WaitForSingleObject(data_->thread_, INFINITE);
 }
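
Note what the revert loses here: without recording the thread id, Join() can no longer skip joining the current thread, so a self-join now blocks forever on WaitForSingleObject. The underlying API usage, sketched standalone (ThreadEntry and the "worker" argument are illustrative):

#include <windows.h>
#include <process.h>
#include <cstdio>

static unsigned __stdcall ThreadEntry(void* arg) {
  std::printf("hello from %s\n", static_cast<const char*>(arg));
  return 0;
}

int main() {
  // Passing NULL as the last argument discards the thread id, exactly
  // as the reverted Start() does.
  HANDLE thread = reinterpret_cast<HANDLE>(
      _beginthreadex(NULL, 0 /* default stack size */, ThreadEntry,
                     const_cast<char*>("worker"), 0, NULL));
  WaitForSingleObject(thread, INFINITE);  // the Join() equivalent
  CloseHandle(thread);
  return 0;
}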
 
 
@@ -1873,7 +1757,7 @@
 }
 
 
-bool Socket::SetUp() {
+bool Socket::Setup() {
   // Initialize Winsock32
   int err;
   WSADATA winsock_data;
@@ -1949,14 +1833,12 @@
 
 class SamplerThread : public Thread {
  public:
-  static const int kSamplerThreadStackSize = 64 * KB;
-
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread("SamplerThread"),
         interval_(interval) {}
 
   static void AddActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::AddActiveSampler(sampler);
     if (instance_ == NULL) {
       instance_ = new SamplerThread(sampler->interval());
@@ -1967,7 +1849,7 @@
   }
 
   static void RemoveActiveSampler(Sampler* sampler) {
-    ScopedLock lock(mutex_.Pointer());
+    ScopedLock lock(mutex_);
     SamplerRegistry::RemoveActiveSampler(sampler);
     if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
       RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
@@ -2053,15 +1935,14 @@
   RuntimeProfilerRateLimiter rate_limiter_;
 
   // Protects the process wide state below.
-  static LazyMutex mutex_;
+  static Mutex* mutex_;
   static SamplerThread* instance_;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(SamplerThread);
 };
 
 
-LazyMutex SamplerThread::mutex_ = LAZY_MUTEX_INITIALIZER;
+Mutex* SamplerThread::mutex_ = OS::CreateMutex();
 SamplerThread* SamplerThread::instance_ = NULL;
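
The mutex change swaps lazy creation for creation during C++ static initialization, which is the initialization-order hazard LazyMutex existed to avoid. The difference, sketched with a stand-in Mutex type:

#include <mutex>

struct Mutex { std::mutex impl; };  // stand-in for V8's OS::CreateMutex()

// Reverted style: the constructor runs before main(), and its order
// relative to other translation units' static initializers is unspecified.
static Mutex* eager_mutex = new Mutex();

// LazyMutex-style alternative: constructed on first use, so any static
// initializer that calls this sees a valid mutex regardless of link order.
static Mutex* LazyMutexPointer() {
  static Mutex instance;  // thread-safe under C++11 "magic statics"
  return &instance;
}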
 
 
diff --git a/src/platform.h b/src/platform.h
index 4ec6057..034fe34 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -79,7 +79,6 @@
 #endif  // WIN32
 
 #include "atomicops.h"
-#include "lazy-instance.h"
 #include "platform-tls.h"
 #include "utils.h"
 #include "v8globals.h"
@@ -97,13 +96,6 @@
 double ceiling(double x);
 double modulo(double x, double y);
 
-// Custom implementation of sin, cos, tan and log.
-double fast_sin(double input);
-double fast_cos(double input);
-double fast_tan(double input);
-double fast_log(double input);
-double fast_sqrt(double input);
-
 // Forward declarations.
 class Socket;
 
@@ -117,7 +109,7 @@
 class OS {
  public:
   // Initializes the platform OS support. Called once at VM startup.
-  static void SetUp();
+  static void Setup();
 
   // Returns the accumulated user time for thread. This routine
   // can be used for profiling. The implementation should
@@ -180,19 +172,12 @@
                         bool is_executable);
   static void Free(void* address, const size_t size);
 
-  // This is the granularity at which the ProtectCode(...) call can set page
-  // permissions.
-  static intptr_t CommitPageSize();
-
   // Mark code segments non-writable.
   static void ProtectCode(void* address, const size_t size);
 
   // Assign memory as a guard page so that access will cause an exception.
   static void Guard(void* address, const size_t size);
 
-  // Generate a random address to be used for hinting mmap().
-  static void* GetRandomMmapAddr();
-
   // Get the Alignment guaranteed by Allocate().
   static size_t AllocateAlignment();
 
@@ -316,46 +301,23 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-constructing. This removes the reserved memory
-// from the original object.
+
 class VirtualMemory {
  public:
-  // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
-
   // Reserves virtual memory with size.
   explicit VirtualMemory(size_t size);
-
-  // Reserves virtual memory containing an area of the given size that
-  // is aligned per alignment. This may not be at the position returned
-  // by address().
-  VirtualMemory(size_t size, size_t alignment);
-
-  // Releases the reserved memory, if any, controlled by this VirtualMemory
-  // object.
   ~VirtualMemory();
 
   // Returns whether the memory has been reserved.
   bool IsReserved();
 
-  // Initializes or resets an embedded VirtualMemory object.
-  void Reset();
-
   // Returns the start address of the reserved memory.
-  // If the memory was reserved with an alignment, this address is not
-  // necessarily aligned. The user might need to round it up to a multiple of
-  // the alignment to get the start of the aligned block.
   void* address() {
     ASSERT(IsReserved());
     return address_;
   }
 
-  // Returns the size of the reserved memory. The returned value is only
-  // meaningful when IsReserved() returns true.
-  // If the memory was reserved with an alignment, this size may be larger
-  // than the requested size.
+  // Returns the size of the reserved memory.
   size_t size() { return size_; }
 
   // Commits real memory. Returns whether the operation succeeded.
@@ -364,46 +326,11 @@
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
-  // Creates a single guard page at the given address.
-  bool Guard(void* address);
-
-  void Release() {
-    ASSERT(IsReserved());
-    // Notice: Order is important here. The VirtualMemory object might live
-    // inside the allocated region.
-    void* address = address_;
-    size_t size = size_;
-    Reset();
-    bool result = ReleaseRegion(address, size);
-    USE(result);
-    ASSERT(result);
-  }
-
-  // Assign control of the reserved region to a different VirtualMemory object.
-  // The old object is no longer functional (IsReserved() returns false).
-  void TakeControl(VirtualMemory* from) {
-    ASSERT(!IsReserved());
-    address_ = from->address_;
-    size_ = from->size_;
-    from->Reset();
-  }
-
-  static void* ReserveRegion(size_t size);
-
-  static bool CommitRegion(void* base, size_t size, bool is_executable);
-
-  static bool UncommitRegion(void* base, size_t size);
-
-  // Must be called with a base pointer that has been returned by ReserveRegion
-  // and the same size it was reserved with.
-  static bool ReleaseRegion(void* base, size_t size);
-
  private:
   void* address_;  // Start address of the virtual memory.
   size_t size_;  // Size of the virtual memory.
 };
 
-
 // ----------------------------------------------------------------------------
 // Thread
 //
@@ -423,22 +350,16 @@
     LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
   };
 
-  class Options {
-   public:
-    Options() : name_("v8:<unknown>"), stack_size_(0) {}
-    Options(const char* name, int stack_size = 0)
-        : name_(name), stack_size_(stack_size) {}
+  struct Options {
+    Options() : name("v8:<unknown>"), stack_size(0) {}
 
-    const char* name() const { return name_; }
-    int stack_size() const { return stack_size_; }
-
-   private:
-    const char* name_;
-    int stack_size_;
+    const char* name;
+    int stack_size;
   };
 
   // Create new thread.
   explicit Thread(const Options& options);
+  explicit Thread(const char* name);
   virtual ~Thread();
 
   // Start new thread by calling the Run() method in the new thread.
@@ -494,7 +415,7 @@
   PlatformData* data() { return data_; }
 
  private:
-  void set_name(const char* name);
+  void set_name(const char *name);
 
   PlatformData* data_;
 
@@ -530,24 +451,6 @@
   virtual bool TryLock() = 0;
 };
 
-struct CreateMutexTrait {
-  static Mutex* Create() {
-    return OS::CreateMutex();
-  }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-//   void my_function() {
-//     ScopedLock my_lock(my_mutex.Pointer());
-//     // Do something.
-//   }
-//
-typedef LazyDynamicInstance<Mutex, CreateMutexTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
 
 // ----------------------------------------------------------------------------
 // ScopedLock
@@ -588,7 +491,7 @@
   virtual void Wait() = 0;
 
   // Suspends the calling thread until the counter is non zero or the timeout
-  // time has passed. If timeout happens the return value is false and the
+  // time has passed. If timeout happens the return value is false and the
   // counter is unchanged. Otherwise the semaphore counter is decremented and
   // true is returned. The timeout value is specified in microseconds.
   virtual bool Wait(int timeout) = 0;
@@ -597,30 +500,6 @@
   virtual void Signal() = 0;
 };
 
-template <int InitialValue>
-struct CreateSemaphoreTrait {
-  static Semaphore* Create() {
-    return OS::CreateSemaphore(InitialValue);
-  }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-//   // The following semaphore starts at 0.
-//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-//   void my_function() {
-//     // Do something with my_semaphore.Pointer().
-//   }
-//
-template <int InitialValue>
-struct LazySemaphore {
-  typedef typename LazyDynamicInstance<
-      Semaphore, CreateSemaphoreTrait<InitialValue> >::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
 
 // ----------------------------------------------------------------------------
 // Socket
@@ -652,7 +531,7 @@
 
   virtual bool IsValid() const = 0;
 
-  static bool SetUp();
+  static bool Setup();
   static int LastError();
   static uint16_t HToN(uint16_t value);
   static uint16_t NToH(uint16_t value);
diff --git a/src/preparse-data.h b/src/preparse-data.h
index f347430..c6503c4 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -49,11 +49,11 @@
                            int end,
                            int literals,
                            int properties,
-                           LanguageMode language_mode) = 0;
+                           int strict_mode) = 0;
 
   // Logs a symbol creation of a literal or identifier.
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
-  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
 
   // Logs an error message and marks the log as containing an error.
   // Further logging will be ignored, and ExtractData will return a vector
@@ -89,12 +89,12 @@
                            int end,
                            int literals,
                            int properties,
-                           LanguageMode language_mode) {
+                           int strict_mode) {
     function_store_.Add(start);
     function_store_.Add(end);
     function_store_.Add(literals);
     function_store_.Add(properties);
-    function_store_.Add(language_mode);
+    function_store_.Add(strict_mode);
   }
 
   // Logs an error message and marks the log as containing an error.
@@ -149,7 +149,7 @@
  public:
   PartialParserRecorder() : FunctionLoggingParserRecorder() { }
   virtual void LogAsciiSymbol(int start, Vector<const char> literal) { }
-  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) { }
+  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) { }
   virtual ~PartialParserRecorder() { }
   virtual Vector<unsigned> ExtractData();
   virtual int symbol_position() { return 0; }
@@ -171,7 +171,7 @@
     LogSymbol(start, hash, true, Vector<const byte>::cast(literal));
   }
 
-  virtual void LogUtf16Symbol(int start, Vector<const uc16> literal) {
+  virtual void LogUC16Symbol(int start, Vector<const uc16> literal) {
     if (!is_recording_) return;
     int hash = vector_hash(literal);
     LogSymbol(start, hash, false, Vector<const byte>::cast(literal));
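
As the hunks above show, each function entry is flattened into five consecutive ints in function_store_ (start, end, literals, properties, strict_mode). A reader for that layout is symmetric; the sketch below assumes only the field order visible in the code:

#include <vector>

// One preparse entry as written by LogFunction(): five ints in order.
struct FunctionEntry {
  int start;
  int end;
  int literals;
  int properties;
  int strict_mode;
};

FunctionEntry ReadEntry(const std::vector<unsigned>& store, size_t offset) {
  // offset is the index of the entry's first field in the flat store.
  FunctionEntry e;
  e.start       = static_cast<int>(store[offset + 0]);
  e.end         = static_cast<int>(store[offset + 1]);
  e.literals    = static_cast<int>(store[offset + 2]);
  e.properties  = static_cast<int>(store[offset + 3]);
  e.strict_mode = static_cast<int>(store[offset + 4]);
  return e;
}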
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 6e8556a..899489e 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -46,10 +46,10 @@
 namespace internal {
 
 // UTF16Buffer based on a v8::UnicodeInputStream.
-class InputStreamUtf16Buffer : public Utf16CharacterStream {
+class InputStreamUTF16Buffer : public UC16CharacterStream {
  public:
-  /* The InputStreamUtf16Buffer maintains an internal buffer
-   * that is filled in chunks from the Utf16CharacterStream.
+  /* The InputStreamUTF16Buffer maintains an internal buffer
+   * that is filled in chunks from the UC16CharacterStream.
    * It also maintains unlimited pushback capability, but optimized
    * for small pushbacks.
    * The pushback_buffer_ pointer points to the limit of pushbacks
@@ -60,8 +60,8 @@
    * new buffer. When this buffer is read to the end again, the cursor is
    * switched back to the internal buffer
    */
-  explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream)
-      : Utf16CharacterStream(),
+  explicit InputStreamUTF16Buffer(v8::UnicodeInputStream* stream)
+      : UC16CharacterStream(),
         stream_(stream),
         pushback_buffer_(buffer_),
         pushback_buffer_end_cache_(NULL),
@@ -70,7 +70,7 @@
     buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize;
   }
 
-  virtual ~InputStreamUtf16Buffer() {
+  virtual ~InputStreamUTF16Buffer() {
     if (pushback_buffer_backing_ != NULL) {
       DeleteArray(pushback_buffer_backing_);
     }
@@ -127,18 +127,12 @@
     uc16* buffer_start = buffer_ + kPushBackSize;
     buffer_cursor_ = buffer_end_ = buffer_start;
     while ((value = stream_->Next()) >= 0) {
-      if (value >
-          static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
-        buffer_start[buffer_end_++ - buffer_start] =
-            unibrow::Utf16::LeadSurrogate(value);
-        buffer_start[buffer_end_++ - buffer_start] =
-            unibrow::Utf16::TrailSurrogate(value);
-      } else {
-        // buffer_end_ is a const pointer, but buffer_ is writable.
-        buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
+      if (value > static_cast<int32_t>(unibrow::Utf8::kMaxThreeByteChar)) {
+        value = unibrow::Utf8::kBadChar;
       }
-      // Stop one before the end of the buffer in case we get a surrogate pair.
-      if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break;
+      // buffer_end_ is a const pointer, but buffer_ is writable.
+      buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value);
+      if (buffer_end_ == buffer_ + kPushBackSize + kBufferSize) break;
     }
     return buffer_end_ > buffer_start;
   }
@@ -185,16 +179,16 @@
 
 
 PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
-  internal::InputStreamUtf16Buffer buffer(input);
+  internal::InputStreamUTF16Buffer buffer(input);
   uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
   internal::UnicodeCache unicode_cache;
-  internal::Scanner scanner(&unicode_cache);
+  internal::JavaScriptScanner scanner(&unicode_cache);
   scanner.Initialize(&buffer);
   internal::CompleteParserRecorder recorder;
   preparser::PreParser::PreParseResult result =
       preparser::PreParser::PreParseProgram(&scanner,
                                             &recorder,
-                                            internal::kAllowLazy,
+                                            true,
                                             stack_limit);
   if (result == preparser::PreParser::kPreParseStackOverflow) {
     return PreParserData::StackOverflow();
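
The FillBuffer change is the behavioral core of this file: the restored 3.6 code clamps supplementary-plane code points to a replacement character, while the code being reverted away split them into UTF-16 surrogate pairs. Side by side (a sketch; the constant values are what unibrow's kMaxThreeByteChar and kBadChar are believed to be):

#include <cstdint>
#include <vector>

typedef uint16_t uc16;

// Restored 3.6 behavior: anything above the BMP is replaced.
void PushClamped(std::vector<uc16>* out, int32_t value) {
  const int32_t kMaxThreeByteChar = 0xFFFF;  // assumed unibrow value
  const uc16 kBadChar = 0xFFFD;              // U+FFFD REPLACEMENT CHARACTER
  out->push_back(value > kMaxThreeByteChar ? kBadChar
                                           : static_cast<uc16>(value));
}

// The behavior being reverted away: encode as a surrogate pair.
void PushSurrogates(std::vector<uc16>* out, int32_t value) {
  if (value > 0xFFFF) {
    value -= 0x10000;
    out->push_back(static_cast<uc16>(0xD800 + (value >> 10)));    // lead
    out->push_back(static_cast<uc16>(0xDC00 + (value & 0x3FF)));  // trail
  } else {
    out->push_back(static_cast<uc16>(value));
  }
}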
diff --git a/src/preparser.cc b/src/preparser.cc
index 20d3b9c..6021ebd 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -52,34 +52,6 @@
 
 namespace preparser {
 
-PreParser::PreParseResult PreParser::PreParseLazyFunction(
-    i::LanguageMode mode, i::ParserRecorder* log) {
-  log_ = log;
-  // Lazy functions always have trivial outer scopes (no with/catch scopes).
-  Scope top_scope(&scope_, kTopLevelScope);
-  set_language_mode(mode);
-  Scope function_scope(&scope_, kFunctionScope);
-  ASSERT_EQ(i::Token::LBRACE, scanner_->current_token());
-  bool ok = true;
-  int start_position = scanner_->peek_location().beg_pos;
-  ParseLazyFunctionLiteralBody(&ok);
-  if (stack_overflow_) return kPreParseStackOverflow;
-  if (!ok) {
-    ReportUnexpectedToken(scanner_->current_token());
-  } else {
-    ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
-    if (!is_classic_mode()) {
-      int end_pos = scanner_->location().end_pos;
-      CheckOctalLiteral(start_position, end_pos, &ok);
-      if (ok) {
-        CheckDelayedStrictModeViolation(start_position, end_pos, &ok);
-      }
-    }
-  }
-  return kPreParseSuccess;
-}
-
-
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
 // See preparser-data.h for the data.
@@ -100,7 +72,7 @@
   if (token == i::Token::ILLEGAL && stack_overflow_) {
     return;
   }
-  i::Scanner::Location source_location = scanner_->location();
+  i::JavaScriptScanner::Location source_location = scanner_->location();
 
   // Four of the tokens are treated specially
   switch (token) {
@@ -145,21 +117,8 @@
 
 
 PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
-  // (Ecma 262 5th Edition, clause 14):
-  // SourceElement:
-  //    Statement
-  //    FunctionDeclaration
-  //
-  // In harmony mode we allow additionally the following productions
-  // SourceElement:
-  //    LetDeclaration
-  //    ConstDeclaration
-
   switch (peek()) {
-    case i::Token::FUNCTION:
-      return ParseFunctionDeclaration(ok);
     case i::Token::LET:
-    case i::Token::CONST:
       return ParseVariableStatement(kSourceElement, ok);
     default:
       return ParseStatement(ok);
@@ -177,8 +136,7 @@
     Statement statement = ParseSourceElement(CHECK_OK);
     if (allow_directive_prologue) {
       if (statement.IsUseStrictLiteral()) {
-        set_language_mode(harmony_scoping_ ?
-                          i::EXTENDED_MODE : i::STRICT_MODE);
+        set_strict_mode();
       } else if (!statement.IsStringLiteral()) {
         allow_directive_prologue = false;
       }
@@ -227,7 +185,6 @@
       return ParseBlock(ok);
 
     case i::Token::CONST:
-    case i::Token::LET:
     case i::Token::VAR:
       return ParseVariableStatement(kStatement, ok);
 
@@ -268,19 +225,8 @@
     case i::Token::TRY:
       return ParseTryStatement(ok);
 
-    case i::Token::FUNCTION: {
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseFunctionDeclaration(CHECK_OK);
-      i::Scanner::Location end_location = scanner_->location();
-      if (!is_classic_mode()) {
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-        return Statement::Default();
-      } else {
-        return statement;
-      }
-    }
+    case i::Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
 
     case i::Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -325,10 +271,14 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    if (is_extended_mode()) {
-      ParseSourceElement(CHECK_OK);
-    } else {
-      ParseStatement(CHECK_OK);
+    i::Scanner::Location start_location = scanner_->peek_location();
+    Statement statement = ParseSourceElement(CHECK_OK);
+    i::Scanner::Location end_location = scanner_->location();
+    if (strict_mode() && statement.IsFunctionDeclaration()) {
+      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                      "strict_function", NULL);
+      *ok = false;
+      return Statement::Default();
     }
   }
   Expect(i::Token::RBRACE, ok);
@@ -344,7 +294,6 @@
 
   Statement result = ParseVariableDeclarations(var_context,
                                                NULL,
-                                               NULL,
                                                CHECK_OK);
   ExpectSemicolon(CHECK_OK);
   return result;
@@ -358,73 +307,22 @@
 // of 'for-in' loops.
 PreParser::Statement PreParser::ParseVariableDeclarations(
     VariableDeclarationContext var_context,
-    VariableDeclarationProperties* decl_props,
     int* num_decl,
     bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const') (Identifier ('=' AssignmentExpression)?)+[',']
-  //
-  // The ES6 Draft Rev3 specifies the following grammar for const declarations
-  //
-  // ConstDeclaration ::
-  //   const ConstBinding (',' ConstBinding)* ';'
-  // ConstBinding ::
-  //   Identifier '=' AssignmentExpression
-  //
-  // TODO(ES6):
-  // ConstBinding ::
-  //   BindingPattern '=' AssignmentExpression
-  bool require_initializer = false;
+
   if (peek() == i::Token::VAR) {
     Consume(i::Token::VAR);
   } else if (peek() == i::Token::CONST) {
-    // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads:
-    //
-    // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';'
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    //
-    // However disallowing const in classic mode will break compatibility with
-    // existing pages. Therefore we keep allowing const with the old
-    // non-harmony semantics in classic mode.
-    Consume(i::Token::CONST);
-    switch (language_mode()) {
-      case i::CLASSIC_MODE:
-        break;
-      case i::STRICT_MODE: {
-        i::Scanner::Location location = scanner_->peek_location();
-        ReportMessageAt(location, "strict_const", NULL);
-        *ok = false;
-        return Statement::Default();
-      }
-      case i::EXTENDED_MODE:
-        if (var_context != kSourceElement &&
-            var_context != kForStatement) {
-          i::Scanner::Location location = scanner_->peek_location();
-          ReportMessageAt(location.beg_pos, location.end_pos,
-                          "unprotected_const", NULL);
-          *ok = false;
-          return Statement::Default();
-        }
-        require_initializer = true;
-        break;
-    }
-  } else if (peek() == i::Token::LET) {
-    // ES6 Draft Rev4 section 12.2.1:
-    //
-    // LetDeclaration : let LetBindingList ;
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    if (!is_extended_mode()) {
+    if (strict_mode()) {
       i::Scanner::Location location = scanner_->peek_location();
-      ReportMessageAt(location.beg_pos, location.end_pos,
-                      "illegal_let", NULL);
+      ReportMessageAt(location, "strict_const", NULL);
       *ok = false;
       return Statement::Default();
     }
-    Consume(i::Token::LET);
+    Consume(i::Token::CONST);
+  } else if (peek() == i::Token::LET) {
     if (var_context != kSourceElement &&
         var_context != kForStatement) {
       i::Scanner::Location location = scanner_->peek_location();
@@ -433,6 +331,7 @@
       *ok = false;
       return Statement::Default();
     }
+    Consume(i::Token::LET);
   } else {
     *ok = false;
     return Statement::Default();
@@ -447,7 +346,7 @@
     // Parse variable name.
     if (nvars > 0) Consume(i::Token::COMMA);
     Identifier identifier  = ParseIdentifier(CHECK_OK);
-    if (!is_classic_mode() && !identifier.IsValidStrictVariable()) {
+    if (strict_mode() && !identifier.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_var_name",
                                     identifier,
@@ -455,10 +354,9 @@
       return Statement::Default();
     }
     nvars++;
-    if (peek() == i::Token::ASSIGN || require_initializer) {
+    if (peek() == i::Token::ASSIGN) {
       Expect(i::Token::ASSIGN, CHECK_OK);
       ParseAssignmentExpression(var_context != kForStatement, CHECK_OK);
-      if (decl_props != NULL) *decl_props = kHasInitializers;
     }
   } while (peek() == i::Token::COMMA);
 
@@ -474,11 +372,18 @@
 
   Expression expr = ParseExpression(true, CHECK_OK);
   if (expr.IsRawIdentifier()) {
-    ASSERT(!expr.AsIdentifier().IsFutureReserved());
-    ASSERT(is_classic_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
-    if (peek() == i::Token::COLON) {
+    if (peek() == i::Token::COLON &&
+        (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
       Consume(i::Token::COLON);
-      return ParseStatement(ok);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+      }
+      return Statement::Default();
     }
     // Preparsing is disabled for extensions (because the extension details
     // aren't passed to lazily compiled functions), so we don't
@@ -571,7 +476,7 @@
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
   Expect(i::Token::WITH, CHECK_OK);
-  if (!is_classic_mode()) {
+  if (strict_mode()) {
     i::Scanner::Location location = scanner_->location();
     ReportMessageAt(location, "strict_mode_with", NULL);
     *ok = false;
@@ -608,7 +513,15 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      ParseStatement(CHECK_OK);
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseStatement(CHECK_OK);
+      if (strict_mode() && statement.IsFunctionDeclaration()) {
+        i::Scanner::Location end_location = scanner_->location();
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      }
     }
     token = peek();
   }
@@ -654,14 +567,9 @@
   if (peek() != i::Token::SEMICOLON) {
     if (peek() == i::Token::VAR || peek() == i::Token::CONST ||
         peek() == i::Token::LET) {
-      bool is_let = peek() == i::Token::LET;
       int decl_count;
-      VariableDeclarationProperties decl_props = kHasNoInitializers;
-      ParseVariableDeclarations(
-          kForStatement, &decl_props, &decl_count, CHECK_OK);
-      bool accept_IN = decl_count == 1 &&
-          !(is_let && decl_props == kHasInitializers);
-      if (peek() == i::Token::IN && accept_IN) {
+      ParseVariableDeclarations(kForStatement, &decl_count, CHECK_OK);
+      if (peek() == i::Token::IN && decl_count == 1) {
         Expect(i::Token::IN, CHECK_OK);
         ParseExpression(true, CHECK_OK);
         Expect(i::Token::RPAREN, CHECK_OK);
@@ -706,7 +614,7 @@
 
   Expect(i::Token::THROW, CHECK_OK);
   if (scanner_->HasAnyLineTerminatorBeforeNext()) {
-    i::Scanner::Location pos = scanner_->location();
+    i::JavaScriptScanner::Location pos = scanner_->location();
     ReportMessageAt(pos, "newline_after_throw", NULL);
     *ok = false;
     return Statement::Default();
@@ -741,7 +649,7 @@
     Consume(i::Token::CATCH);
     Expect(i::Token::LPAREN, CHECK_OK);
     Identifier id = ParseIdentifier(CHECK_OK);
-    if (!is_classic_mode() && !id.IsValidStrictVariable()) {
+    if (strict_mode() && !id.IsValidStrictVariable()) {
       StrictModeIdentifierViolation(scanner_->location(),
                                     "strict_catch_variable",
                                     id,
@@ -819,8 +727,7 @@
     return expression;
   }
 
-  if (!is_classic_mode() &&
-      expression.IsIdentifier() &&
+  if (strict_mode() && expression.IsIdentifier() &&
       expression.AsIdentifier().IsEvalOrArguments()) {
     i::Scanner::Location after = scanner_->location();
     ReportMessageAt(before.beg_pos, after.end_pos,
@@ -908,8 +815,7 @@
     op = Next();
     i::Scanner::Location before = scanner_->peek_location();
     Expression expression = ParseUnaryExpression(CHECK_OK);
-    if (!is_classic_mode() &&
-        expression.IsIdentifier() &&
+    if (strict_mode() && expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -931,8 +837,7 @@
   Expression expression = ParseLeftHandSideExpression(CHECK_OK);
   if (!scanner_->HasAnyLineTerminatorBeforeNext() &&
       i::Token::IsCountOp(peek())) {
-    if (!is_classic_mode() &&
-        expression.IsIdentifier() &&
+    if (strict_mode() && expression.IsIdentifier() &&
         expression.AsIdentifier().IsEvalOrArguments()) {
       i::Scanner::Location after = scanner_->location();
       ReportMessageAt(before.beg_pos, after.end_pos,
@@ -1119,7 +1024,7 @@
     }
 
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
-      if (!is_classic_mode()) {
+      if (strict_mode()) {
         Next();
         i::Scanner::Location location = scanner_->location();
         ReportMessageAt(location, "strict_reserved_word", NULL);
@@ -1214,12 +1119,12 @@
     old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(),
                                       type);
   } else {
-    old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type);
+    old_type = finder->AddUC16Symbol(scanner_->literal_uc16_string(), type);
   }
   if (HasConflict(old_type, type)) {
     if (IsDataDataConflict(old_type, type)) {
       // Both are data properties.
-      if (is_classic_mode()) return;
+      if (!strict_mode()) return;
       ReportMessageAt(scanner_->location(),
                       "strict_duplicate_property", NULL);
     } else if (IsDataAccessorConflict(old_type, type)) {
@@ -1387,7 +1292,7 @@
           duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1);
     } else {
       prev_value =
-          duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1);
+          duplicate_finder.AddUC16Symbol(scanner_->literal_uc16_string(), 1);
     }
 
     if (prev_value != 0) {
@@ -1402,6 +1307,9 @@
   }
   Expect(i::Token::RPAREN, CHECK_OK);
 
+  Expect(i::Token::LBRACE, CHECK_OK);
+  int function_block_pos = scanner_->location().beg_pos;
+
   // Determine if the function will be lazily compiled.
   // Currently only happens to top-level functions.
   // Optimistically assume that all top-level functions are lazily compiled.
@@ -1410,15 +1318,26 @@
                              !parenthesized_function_);
   parenthesized_function_ = false;
 
-  Expect(i::Token::LBRACE, CHECK_OK);
   if (is_lazily_compiled) {
-    ParseLazyFunctionLiteralBody(CHECK_OK);
-  } else {
+    log_->PauseRecording();
     ParseSourceElements(i::Token::RBRACE, ok);
-  }
-  Expect(i::Token::RBRACE, CHECK_OK);
+    log_->ResumeRecording();
+    if (!*ok) return Expression::Default();
 
-  if (!is_classic_mode()) {
+    Expect(i::Token::RBRACE, CHECK_OK);
+
+    // Position right after terminal '}'.
+    int end_pos = scanner_->location().end_pos;
+    log_->LogFunction(function_block_pos, end_pos,
+                      function_scope.materialized_literal_count(),
+                      function_scope.expected_properties(),
+                      strict_mode() ? 1 : 0);
+  } else {
+    ParseSourceElements(i::Token::RBRACE, CHECK_OK);
+    Expect(i::Token::RBRACE, CHECK_OK);
+  }
+
+  if (strict_mode()) {
     int end_position = scanner_->location().end_pos;
     CheckOctalLiteral(start_position, end_position, CHECK_OK);
     CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK);
@@ -1429,31 +1348,11 @@
 }
 
 
-void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
-  int body_start = scanner_->location().beg_pos;
-  log_->PauseRecording();
-  ParseSourceElements(i::Token::RBRACE, ok);
-  log_->ResumeRecording();
-  if (!*ok) return;
-
-  // Position right after terminal '}'.
-  ASSERT_EQ(i::Token::RBRACE, scanner_->peek());
-  int body_end = scanner_->peek_location().end_pos;
-  log_->LogFunction(body_start, body_end,
-                    scope_->materialized_literal_count(),
-                    scope_->expected_properties(),
-                    language_mode());
-}
-
-
 PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
+
   Expect(i::Token::MOD, CHECK_OK);
-  if (!allow_natives_syntax_) {
-    *ok = false;
-    return Expression::Default();
-  }
   ParseIdentifier(CHECK_OK);
   ParseArguments(ok);
 
@@ -1485,7 +1384,7 @@
   if (scanner_->is_literal_ascii()) {
     log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string());
   } else {
-    log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string());
+    log_->LogUC16Symbol(identifier_pos, scanner_->literal_uc16_string());
   }
 }
 
@@ -1536,16 +1435,9 @@
       ReportMessageAt(location.beg_pos, location.end_pos,
                       "reserved_word", NULL);
       *ok = false;
-      return GetIdentifierSymbol();
     }
-    case i::Token::FUTURE_STRICT_RESERVED_WORD:
-      if (!is_classic_mode()) {
-        i::Scanner::Location location = scanner_->location();
-        ReportMessageAt(location.beg_pos, location.end_pos,
-                        "strict_reserved_word", NULL);
-        *ok = false;
-      }
       // FALLTHROUGH
+    case i::Token::FUTURE_STRICT_RESERVED_WORD:
     case i::Token::IDENTIFIER:
       return GetIdentifierSymbol();
     default:
@@ -1558,7 +1450,7 @@
 void PreParser::SetStrictModeViolation(i::Scanner::Location location,
                                        const char* type,
                                        bool* ok) {
-  if (!is_classic_mode()) {
+  if (strict_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
@@ -1598,7 +1490,7 @@
   } else if (identifier.IsFutureStrictReserved()) {
     type = "strict_reserved_word";
   }
-  if (!is_classic_mode()) {
+  if (strict_mode()) {
     ReportMessageAt(location, type, NULL);
     *ok = false;
     return;
@@ -1657,7 +1549,7 @@
   return AddSymbol(i::Vector<const byte>::cast(key), true, value);
 }
 
-int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) {
+int DuplicateFinder::AddUC16Symbol(i::Vector<const uint16_t> key, int value) {
   return AddSymbol(i::Vector<const byte>::cast(key), false, value);
 }
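
Both AddAsciiSymbol and AddUC16Symbol funnel into one byte-oriented AddSymbol, with an is-ASCII flag keeping the two encodings from colliding. A standalone sketch of that scheme (std::map stands in for V8's hashmap):

#include <cstdint>
#include <map>
#include <string>

// Stand-in for DuplicateFinder: keys are raw bytes plus an encoding tag,
// so the 1-byte "a" and the 2-byte u"a" can never collide.
class DuplicateFinder {
 public:
  int AddAsciiSymbol(const char* s, size_t len, int value) {
    return Add(reinterpret_cast<const uint8_t*>(s), len, true, value);
  }
  int AddUC16Symbol(const uint16_t* s, size_t len, int value) {
    return Add(reinterpret_cast<const uint8_t*>(s), len * sizeof(uint16_t),
               false, value);
  }

 private:
  int Add(const uint8_t* bytes, size_t n, bool is_ascii, int value) {
    std::string key(is_ascii ? "a:" : "u:");
    key.append(reinterpret_cast<const char*>(bytes), n);
    int old = map_[key];  // 0 means "not seen before"
    map_[key] = value;
    return old;           // callers treat a nonzero result as a duplicate
  }
  std::map<std::string, int> map_;
};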
 
diff --git a/src/preparser.h b/src/preparser.h
index f3a4347..b97b7cf 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,6 @@
 #ifndef V8_PREPARSER_H
 #define V8_PREPARSER_H
 
-#include "hashmap.h"
 #include "token.h"
 #include "scanner.h"
 
@@ -65,7 +64,7 @@
         map_(&Match) { }
 
   int AddAsciiSymbol(i::Vector<const char> key, int value);
-  int AddUtf16Symbol(i::Vector<const uint16_t> key, int value);
+  int AddUC16Symbol(i::Vector<const uint16_t> key, int value);
   // Add a number literal by converting it (if necessary)
   // to the string that ToString(ToNumber(literal)) would generate,
   // and then adding that string with AddAsciiSymbol.
@@ -111,54 +110,19 @@
     kPreParseSuccess
   };
 
-
-  PreParser(i::Scanner* scanner,
-            i::ParserRecorder* log,
-            uintptr_t stack_limit,
-            bool allow_lazy,
-            bool allow_natives_syntax,
-            bool allow_modules)
-      : scanner_(scanner),
-        log_(log),
-        scope_(NULL),
-        stack_limit_(stack_limit),
-        strict_mode_violation_location_(i::Scanner::Location::invalid()),
-        strict_mode_violation_type_(NULL),
-        stack_overflow_(false),
-        allow_lazy_(allow_lazy),
-        allow_modules_(allow_modules),
-        allow_natives_syntax_(allow_natives_syntax),
-        parenthesized_function_(false),
-        harmony_scoping_(scanner->HarmonyScoping()) { }
-
   ~PreParser() {}
 
   // Pre-parse the program from the character stream; returns true on
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  static PreParseResult PreParseProgram(i::Scanner* scanner,
+  static PreParseResult PreParseProgram(i::JavaScriptScanner* scanner,
                                         i::ParserRecorder* log,
-                                        int flags,
+                                        bool allow_lazy,
                                         uintptr_t stack_limit) {
-    bool allow_lazy = (flags & i::kAllowLazy) != 0;
-    bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
-    bool allow_modules = (flags & i::kAllowModules) != 0;
-    return PreParser(scanner, log, stack_limit, allow_lazy,
-                     allow_natives_syntax, allow_modules).PreParse();
+    return PreParser(scanner, log, stack_limit, allow_lazy).PreParse();
   }
 
-  // Parses a single function literal, from the opening parentheses before
-  // parameters to the closing brace after the body.
-  // Returns a FunctionEntry describing the body of the function in enough
-  // detail that it can be lazily compiled.
-  // The scanner is expected to have matched the "function" keyword and
-  // parameters, and have consumed the initial '{'.
-  // At return, unless an error occurred, the scanner is positioned before
-  // the final '}'.
-  PreParseResult PreParseLazyFunction(i::LanguageMode mode,
-                                      i::ParserRecorder* log);
-
  private:
   // Used to detect duplicates in object literals. Each of the values
   // kGetterProperty, kSetterProperty and kValueProperty represents
@@ -215,12 +179,6 @@
     kForStatement
   };
 
-  // If a list of variable declarations includes any initializers.
-  enum VariableDeclarationProperties {
-    kHasInitializers,
-    kHasNoInitializers
-  };
-
   class Expression;
 
   class Identifier {
@@ -450,8 +408,7 @@
           materialized_literal_count_(0),
           expected_properties_(0),
           with_nesting_count_(0),
-          language_mode_(
-              (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE) {
+          strict_((prev_ != NULL) && prev_->is_strict()) {
       *variable = this;
     }
     ~Scope() { *variable_ = prev_; }
@@ -461,15 +418,8 @@
     int expected_properties() { return expected_properties_; }
     int materialized_literal_count() { return materialized_literal_count_; }
     bool IsInsideWith() { return with_nesting_count_ != 0; }
-    bool is_classic_mode() {
-      return language_mode_ == i::CLASSIC_MODE;
-    }
-    i::LanguageMode language_mode() {
-      return language_mode_;
-    }
-    void set_language_mode(i::LanguageMode language_mode) {
-      language_mode_ = language_mode;
-    }
+    bool is_strict() { return strict_; }
+    void set_strict() { strict_ = true; }
     void EnterWith() { with_nesting_count_++; }
     void LeaveWith() { with_nesting_count_--; }
 
@@ -480,9 +430,25 @@
     int materialized_literal_count_;
     int expected_properties_;
     int with_nesting_count_;
-    i::LanguageMode language_mode_;
+    bool strict_;
   };
 
+  // Private constructor only used in PreParseProgram.
+  PreParser(i::JavaScriptScanner* scanner,
+            i::ParserRecorder* log,
+            uintptr_t stack_limit,
+            bool allow_lazy)
+      : scanner_(scanner),
+        log_(log),
+        scope_(NULL),
+        stack_limit_(stack_limit),
+        strict_mode_violation_location_(i::Scanner::Location::invalid()),
+        strict_mode_violation_type_(NULL),
+        stack_overflow_(false),
+        allow_lazy_(true),
+        parenthesized_function_(false),
+        harmony_block_scoping_(scanner->HarmonyBlockScoping()) { }
+
   // Preparse the program. Only called in PreParseProgram after creating
   // the instance.
   PreParseResult PreParse() {
@@ -493,7 +459,7 @@
     if (stack_overflow_) return kPreParseStackOverflow;
     if (!ok) {
       ReportUnexpectedToken(scanner_->current_token());
-    } else if (!scope_->is_classic_mode()) {
+    } else if (scope_->is_strict()) {
       CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
     }
     return kPreParseSuccess;
@@ -527,7 +493,6 @@
   Statement ParseVariableStatement(VariableDeclarationContext var_context,
                                    bool* ok);
   Statement ParseVariableDeclarations(VariableDeclarationContext var_context,
-                                      VariableDeclarationProperties* decl_props,
                                       int* num_decl,
                                       bool* ok);
   Statement ParseExpressionOrLabelledStatement(bool* ok);
@@ -562,7 +527,6 @@
 
   Arguments ParseArguments(bool* ok);
   Expression ParseFunctionLiteral(bool* ok);
-  void ParseLazyFunctionLiteralBody(bool* ok);
 
   Identifier ParseIdentifier(bool* ok);
   Identifier ParseIdentifierName(bool* ok);
@@ -598,19 +562,11 @@
 
   bool peek_any_identifier();
 
-  void set_language_mode(i::LanguageMode language_mode) {
-    scope_->set_language_mode(language_mode);
+  void set_strict_mode() {
+    scope_->set_strict();
   }
 
-  bool is_classic_mode() {
-    return scope_->language_mode() == i::CLASSIC_MODE;
-  }
-
-  bool is_extended_mode() {
-    return scope_->language_mode() == i::EXTENDED_MODE;
-  }
-
-  i::LanguageMode language_mode() { return scope_->language_mode(); }
+  bool strict_mode() { return scope_->is_strict(); }
 
   void Consume(i::Token::Value token) { Next(); }
 
@@ -634,7 +590,7 @@
 
   void SetStrictModeViolation(i::Scanner::Location,
                               const char* type,
-                              bool* ok);
+                              bool *ok);
 
   void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok);
 
@@ -643,7 +599,7 @@
                                      Identifier identifier,
                                      bool* ok);
 
-  i::Scanner* scanner_;
+  i::JavaScriptScanner* scanner_;
   i::ParserRecorder* log_;
   Scope* scope_;
   uintptr_t stack_limit_;
@@ -651,10 +607,8 @@
   const char* strict_mode_violation_type_;
   bool stack_overflow_;
   bool allow_lazy_;
-  bool allow_modules_;
-  bool allow_natives_syntax_;
   bool parenthesized_function_;
-  bool harmony_scoping_;
+  bool harmony_block_scoping_;
 };
 } }  // v8::preparser
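
The Scope class restored above is a small RAII scope stack: the constructor pushes itself onto a caller-owned "current scope" pointer and inherits strictness from its parent; the destructor pops. Reduced to its essentials (a sketch, with the V8-specific counters omitted):

#include <cstddef>

class Scope {
 public:
  explicit Scope(Scope** current)
      : current_(current),
        prev_(*current),
        strict_(prev_ != NULL && prev_->strict_) {  // inherit strictness
    *current_ = this;  // push: nesting now mirrors C++ block nesting
  }
  ~Scope() { *current_ = prev_; }  // pop on scope exit

  bool is_strict() const { return strict_; }
  void set_strict() { strict_ = true; }

 private:
  Scope** current_;
  Scope* prev_;
  bool strict_;
};

// Usage: entering a scope is just declaring a local.
//   Scope* top = NULL;
//   { Scope fn(&top); fn.set_strict(); { Scope inner(&top); /* strict */ } }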
 
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 0d8dadc..663af28 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,70 +58,17 @@
 }
 
 
-void PrettyPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
+void PrettyPrinter::VisitDeclaration(Declaration* node) {
   Print("var ");
   PrintLiteral(node->proxy()->name(), false);
+  if (node->fun() != NULL) {
+    Print(" = ");
+    PrintFunctionLiteral(node->fun());
+  }
   Print(";");
 }
 
 
-void PrettyPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
-  Print("function ");
-  PrintLiteral(node->proxy()->name(), false);
-  Print(" = ");
-  PrintFunctionLiteral(node->fun());
-  Print(";");
-}
-
-
-void PrettyPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
-  Print("module ");
-  PrintLiteral(node->proxy()->name(), false);
-  Print(" = ");
-  Visit(node->module());
-  Print(";");
-}
-
-
-void PrettyPrinter::VisitImportDeclaration(ImportDeclaration* node) {
-  Print("import ");
-  PrintLiteral(node->proxy()->name(), false);
-  Print(" from ");
-  Visit(node->module());
-  Print(";");
-}
-
-
-void PrettyPrinter::VisitExportDeclaration(ExportDeclaration* node) {
-  Print("export ");
-  PrintLiteral(node->proxy()->name(), false);
-  Print(";");
-}
-
-
-void PrettyPrinter::VisitModuleLiteral(ModuleLiteral* node) {
-  VisitBlock(node->body());
-}
-
-
-void PrettyPrinter::VisitModuleVariable(ModuleVariable* node) {
-  Visit(node->proxy());
-}
-
-
-void PrettyPrinter::VisitModulePath(ModulePath* node) {
-  Visit(node->module());
-  Print(".");
-  PrintLiteral(node->name(), false);
-}
-
-
-void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
-  Print("at ");
-  PrintLiteral(node->url(), true);
-}
-
-
 void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
   Visit(node->expression());
   Print(";");
@@ -425,6 +372,13 @@
 }
 
 
+void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
+  Print("(");
+  Visit(node->expression());
+  Print("%s null)", Token::String(node->op()));
+}
+
+
 void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
   Print("<this-function>");
 }
@@ -500,7 +454,6 @@
 
 
 void PrettyPrinter::PrintStatements(ZoneList<Statement*>* statements) {
-  if (statements == NULL) return;
   for (int i = 0; i < statements->length(); i++) {
     if (i != 0) Print(" ");
     Visit(statements->at(i));
@@ -764,61 +717,20 @@
 }
 
 
-void AstPrinter::VisitVariableDeclaration(VariableDeclaration* node) {
-  PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
-                               node->proxy()->var(),
-                               node->proxy()->name());
-}
-
-
-void AstPrinter::VisitFunctionDeclaration(FunctionDeclaration* node) {
-  PrintIndented("FUNCTION ");
-  PrintLiteral(node->proxy()->name(), true);
-  Print(" = function ");
-  PrintLiteral(node->fun()->name(), false);
-  Print("\n");
-}
-
-
-void AstPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
-  IndentedScope indent(this, "MODULE");
-  PrintLiteralIndented("NAME", node->proxy()->name(), true);
-  Visit(node->module());
-}
-
-
-void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
-  IndentedScope indent(this, "IMPORT");
-  PrintLiteralIndented("NAME", node->proxy()->name(), true);
-  Visit(node->module());
-}
-
-
-void AstPrinter::VisitExportDeclaration(ExportDeclaration* node) {
-  IndentedScope indent(this, "EXPORT ");
-  PrintLiteral(node->proxy()->name(), true);
-}
-
-
-void AstPrinter::VisitModuleLiteral(ModuleLiteral* node) {
-  VisitBlock(node->body());
-}
-
-
-void AstPrinter::VisitModuleVariable(ModuleVariable* node) {
-  Visit(node->proxy());
-}
-
-
-void AstPrinter::VisitModulePath(ModulePath* node) {
-  IndentedScope indent(this, "PATH");
-  PrintIndentedVisit("MODULE", node->module());
-  PrintLiteralIndented("NAME", node->name(), false);
-}
-
-
-void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
-  PrintLiteralIndented("URL", node->url(), true);
+void AstPrinter::VisitDeclaration(Declaration* node) {
+  if (node->fun() == NULL) {
+    // var or const declarations
+    PrintLiteralWithModeIndented(Variable::Mode2String(node->mode()),
+                                 node->proxy()->var(),
+                                 node->proxy()->name());
+  } else {
+    // function declarations
+    PrintIndented("FUNCTION ");
+    PrintLiteral(node->proxy()->name(), true);
+    Print(" = function ");
+    PrintLiteral(node->fun()->name(), false);
+    Print("\n");
+  }
 }
 
 
@@ -1108,10 +1020,416 @@
 }
 
 
+void AstPrinter::VisitCompareToNull(CompareToNull* node) {
+  const char* name = node->is_strict()
+      ? "COMPARE-TO-NULL-STRICT"
+      : "COMPARE-TO-NULL";
+  IndentedScope indent(this, name, node);
+  Visit(node->expression());
+}
+
+
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION");
 }
 
+
+TagScope::TagScope(JsonAstBuilder* builder, const char* name)
+    : builder_(builder), next_(builder->tag()), has_body_(false) {
+  if (next_ != NULL) {
+    next_->use();
+    builder->Print(",\n");
+  }
+  builder->set_tag(this);
+  builder->PrintIndented("[");
+  builder->Print("\"%s\"", name);
+  builder->increase_indent(JsonAstBuilder::kTagIndentSize);
+}
+
+
+TagScope::~TagScope() {
+  builder_->decrease_indent(JsonAstBuilder::kTagIndentSize);
+  if (has_body_) {
+    builder_->Print("\n");
+    builder_->PrintIndented("]");
+  } else {
+    builder_->Print("]");
+  }
+  builder_->set_tag(next_);
+}
+
+
+AttributesScope::AttributesScope(JsonAstBuilder* builder)
+    : builder_(builder), attribute_count_(0) {
+  builder->set_attributes(this);
+  builder->tag()->use();
+  builder->Print(",\n");
+  builder->PrintIndented("{");
+  builder->increase_indent(JsonAstBuilder::kAttributesIndentSize);
+}
+
+
+AttributesScope::~AttributesScope() {
+  builder_->decrease_indent(JsonAstBuilder::kAttributesIndentSize);
+  if (attribute_count_ > 1) {
+    builder_->Print("\n");
+    builder_->PrintIndented("}");
+  } else {
+    builder_->Print("}");
+  }
+  builder_->set_attributes(NULL);
+}
+
+
+const char* JsonAstBuilder::BuildProgram(FunctionLiteral* program) {
+  Init();
+  Visit(program);
+  Print("\n");
+  return Output();
+}
+
+
+void JsonAstBuilder::AddAttributePrefix(const char* name) {
+  if (attributes()->is_used()) {
+    Print(",\n");
+    PrintIndented("\"");
+  } else {
+    Print("\"");
+  }
+  Print("%s\":", name);
+  attributes()->use();
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
+  SmartArrayPointer<char> value_string = value->ToCString();
+  AddAttributePrefix(name);
+  Print("\"%s\"", *value_string);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, const char* value) {
+  AddAttributePrefix(name);
+  Print("\"%s\"", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, int value) {
+  AddAttributePrefix(name);
+  Print("%d", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, bool value) {
+  AddAttributePrefix(name);
+  Print(value ? "true" : "false");
+}
+
+
+void JsonAstBuilder::VisitBlock(Block* stmt) {
+  TagScope tag(this, "Block");
+  VisitStatements(stmt->statements());
+}
+
+
+void JsonAstBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  TagScope tag(this, "ExpressionStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  TagScope tag(this, "EmptyStatement");
+}
+
+
+void JsonAstBuilder::VisitIfStatement(IfStatement* stmt) {
+  TagScope tag(this, "IfStatement");
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+
+void JsonAstBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  TagScope tag(this, "ContinueStatement");
+}
+
+
+void JsonAstBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  TagScope tag(this, "BreakStatement");
+}
+
+
+void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  TagScope tag(this, "ReturnStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitWithStatement(WithStatement* stmt) {
+  TagScope tag(this, "WithStatement");
+  Visit(stmt->expression());
+  Visit(stmt->statement());
+}
+
+
+void JsonAstBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  TagScope tag(this, "SwitchStatement");
+}
+
+
+void JsonAstBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  TagScope tag(this, "DoWhileStatement");
+  Visit(stmt->body());
+  Visit(stmt->cond());
+}
+
+
+void JsonAstBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  TagScope tag(this, "WhileStatement");
+  Visit(stmt->cond());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitForStatement(ForStatement* stmt) {
+  TagScope tag(this, "ForStatement");
+  if (stmt->init() != NULL) Visit(stmt->init());
+  if (stmt->cond() != NULL) Visit(stmt->cond());
+  Visit(stmt->body());
+  if (stmt->next() != NULL) Visit(stmt->next());
+}
+
+
+void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
+  TagScope tag(this, "ForInStatement");
+  Visit(stmt->each());
+  Visit(stmt->enumerable());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  TagScope tag(this, "TryCatchStatement");
+  { AttributesScope attributes(this);
+    AddAttribute("variable", stmt->variable()->name());
+  }
+  Visit(stmt->try_block());
+  Visit(stmt->catch_block());
+}
+
+
+void JsonAstBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  TagScope tag(this, "TryFinallyStatement");
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+
+void JsonAstBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  TagScope tag(this, "DebuggerStatement");
+}
+
+
+void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  TagScope tag(this, "FunctionLiteral");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitDeclarations(expr->scope()->declarations());
+  VisitStatements(expr->body());
+}
+
+
+void JsonAstBuilder::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  TagScope tag(this, "SharedFunctionInfoLiteral");
+}
+
+
+void JsonAstBuilder::VisitConditional(Conditional* expr) {
+  TagScope tag(this, "Conditional");
+}
+
+
+void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
+  TagScope tag(this, "Variable");
+  {
+    AttributesScope attributes(this);
+    Variable* var = expr->var();
+    AddAttribute("name", var->name());
+    switch (var->location()) {
+      case Variable::UNALLOCATED:
+        AddAttribute("location", "UNALLOCATED");
+        break;
+      case Variable::PARAMETER:
+        AddAttribute("location", "PARAMETER");
+        AddAttribute("index", var->index());
+        break;
+      case Variable::LOCAL:
+        AddAttribute("location", "LOCAL");
+        AddAttribute("index", var->index());
+        break;
+      case Variable::CONTEXT:
+        AddAttribute("location", "CONTEXT");
+        AddAttribute("index", var->index());
+        break;
+      case Variable::LOOKUP:
+        AddAttribute("location", "LOOKUP");
+        break;
+    }
+  }
+}
+
+
+void JsonAstBuilder::VisitLiteral(Literal* expr) {
+  TagScope tag(this, "Literal");
+  {
+    AttributesScope attributes(this);
+    Handle<Object> handle = expr->handle();
+    if (handle->IsString()) {
+      AddAttribute("handle", Handle<String>(String::cast(*handle)));
+    } else if (handle->IsSmi()) {
+      AddAttribute("handle", Smi::cast(*handle)->value());
+    }
+  }
+}
+
+
+void JsonAstBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  TagScope tag(this, "RegExpLiteral");
+}
+
+
+void JsonAstBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  TagScope tag(this, "ObjectLiteral");
+}
+
+
+void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  TagScope tag(this, "ArrayLiteral");
+}
+
+
+void JsonAstBuilder::VisitAssignment(Assignment* expr) {
+  TagScope tag(this, "Assignment");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->target());
+  Visit(expr->value());
+}
+
+
+void JsonAstBuilder::VisitThrow(Throw* expr) {
+  TagScope tag(this, "Throw");
+  Visit(expr->exception());
+}
+
+
+void JsonAstBuilder::VisitProperty(Property* expr) {
+  TagScope tag(this, "Property");
+  Visit(expr->obj());
+  Visit(expr->key());
+}
+
+
+void JsonAstBuilder::VisitCall(Call* expr) {
+  TagScope tag(this, "Call");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallNew(CallNew* expr) {
+  TagScope tag(this, "CallNew");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallRuntime(CallRuntime* expr) {
+  TagScope tag(this, "CallRuntime");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  TagScope tag(this, "UnaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
+  TagScope tag(this, "CountOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("is_prefix", expr->is_prefix());
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  TagScope tag(this, "BinaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
+  TagScope tag(this, "CompareOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
+  TagScope tag(this, "CompareToNull");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("is_strict", expr->is_strict());
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
+  TagScope tag(this, "ThisFunction");
+}
+
+
+void JsonAstBuilder::VisitDeclaration(Declaration* decl) {
+  TagScope tag(this, "Declaration");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("mode", Variable::Mode2String(decl->mode()));
+  }
+  Visit(decl->proxy());
+  if (decl->fun() != NULL) Visit(decl->fun());
+}
+
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 9ac7257..a26c48e 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -112,6 +112,107 @@
   int indent_;
 };
 
+
+// Forward declaration of helper classes.
+class TagScope;
+class AttributesScope;
+
+// Build a C string containing a JSON representation of a function's
+// AST. The representation is based on JsonML (www.jsonml.org).
+class JsonAstBuilder: public PrettyPrinter {
+ public:
+  JsonAstBuilder()
+      : indent_(0), top_tag_scope_(NULL), attributes_scope_(NULL) {
+  }
+  virtual ~JsonAstBuilder() {}
+
+  // Controls the indentation of subsequent lines of a tag body after
+  // the first line.
+  static const int kTagIndentSize = 2;
+
+  // Controls the indentation of subsequent lines of an attributes
+  // block's body after the first line.
+  static const int kAttributesIndentSize = 1;
+
+  // Construct a JSON representation of a function literal.
+  const char* BuildProgram(FunctionLiteral* program);
+
+  // Print text indented by the current indentation level.
+  void PrintIndented(const char* text) { Print("%*s%s", indent_, "", text); }
+
+  // Change the indentation level.
+  void increase_indent(int amount) { indent_ += amount; }
+  void decrease_indent(int amount) { indent_ -= amount; }
+
+  // The builder maintains a stack of opened AST node constructors.
+  // Each node constructor corresponds to a JsonML tag.
+  TagScope* tag() { return top_tag_scope_; }
+  void set_tag(TagScope* scope) { top_tag_scope_ = scope; }
+
+  // The builder maintains a pointer to the currently opened attributes
+  // of the current AST node, or NULL if the attributes are not opened.
+  AttributesScope* attributes() { return attributes_scope_; }
+  void set_attributes(AttributesScope* scope) { attributes_scope_ = scope; }
+
+  // Add an attribute to the currently opened attributes.
+  void AddAttribute(const char* name, Handle<String> value);
+  void AddAttribute(const char* name, const char* value);
+  void AddAttribute(const char* name, int value);
+  void AddAttribute(const char* name, bool value);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  int indent_;
+  TagScope* top_tag_scope_;
+  AttributesScope* attributes_scope_;
+
+  // Utility function used by AddAttribute implementations.
+  void AddAttributePrefix(const char* name);
+};
+
+
+// The JSON AST builder keeps a stack of open element tags (AST node
+// constructors from the current iteration point to the root of the
+// AST).  TagScope is a helper class to manage the opening and closing
+// of tags, the indentation of their bodies, and comma separating their
+// contents.
+class TagScope BASE_EMBEDDED {
+ public:
+  TagScope(JsonAstBuilder* builder, const char* name);
+  ~TagScope();
+
+  void use() { has_body_ = true; }
+
+ private:
+  JsonAstBuilder* builder_;
+  TagScope* next_;
+  bool has_body_;
+};
+
+
+// AttributesScope is a helper class to manage the opening and closing
+// of attribute blocks, the indentation of their bodies, and comma
+// separating their contents. JsonAstBuilder::AddAttribute adds an
+// attribute to the currently open AttributesScope. Attribute blocks
+// cannot be nested, so the builder keeps a single optional scope rather
+// than a stack.
+class AttributesScope BASE_EMBEDDED {
+ public:
+  explicit AttributesScope(JsonAstBuilder* builder);
+  ~AttributesScope();
+
+  bool is_used() { return attribute_count_ > 0; }
+  void use() { ++attribute_count_; }
+
+ private:
+  JsonAstBuilder* builder_;
+  int attribute_count_;
+};
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
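For readers unfamiliar with JsonML, each AST node becomes an array whose first element is the tag name, whose optional second element is an attribute object (written by an AttributesScope), and whose remaining elements are child nodes (opened by nested TagScopes). The exact text BuildProgram emits depends on the printing logic above, so the following fragment is illustrative only (statement wrappers omitted), using the tags and attributes the visitors produce:

["FunctionLiteral", {"name": "f"},
  ["Declaration", {"mode": "VAR"},
    ["Variable", {"name": "x", "location": "LOCAL", "index": 0}]],
  ["Assignment", {"op": "ASSIGN"},
    ["Variable", {"name": "x", "location": "LOCAL", "index": 0}],
    ["BinaryOperation", {"op": "ADD"},
      ["Literal", {"handle": 1}],
      ["Literal", {"handle": 2}]]]]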
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 65369be..88d6e87 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -95,23 +95,12 @@
 }
 
 
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
-  return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
-  return reinterpret_cast<HeapObject*>(
-      reinterpret_cast<char*>(kFirstGcSubrootObject) +
-      delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
-  return static_cast<int>(
-      (reinterpret_cast<char*>(subroot) -
-       reinterpret_cast<char*>(kFirstGcSubrootObject)) /
-      HeapObjectsMap::kObjectIdStep);
+uint64_t HeapEntry::id() {
+  union {
+    Id stored_id;
+    uint64_t returned_id;
+  } id_adaptor = {id_};
+  return id_adaptor.returned_id;
 }
 
 } }  // namespace v8::internal
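The id() accessor restored above is a union type pun: HeapEntry stores its id in a packed struct to save space, and reads it back as a plain uint64_t by writing one union member and reading the other. A minimal standalone sketch of the idiom, with a hypothetical two-word Id standing in for the real packed layout:

#include <stdint.h>
#include <stdio.h>

struct Id {           // stand-in for HeapEntry's packed Id; layout assumed
  uint32_t id1_;
  uint32_t id2_;
};

uint64_t ToUint64(Id stored) {
  // Reading the member that was not most recently written is technically
  // implementation-defined in C++, but the compilers targeted here allow
  // it, and the snapshot code relies on exactly this behavior.
  union {
    Id stored_id;
    uint64_t returned_id;
  } adaptor = {stored};
  return adaptor.returned_id;
}

int main() {
  Id id = {0x89abcdefu, 0x01234567u};
  printf("0x%016llx\n", (unsigned long long)ToUint64(id));
  return 0;
}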
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 2d0984e..e319efb 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -152,12 +152,9 @@
 
 const char* StringsStorage::GetName(String* name) {
   if (name->IsString()) {
-    int length = Min(kMaxNameSize, name->length());
-    SmartArrayPointer<char> data =
-        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
-    uint32_t hash =
-        HashSequentialString(*data, length, name->GetHeap()->HashSeed());
-    return AddOrDisposeString(data.Detach(), hash);
+    return AddOrDisposeString(
+        name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).Detach(),
+        name->Hash());
   }
   return "";
 }
@@ -496,6 +493,8 @@
 
 CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
 const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
+const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
+    CodeMap::CodeEntryInfo(NULL, 0);
 
 
 void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
@@ -904,7 +903,7 @@
       entry++;
     }
 
-    for (const Address* stack_pos = sample.stack,
+    for (const Address *stack_pos = sample.stack,
            *stack_end = stack_pos + sample.frames_count;
          stack_pos != stack_end;
          ++stack_pos) {
@@ -944,7 +943,7 @@
 
 
 void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
-  ASSERT(type == kElement || type == kHidden || type == kWeak);
+  ASSERT(type == kElement || type == kHidden);
   child_index_ = child_index;
   type_ = type;
   index_ = index;
@@ -965,20 +964,25 @@
 void HeapEntry::Init(HeapSnapshot* snapshot,
                      Type type,
                      const char* name,
-                     SnapshotObjectId id,
+                     uint64_t id,
                      int self_size,
                      int children_count,
                      int retainers_count) {
   snapshot_ = snapshot;
   type_ = type;
-  painted_ = false;
+  painted_ = kUnpainted;
   name_ = name;
   self_size_ = self_size;
   retained_size_ = 0;
   children_count_ = children_count;
   retainers_count_ = retainers_count;
   dominator_ = NULL;
-  id_ = id;
+
+  union {
+    uint64_t set_id;
+    Id stored_id;
+  } id_adaptor = {id};
+  id_ = id_adaptor.stored_id;
 }
 
 
@@ -987,8 +991,8 @@
                                   const char* name,
                                   HeapEntry* entry,
                                   int retainer_index) {
-  children()[child_index].Init(child_index, type, name, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
+  children_arr()[child_index].Init(child_index, type, name, entry);
+  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
 }
 
 
@@ -997,14 +1001,22 @@
                                     int index,
                                     HeapEntry* entry,
                                     int retainer_index) {
-  children()[child_index].Init(child_index, type, index, entry);
-  entry->retainers()[retainer_index] = children_arr() + child_index;
+  children_arr()[child_index].Init(child_index, type, index, entry);
+  entry->retainers_arr()[retainer_index] = children_arr() + child_index;
 }
 
 
 void HeapEntry::SetUnidirElementReference(
     int child_index, int index, HeapEntry* entry) {
-  children()[child_index].Init(child_index, index, entry);
+  children_arr()[child_index].Init(child_index, index, entry);
+}
+
+
+int HeapEntry::RetainedSize(bool exact) {
+  if (exact && (retained_size_ & kExactRetainedSizeTag) == 0) {
+    CalculateExactRetainedSize();
+  }
+  return retained_size_ & (~kExactRetainedSizeTag);
 }
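RetainedSize above packs a cache flag into the size field itself: one bit of retained_size_ (kExactRetainedSizeTag) records whether the exact value has already been computed, and every reader masks the bit off. A hedged sketch of the trick, assuming the low bit serves as the tag (self sizes are pointer-aligned, so sums of them leave that bit free):

#include <assert.h>
#include <stdio.h>

const int kExactTag = 1;  // assumption: low bit marks "exact value cached"

struct Entry {
  int retained_size;      // size value plus the tag bit
};

int RetainedSize(Entry* e, bool exact) {
  if (exact && (e->retained_size & kExactTag) == 0) {
    int exact_size = 48;  // stand-in for the two-color recomputation below
    assert((exact_size & kExactTag) == 0);
    e->retained_size = exact_size | kExactTag;
  }
  return e->retained_size & ~kExactTag;
}

int main() {
  Entry e = {16};                           // approximate value, tag clear
  printf("%d\n", RetainedSize(&e, false));  // 16: approximation is fine
  printf("%d\n", RetainedSize(&e, true));   // 48: recomputed and cached
  printf("%d\n", RetainedSize(&e, true));   // 48: tag set, cache hit
  return 0;
}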
 
 
@@ -1013,11 +1025,41 @@
 }
 
 
-void HeapEntry::Print(
-    const char* prefix, const char* edge_name, int max_depth, int indent) {
-  OS::Print("%6d %7d @%6llu %*c %s%s: ",
-            self_size(), retained_size(), id(),
-            indent, ' ', prefix, edge_name);
+template<class Visitor>
+void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
+  List<HeapEntry*> list(10);
+  list.Add(this);
+  this->paint_reachable();
+  visitor->Apply(this);
+  while (!list.is_empty()) {
+    HeapEntry* entry = list.RemoveLast();
+    Vector<HeapGraphEdge> children = entry->children();
+    for (int i = 0; i < children.length(); ++i) {
+      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
+      HeapEntry* child = children[i].to();
+      if (!child->painted_reachable()) {
+        list.Add(child);
+        child->paint_reachable();
+        visitor->Apply(child);
+      }
+    }
+  }
+}
+
+
+class NullClass {
+ public:
+  void Apply(HeapEntry* entry) { }
+};
+
+void HeapEntry::PaintAllReachable() {
+  NullClass null;
+  ApplyAndPaintAllReachable(&null);
+}
+
+
+void HeapEntry::Print(int max_depth, int indent) {
+  OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
   if (type() != kString) {
     OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
@@ -1036,40 +1078,29 @@
   Vector<HeapGraphEdge> ch = children();
   for (int i = 0; i < ch.length(); ++i) {
     HeapGraphEdge& edge = ch[i];
-    const char* edge_prefix = "";
-    EmbeddedVector<char, 64> index;
-    const char* edge_name = index.start();
     switch (edge.type()) {
       case HeapGraphEdge::kContextVariable:
-        edge_prefix = "#";
-        edge_name = edge.name();
+        OS::Print("  %*c #%s: ", indent, ' ', edge.name());
         break;
       case HeapGraphEdge::kElement:
-        OS::SNPrintF(index, "%d", edge.index());
+        OS::Print("  %*c %d: ", indent, ' ', edge.index());
         break;
       case HeapGraphEdge::kInternal:
-        edge_prefix = "$";
-        edge_name = edge.name();
+        OS::Print("  %*c $%s: ", indent, ' ', edge.name());
         break;
       case HeapGraphEdge::kProperty:
-        edge_name = edge.name();
+        OS::Print("  %*c %s: ", indent, ' ', edge.name());
         break;
       case HeapGraphEdge::kHidden:
-        edge_prefix = "$";
-        OS::SNPrintF(index, "%d", edge.index());
+        OS::Print("  %*c $%d: ", indent, ' ', edge.index());
         break;
       case HeapGraphEdge::kShortcut:
-        edge_prefix = "^";
-        edge_name = edge.name();
-        break;
-      case HeapGraphEdge::kWeak:
-        edge_prefix = "w";
-        OS::SNPrintF(index, "%d", edge.index());
+        OS::Print("  %*c ^%s: ", indent, ' ', edge.name());
         break;
       default:
-        OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
+        OS::Print("!!! unknown edge type: %d ", edge.type());
     }
-    edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
+    edge.to()->Print(max_depth, indent + 2);
   }
 }
 
@@ -1085,21 +1116,73 @@
     case kRegExp: return "/regexp/";
     case kHeapNumber: return "/number/";
     case kNative: return "/native/";
-    case kSynthetic: return "/synthetic/";
     default: return "???";
   }
 }
 
 
-size_t HeapEntry::EntriesSize(int entries_count,
-                              int children_count,
-                              int retainers_count) {
+int HeapEntry::EntriesSize(int entries_count,
+                           int children_count,
+                           int retainers_count) {
   return sizeof(HeapEntry) * entries_count         // NOLINT
       + sizeof(HeapGraphEdge) * children_count     // NOLINT
       + sizeof(HeapGraphEdge*) * retainers_count;  // NOLINT
 }
 
 
+class RetainedSizeCalculator {
+ public:
+  RetainedSizeCalculator()
+      : retained_size_(0) {
+  }
+
+  int retained_size() const { return retained_size_; }
+
+  void Apply(HeapEntry** entry_ptr) {
+    if ((*entry_ptr)->painted_reachable()) {
+      retained_size_ += (*entry_ptr)->self_size();
+    }
+  }
+
+ private:
+  int retained_size_;
+};
+
+void HeapEntry::CalculateExactRetainedSize() {
+  // To calculate retained size, first we paint all reachable nodes in
+  // one color, then we paint (or re-paint) all nodes reachable from
+  // other nodes with a different color. Then we sum up self sizes of
+  // nodes painted with the first color.
+  snapshot()->ClearPaint();
+  PaintAllReachable();
+
+  List<HeapEntry*> list(10);
+  HeapEntry* root = snapshot()->root();
+  if (this != root) {
+    list.Add(root);
+    root->paint_reachable_from_others();
+  }
+  while (!list.is_empty()) {
+    HeapEntry* curr = list.RemoveLast();
+    Vector<HeapGraphEdge> children = curr->children();
+    for (int i = 0; i < children.length(); ++i) {
+      if (children[i].type() == HeapGraphEdge::kShortcut) continue;
+      HeapEntry* child = children[i].to();
+      if (child != this && child->not_painted_reachable_from_others()) {
+        list.Add(child);
+        child->paint_reachable_from_others();
+      }
+    }
+  }
+
+  RetainedSizeCalculator ret_size_calc;
+  snapshot()->IterateEntries(&ret_size_calc);
+  retained_size_ = ret_size_calc.retained_size();
+  ASSERT((retained_size_ & kExactRetainedSizeTag) == 0);
+  retained_size_ |= kExactRetainedSizeTag;
+}
+
+
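The two-color scheme described in the comment above deserves a worked example: with edges root->a, root->b and a->b, the exact retained size of a is just a's own size, because once paths through a are excluded, b is still reached via root->b and gets repainted. A self-contained toy (illustrative names, not the V8 API) that mirrors the worklist style of ApplyAndPaintAllReachable:

#include <cstdio>
#include <vector>

struct Node {
  explicit Node(int size) : self_size(size), paint(0) {}
  int self_size;
  int paint;  // 0 = unpainted, 1 = reachable from target, 2 = from others
  std::vector<Node*> children;
};

// Paint everything reachable from 'start' with 'color', never entering 'skip'.
static void Paint(Node* start, int color, Node* skip) {
  if (start == skip) return;
  std::vector<Node*> work;
  start->paint = color;
  work.push_back(start);
  while (!work.empty()) {
    Node* node = work.back();
    work.pop_back();
    for (size_t i = 0; i < node->children.size(); ++i) {
      Node* child = node->children[i];
      if (child == skip || child->paint == color) continue;
      child->paint = color;
      work.push_back(child);
    }
  }
}

static int ExactRetainedSize(Node* root, Node* target,
                             const std::vector<Node*>& all) {
  for (size_t i = 0; i < all.size(); ++i) all[i]->paint = 0;
  Paint(target, 1, NULL);  // first color: reachable from target
  Paint(root, 2, target);  // second color: reachable without passing target
  int sum = 0;
  for (size_t i = 0; i < all.size(); ++i)
    if (all[i]->paint == 1) sum += all[i]->self_size;  // uniquely retained
  return sum;
}

int main() {
  Node root(0), a(10), b(20);
  root.children.push_back(&a);
  root.children.push_back(&b);
  a.children.push_back(&b);
  std::vector<Node*> all;
  all.push_back(&root);
  all.push_back(&a);
  all.push_back(&b);
  printf("%d\n", ExactRetainedSize(&root, &a, all));  // prints 10, not 30
  return 0;
}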
 // It is very important to keep objects that form a heap snapshot
 // as small as possible.
 namespace {  // Avoid littering the global namespace.
@@ -1108,15 +1191,12 @@
 
 template <> struct SnapshotSizeConstants<4> {
   static const int kExpectedHeapGraphEdgeSize = 12;
-  static const int kExpectedHeapEntrySize = 32;
-  static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
+  static const int kExpectedHeapEntrySize = 36;
 };
 
 template <> struct SnapshotSizeConstants<8> {
   static const int kExpectedHeapGraphEdgeSize = 24;
   static const int kExpectedHeapEntrySize = 48;
-  static const uint64_t kMaxSerializableSnapshotRawSize =
-      static_cast<uint64_t>(6000) * MB;
 };
 
 }  // namespace
@@ -1134,18 +1214,14 @@
       natives_root_entry_(NULL),
       raw_entries_(NULL),
       entries_sorted_(false) {
-  STATIC_CHECK(
+  STATIC_ASSERT(
       sizeof(HeapGraphEdge) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
-  STATIC_CHECK(
+  STATIC_ASSERT(
       sizeof(HeapEntry) ==
-      SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
-  for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
-    gc_subroot_entries_[i] = NULL;
-  }
+      SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize);  // NOLINT
 }
 
-
 HeapSnapshot::~HeapSnapshot() {
   DeleteArray(raw_entries_);
 }
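The STATIC_ASSERT lines above pin sizeof(HeapGraphEdge) and sizeof(HeapEntry) to expected per-pointer-size values, so accidental growth of these structs breaks the build rather than silently bloating every snapshot. A minimal sketch of the C++03-era mechanism such macros are commonly built on (the macro name here is hypothetical):

// Expands to a typedef that is ill-formed (negative array size) whenever
// the predicate is false, so violations surface as compile errors.
#define MY_STATIC_CHECK(pred) \
  typedef char my_static_check_failed[(pred) ? 1 : -1]

struct Edge {        // stand-in with roughly the shape of HeapGraphEdge
  int child_index_;
  int index_;
  void* to_;
};

// Holds on common 32- and 64-bit ABIs, where this layout has no padding.
MY_STATIC_CHECK(sizeof(Edge) == 2 * sizeof(int) + sizeof(void*));

int main() { return 0; }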
@@ -1171,7 +1247,6 @@
   (*entry_ptr)->clear_paint();
 }
 
-
 void HeapSnapshot::ClearPaint() {
   entries_.Iterate(HeapEntryClearPaint);
 }
@@ -1200,15 +1275,13 @@
 }
 
 
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
-                                           int children_count,
-                                           int retainers_count) {
-  ASSERT(gc_subroot_entries_[tag] == NULL);
-  ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
-  return (gc_subroot_entries_[tag] = AddEntry(
+HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
+                                             int retainers_count) {
+  ASSERT(natives_root_entry_ == NULL);
+  return (natives_root_entry_ = AddEntry(
       HeapEntry::kObject,
-      VisitorSynchronization::kTagNames[tag],
-      HeapObjectsMap::GetNthGcSubrootId(tag),
+      "(Native objects)",
+      HeapObjectsMap::kNativesRootObjectId,
       0,
       children_count,
       retainers_count));
@@ -1217,7 +1290,7 @@
 
 HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
                                   const char* name,
-                                  SnapshotObjectId id,
+                                  uint64_t id,
                                   int size,
                                   int children_count,
                                   int retainers_count) {
@@ -1249,7 +1322,7 @@
 }
 
 
-HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
+HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
   List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
 
   // Perform a binary search by id.
@@ -1258,7 +1331,7 @@
   while (low <= high) {
     int mid =
         (static_cast<unsigned int>(low) + static_cast<unsigned int>(high)) >> 1;
-    SnapshotObjectId mid_id = entries_by_id->at(mid)->id();
+    uint64_t mid_id = entries_by_id->at(mid)->id();
     if (mid_id > id)
       high = mid - 1;
     else if (mid_id < id)
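One note on the midpoint computation in the hunk above: casting low and high to unsigned before adding sidesteps signed-integer overflow in low + high, which would be undefined behavior for very large entry lists. The more common equivalent needs no casts at all:

int mid = low + (high - low) / 2;  // same midpoint, no overflow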
@@ -1277,7 +1350,6 @@
   return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
 }
 
-
 List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
   if (!entries_sorted_) {
     entries_.Sort(SortByIds);
@@ -1288,20 +1360,17 @@
 
 
 void HeapSnapshot::Print(int max_depth) {
-  root()->Print("", "", max_depth, 0);
+  root()->Print(max_depth, 0);
 }
 
 
 // We split IDs on evens for embedder objects (see
 // HeapObjectsMap::GenerateId) and odds for native objects.
-const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
-const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
-    HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
-    HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
-    HeapObjectsMap::kGcRootsFirstSubrootId +
-    VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
+const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
+const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
+// Increase kFirstAvailableObjectId if new 'special' objects appear.
+const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
 
 HeapObjectsMap::HeapObjectsMap()
     : initial_fill_mode_(true),
@@ -1321,13 +1390,13 @@
 }
 
 
-SnapshotObjectId HeapObjectsMap::FindObject(Address addr) {
+uint64_t HeapObjectsMap::FindObject(Address addr) {
   if (!initial_fill_mode_) {
-    SnapshotObjectId existing = FindEntry(addr);
+    uint64_t existing = FindEntry(addr);
     if (existing != 0) return existing;
   }
-  SnapshotObjectId id = next_id_;
-  next_id_ += kObjectIdStep;
+  uint64_t id = next_id_;
+  next_id_ += 2;
   AddEntry(addr, id);
   return id;
 }
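As restored here, FindObject hands out ids in steps of two. The constants above reserve 1, 3 and 5 for the synthetic root entries, heap objects continue from kFirstAvailableObjectId (7) on the same parity, and the opposite parity is left to hash-derived ids (see the evens/odds comment above the constants and GenerateId further down). A minimal sketch of the allocation, with the initial value assumed from the constants:

#include <stdint.h>
#include <stdio.h>

const uint64_t kFirstAvailableId = 7;  // ids 1, 3, 5 are reserved roots
uint64_t next_id = kFirstAvailableId;  // assumed ctor initialization

// Stepping by two keeps every assigned id on one parity, so these ids can
// never collide with the reserved roots or with the other id space.
uint64_t NewObjectId() {
  uint64_t id = next_id;
  next_id += 2;
  return id;
}

int main() {
  printf("%llu %llu %llu\n",
         (unsigned long long)NewObjectId(),   // 7
         (unsigned long long)NewObjectId(),   // 9
         (unsigned long long)NewObjectId());  // 11
  return 0;
}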
@@ -1339,17 +1408,15 @@
   if (entry != NULL) {
     void* value = entry->value;
     entries_map_.Remove(from, AddressHash(from));
-    if (to != NULL) {
-      entry = entries_map_.Lookup(to, AddressHash(to), true);
-      // We can have an entry at the new location, it is OK, as GC can overwrite
-      // dead objects with alive objects being moved.
-      entry->value = value;
-    }
+    entry = entries_map_.Lookup(to, AddressHash(to), true);
+    // We can already have an entry at the new location; that is OK, as the
+    // GC can overwrite dead objects with live objects being moved.
+    entry->value = value;
   }
 }
 
 
-void HeapObjectsMap::AddEntry(Address addr, SnapshotObjectId id) {
+void HeapObjectsMap::AddEntry(Address addr, uint64_t id) {
   HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
   ASSERT(entry->value == NULL);
   entry->value = reinterpret_cast<void*>(entries_->length());
@@ -1357,7 +1424,7 @@
 }
 
 
-SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
+uint64_t HeapObjectsMap::FindEntry(Address addr) {
   HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
   if (entry != NULL) {
     int entry_index =
@@ -1397,8 +1464,8 @@
 }
 
 
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
-  SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
+uint64_t HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+  uint64_t id = static_cast<uint64_t>(info->GetHash());
   const char* label = info->GetLabel();
   id ^= HashSequentialString(label,
                              static_cast<int>(strlen(label)),
@@ -1468,11 +1535,7 @@
 }
 
 
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
-    SnapshotObjectId id) {
-  // First perform a full GC in order to avoid dead objects.
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                          "HeapSnapshotsCollection::FindHeapObjectById");
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
   AssertNoAllocation no_allocation;
   HeapObject* object = NULL;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -1490,7 +1553,7 @@
 }
 
 
-HeapEntry* const HeapEntriesMap::kHeapEntryPlaceholder =
+HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
     reinterpret_cast<HeapEntry*>(1);
 
 HeapEntriesMap::HeapEntriesMap()
@@ -1619,18 +1682,12 @@
 }
 
 
-HeapObject* const V8HeapExplorer::kInternalRootObject =
+HeapObject *const V8HeapExplorer::kInternalRootObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
+HeapObject *const V8HeapExplorer::kGcRootsObject =
     reinterpret_cast<HeapObject*>(
         static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
 
 
 V8HeapExplorer::V8HeapExplorer(
@@ -1663,19 +1720,24 @@
     return snapshot_->AddRootEntry(children_count);
   } else if (object == kGcRootsObject) {
     return snapshot_->AddGcRootsEntry(children_count, retainers_count);
-  } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
-    return snapshot_->AddGcSubrootEntry(
-        GetGcSubrootOrder(object),
-        children_count,
-        retainers_count);
+  } else if (object->IsJSGlobalObject()) {
+    const char* tag = objects_tags_.GetTag(object);
+    const char* name = collection_->names()->GetName(
+        GetConstructorName(JSObject::cast(object)));
+    if (tag != NULL) {
+      name = collection_->names()->GetFormatted("%s / %s", name, tag);
+    }
+    return AddEntry(object,
+                    HeapEntry::kObject,
+                    name,
+                    children_count,
+                    retainers_count);
   } else if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
-    const char* name = shared->bound() ? "native_bind" :
-        collection_->names()->GetName(String::cast(shared->name()));
     return AddEntry(object,
                     HeapEntry::kClosure,
-                    name,
+                    collection_->names()->GetName(String::cast(shared->name())),
                     children_count,
                     retainers_count);
   } else if (object->IsJSRegExp()) {
@@ -1688,7 +1750,8 @@
   } else if (object->IsJSObject()) {
     return AddEntry(object,
                     HeapEntry::kObject,
-                    "",
+                    collection_->names()->GetName(
+                        GetConstructorName(JSObject::cast(object))),
                     children_count,
                     retainers_count);
   } else if (object->IsString()) {
@@ -1720,18 +1783,6 @@
                         : "",
                     children_count,
                     retainers_count);
-  } else if (object->IsGlobalContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / GlobalContext",
-                    children_count,
-                    retainers_count);
-  } else if (object->IsContext()) {
-    return AddEntry(object,
-                    HeapEntry::kHidden,
-                    "system / Context",
-                    children_count,
-                    retainers_count);
   } else if (object->IsFixedArray() ||
              object->IsFixedDoubleArray() ||
              object->IsByteArray() ||
@@ -1771,38 +1822,9 @@
 }
 
 
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
-  GcSubrootsEnumerator(
-      SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
-      : filler_(filler),
-        explorer_(explorer),
-        previous_object_count_(0),
-        object_count_(0) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    object_count_ += end - start;
-  }
-  void Synchronize(VisitorSynchronization::SyncTag tag) {
-    // Skip empty subroots.
-    if (previous_object_count_ != object_count_) {
-      previous_object_count_ = object_count_;
-      filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
-    }
-  }
- private:
-  SnapshotFillerInterface* filler_;
-  V8HeapExplorer* explorer_;
-  intptr_t previous_object_count_;
-  intptr_t object_count_;
-};
-
-
 void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
   filler->AddEntry(kInternalRootObject, this);
   filler->AddEntry(kGcRootsObject, this);
-  GcSubrootsEnumerator enumerator(filler, this);
-  heap_->IterateRoots(&enumerator, VISIT_ALL);
 }
 
 
@@ -1821,13 +1843,12 @@
 }
 
 
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
+int V8HeapExplorer::EstimateObjectsCount() {
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   int objects_count = 0;
-  for (HeapObject* obj = iterator->next();
+  for (HeapObject* obj = iterator.next();
        obj != NULL;
-       obj = iterator->next()) {
-    objects_count++;
-  }
+       obj = iterator.next(), ++objects_count) {}
   return objects_count;
 }
 
@@ -1900,7 +1921,6 @@
           SetPropertyReference(
               obj, entry,
               heap_->prototype_symbol(), proto_or_map,
-              NULL,
               JSFunction::kPrototypeOrInitialMapOffset);
         } else {
           SetPropertyReference(
@@ -1908,27 +1928,17 @@
               heap_->prototype_symbol(), js_fun->prototype());
         }
       }
-      SharedFunctionInfo* shared_info = js_fun->shared();
-      // JSFunction has either bindings or literals and never both.
-      bool bound = shared_info->bound();
-      TagObject(js_fun->literals_or_bindings(),
-                bound ? "(function bindings)" : "(function literals)");
       SetInternalReference(js_fun, entry,
-                           bound ? "bindings" : "literals",
-                           js_fun->literals_or_bindings(),
-                           JSFunction::kLiteralsOffset);
-      SetInternalReference(js_fun, entry,
-                           "shared", shared_info,
+                           "shared", js_fun->shared(),
                            JSFunction::kSharedFunctionInfoOffset);
       TagObject(js_fun->unchecked_context(), "(context)");
       SetInternalReference(js_fun, entry,
                            "context", js_fun->unchecked_context(),
                            JSFunction::kContextOffset);
-      for (int i = JSFunction::kNonWeakFieldsEndOffset;
-           i < JSFunction::kSize;
-           i += kPointerSize) {
-        SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
-      }
+      TagObject(js_fun->literals(), "(function literals)");
+      SetInternalReference(js_fun, entry,
+                           "literals", js_fun->literals(),
+                           JSFunction::kLiteralsOffset);
     }
     TagObject(js_obj->properties(), "(object properties)");
     SetInternalReference(obj, entry,
@@ -1944,10 +1954,6 @@
       SetInternalReference(obj, entry, 1, cs->first());
       SetInternalReference(obj, entry, 2, cs->second());
     }
-    if (obj->IsSlicedString()) {
-      SlicedString* ss = SlicedString::cast(obj);
-      SetInternalReference(obj, entry, "parent", ss->parent());
-    }
     extract_indexed_refs = false;
   } else if (obj->IsGlobalContext()) {
     Context* context = Context::cast(obj);
@@ -1955,14 +1961,8 @@
               "(context func. result caches)");
     TagObject(context->normalized_map_cache(), "(context norm. map cache)");
     TagObject(context->runtime_context(), "(runtime context)");
+    TagObject(context->map_cache(), "(context map cache)");
     TagObject(context->data(), "(context data)");
-    for (int i = Context::FIRST_WEAK_SLOT;
-         i < Context::GLOBAL_CONTEXT_SLOTS;
-         ++i) {
-      SetWeakReference(obj, entry,
-                       i, context->get(i),
-                       FixedArray::OffsetOfElementAt(i));
-    }
   } else if (obj->IsMap()) {
     Map* map = Map::cast(obj);
     SetInternalReference(obj, entry,
@@ -1976,14 +1976,6 @@
                            "descriptors", map->instance_descriptors(),
                            Map::kInstanceDescriptorsOrBitField3Offset);
     }
-    if (map->prototype_transitions() != heap_->empty_fixed_array()) {
-      TagObject(map->prototype_transitions(), "(prototype transitions)");
-      SetInternalReference(obj,
-                           entry,
-                           "prototype_transitions",
-                           map->prototype_transitions(),
-                           Map::kPrototypeTransitionsOffset);
-    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
@@ -2005,9 +1997,6 @@
     SetInternalReference(obj, entry,
                          "script", shared->script(),
                          SharedFunctionInfo::kScriptOffset);
-    SetWeakReference(obj, entry,
-                     1, shared->initial_map(),
-                     SharedFunctionInfo::kInitialMapOffset);
   } else if (obj->IsScript()) {
     Script* script = Script::cast(obj);
     SetInternalReference(obj, entry,
@@ -2026,6 +2015,17 @@
     SetInternalReference(obj, entry,
                          "line_ends", script->line_ends(),
                          Script::kLineEndsOffset);
+  } else if (obj->IsDescriptorArray()) {
+    DescriptorArray* desc_array = DescriptorArray::cast(obj);
+    if (desc_array->length() > DescriptorArray::kContentArrayIndex) {
+      Object* content_array =
+          desc_array->get(DescriptorArray::kContentArrayIndex);
+      TagObject(content_array, "(map descriptor content)");
+      SetInternalReference(obj, entry,
+                           "content", content_array,
+                           FixedArray::OffsetOfElementAt(
+                               DescriptorArray::kContentArrayIndex));
+    }
   } else if (obj->IsCodeCache()) {
     CodeCache* code_cache = CodeCache::cast(obj);
     TagObject(code_cache->default_cache(), "(default code cache)");
@@ -2051,44 +2051,21 @@
 
 void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                               HeapEntry* entry) {
-  if (!js_obj->IsJSFunction()) return;
-
-  JSFunction* func = JSFunction::cast(js_obj);
-  Context* context = func->context();
-  ScopeInfo* scope_info = context->closure()->shared()->scope_info();
-
-  if (func->shared()->bound()) {
-    FixedArray* bindings = func->function_bindings();
-    SetNativeBindReference(js_obj, entry, "bound_this",
-                           bindings->get(JSFunction::kBoundThisIndex));
-    SetNativeBindReference(js_obj, entry, "bound_function",
-                           bindings->get(JSFunction::kBoundFunctionIndex));
-    for (int i = JSFunction::kBoundArgumentsStartIndex;
-         i < bindings->length(); i++) {
-      const char* reference_name = collection_->names()->GetFormatted(
-          "bound_argument_%d",
-          i - JSFunction::kBoundArgumentsStartIndex);
-      SetNativeBindReference(js_obj, entry, reference_name,
-                             bindings->get(i));
-    }
-  } else {
-    // Add context allocated locals.
-    int context_locals = scope_info->ContextLocalCount();
-    for (int i = 0; i < context_locals; ++i) {
-      String* local_name = scope_info->ContextLocalName(i);
-      int idx = Context::MIN_CONTEXT_SLOTS + i;
-      SetClosureReference(js_obj, entry, local_name, context->get(idx));
-    }
-
-    // Add function variable.
-    if (scope_info->HasFunctionName()) {
-      String* name = scope_info->FunctionName();
-      int idx = Context::MIN_CONTEXT_SLOTS + context_locals;
-#ifdef DEBUG
-      VariableMode mode;
-      ASSERT(idx == scope_info->FunctionContextSlotIndex(name, &mode));
-#endif
-      SetClosureReference(js_obj, entry, name, context->get(idx));
+  if (js_obj->IsJSFunction()) {
+    HandleScope hs;
+    JSFunction* func = JSFunction::cast(js_obj);
+    Context* context = func->context();
+    ZoneScope zscope(Isolate::Current(), DELETE_ON_EXIT);
+    SerializedScopeInfo* serialized_scope_info =
+        context->closure()->shared()->scope_info();
+    ScopeInfo<ZoneListAllocationPolicy> zone_scope_info(serialized_scope_info);
+    int locals_number = zone_scope_info.NumberOfLocals();
+    for (int i = 0; i < locals_number; ++i) {
+      String* local_name = *zone_scope_info.LocalName(i);
+      int idx = serialized_scope_info->ContextSlotIndex(local_name, NULL);
+      if (idx >= 0 && idx < context->length()) {
+        SetClosureReference(js_obj, entry, local_name, context->get(idx));
+      }
     }
   }
 }
@@ -2106,7 +2083,6 @@
             SetPropertyReference(
                 js_obj, entry,
                 descs->GetKey(i), js_obj->InObjectPropertyAt(index),
-                NULL,
                 js_obj->GetInObjectPropertyOffset(index));
           } else {
             SetPropertyReference(
@@ -2120,29 +2096,7 @@
               js_obj, entry,
               descs->GetKey(i), descs->GetConstantFunction(i));
           break;
-        case CALLBACKS: {
-          Object* callback_obj = descs->GetValue(i);
-          if (callback_obj->IsAccessorPair()) {
-            AccessorPair* accessors = AccessorPair::cast(callback_obj);
-            if (Object* getter = accessors->getter()) {
-              SetPropertyReference(js_obj, entry, descs->GetKey(i),
-                                   getter, "get-%s");
-            }
-            if (Object* setter = accessors->setter()) {
-              SetPropertyReference(js_obj, entry, descs->GetKey(i),
-                                   setter, "set-%s");
-            }
-          }
-          break;
-        }
-        case NORMAL:  // only in slow mode
-        case HANDLER:  // only in lookup results, not in descriptors
-        case INTERCEPTOR:  // only in lookup results, not in descriptors
-        case MAP_TRANSITION:  // we do not care about transitions here...
-        case ELEMENTS_TRANSITION:
-        case CONSTANT_TRANSITION:
-        case NULL_DESCRIPTOR:  // ... and not about "holes"
-          break;
+        default: ;
       }
     }
   } else {
@@ -2207,16 +2161,15 @@
 
 
 String* V8HeapExplorer::GetConstructorName(JSObject* object) {
-  Heap* heap = object->GetHeap();
-  if (object->IsJSFunction()) return heap->closure_symbol();
+  if (object->IsJSFunction()) return HEAP->closure_symbol();
   String* constructor_name = object->constructor_name();
-  if (constructor_name == heap->Object_symbol()) {
+  if (constructor_name == HEAP->Object_symbol()) {
     // Look up an immediate "constructor" property, if it is a function,
     // return its name. This is for instances of binding objects, which
     // have prototype constructor type "Object".
     Object* constructor_prop = NULL;
-    LookupResult result(heap->isolate());
-    object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
+    LookupResult result;
+    object->LocalLookupRealNamedProperty(HEAP->constructor_symbol(), &result);
     if (result.IsProperty()) {
       constructor_prop = result.GetLazyValue();
     }
@@ -2239,76 +2192,23 @@
 
 
 class RootsReferencesExtractor : public ObjectVisitor {
- private:
-  struct IndexTag {
-    IndexTag(int index, VisitorSynchronization::SyncTag tag)
-        : index(index), tag(tag) { }
-    int index;
-    VisitorSynchronization::SyncTag tag;
-  };
-
  public:
-  RootsReferencesExtractor()
-      : collecting_all_references_(false),
-        previous_reference_count_(0) {
+  explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
+      : explorer_(explorer) {
   }
-
   void VisitPointers(Object** start, Object** end) {
-    if (collecting_all_references_) {
-      for (Object** p = start; p < end; p++) all_references_.Add(*p);
-    } else {
-      for (Object** p = start; p < end; p++) strong_references_.Add(*p);
-    }
+    for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
   }
-
-  void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
-  void FillReferences(V8HeapExplorer* explorer) {
-    ASSERT(strong_references_.length() <= all_references_.length());
-    for (int i = 0; i < reference_tags_.length(); ++i) {
-      explorer->SetGcRootsReference(reference_tags_[i].tag);
-    }
-    int strong_index = 0, all_index = 0, tags_index = 0;
-    while (all_index < all_references_.length()) {
-      if (strong_index < strong_references_.length() &&
-          strong_references_[strong_index] == all_references_[all_index]) {
-        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
-                                        false,
-                                        all_references_[all_index++]);
-        ++strong_index;
-      } else {
-        explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
-                                        true,
-                                        all_references_[all_index++]);
-      }
-      if (reference_tags_[tags_index].index == all_index) ++tags_index;
-    }
-  }
-
-  void Synchronize(VisitorSynchronization::SyncTag tag) {
-    if (collecting_all_references_ &&
-        previous_reference_count_ != all_references_.length()) {
-      previous_reference_count_ = all_references_.length();
-      reference_tags_.Add(IndexTag(previous_reference_count_, tag));
-    }
-  }
-
  private:
-  bool collecting_all_references_;
-  List<Object*> strong_references_;
-  List<Object*> all_references_;
-  int previous_reference_count_;
-  List<IndexTag> reference_tags_;
+  V8HeapExplorer* explorer_;
 };
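For context on what this revert removes: the newer extractor classified each root as strong or weak by visiting the roots twice, once with VISIT_ONLY_STRONG and once with VISIT_ALL, and then diffing the two recorded lists in FillReferences, relying on both passes enumerating the strong roots in the same order. A toy sketch of that diffing step on plain integers:

#include <stdio.h>
#include <vector>

int main() {
  // Stand-ins for the recorded reference lists; strong is a subsequence
  // of all because both passes visit the strong roots in the same order.
  std::vector<int> strong;
  std::vector<int> all;
  int strong_refs[] = {1, 2, 4};
  int all_refs[] = {1, 2, 3, 4, 5};
  strong.assign(strong_refs, strong_refs + 3);
  all.assign(all_refs, all_refs + 5);

  // A single forward scan suffices: entries matching the next recorded
  // strong reference are strong, everything else must be weak.
  size_t strong_index = 0;
  for (size_t all_index = 0; all_index < all.size(); ++all_index) {
    bool is_strong = strong_index < strong.size() &&
                     strong[strong_index] == all[all_index];
    if (is_strong) ++strong_index;
    printf("%d: %s\n", all[all_index], is_strong ? "strong" : "weak");
  }
  return 0;
}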
 
 
 bool V8HeapExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-
   filler_ = filler;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   bool interrupted = false;
-
   // Heap iteration with filtering must be finished in any case.
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -2323,42 +2223,13 @@
     return false;
   }
   SetRootGcRootsReference();
-  RootsReferencesExtractor extractor;
-  heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
-  extractor.SetCollectingAllReferences();
+  RootsReferencesExtractor extractor(this);
   heap_->IterateRoots(&extractor, VISIT_ALL);
-  extractor.FillReferences(this);
   filler_ = NULL;
   return progress_->ProgressReport(false);
 }
 
 
-bool V8HeapExplorer::IterateAndSetObjectNames(SnapshotFillerInterface* filler) {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  filler_ = filler;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    SetObjectName(obj);
-  }
-  return true;
-}
-
-
-void V8HeapExplorer::SetObjectName(HeapObject* object) {
-  if (!object->IsJSObject() || object->IsJSRegExp() || object->IsJSFunction()) {
-    return;
-  }
-  const char* name = collection_->names()->GetName(
-      GetConstructorName(JSObject::cast(object)));
-  if (object->IsJSGlobalObject()) {
-    const char* tag = objects_tags_.GetTag(object);
-    if (tag != NULL) {
-      name = collection_->names()->GetFormatted("%s / %s", name, tag);
-    }
-  }
-  GetEntry(object)->set_name(name);
-}
-
-
 void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          String* reference_name,
@@ -2375,22 +2246,6 @@
 }
 
 
-void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
-                                            HeapEntry* parent_entry,
-                                            const char* reference_name,
-                                            Object* child_obj) {
-  HeapEntry* child_entry = GetEntry(child_obj);
-  if (child_entry != NULL) {
-    filler_->SetNamedReference(HeapGraphEdge::kShortcut,
-                               parent_obj,
-                               parent_entry,
-                               reference_name,
-                               child_obj,
-                               child_entry);
-  }
-}
-
-
 void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          int index,
@@ -2459,45 +2314,19 @@
 }
 
 
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
-                                      HeapEntry* parent_entry,
-                                      int index,
-                                      Object* child_obj,
-                                      int field_offset) {
-  HeapEntry* child_entry = GetEntry(child_obj);
-  if (child_entry != NULL) {
-    filler_->SetIndexedReference(HeapGraphEdge::kWeak,
-                                 parent_obj,
-                                 parent_entry,
-                                 index,
-                                 child_obj,
-                                 child_entry);
-    IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-  }
-}
-
-
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                           HeapEntry* parent_entry,
                                           String* reference_name,
                                           Object* child_obj,
-                                          const char* name_format_string,
                                           int field_offset) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     HeapGraphEdge::Type type = reference_name->length() > 0 ?
         HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
-    const char* name = name_format_string  != NULL ?
-        collection_->names()->GetFormatted(
-            name_format_string,
-            *reference_name->ToCString(DISALLOW_NULLS,
-                                       ROBUST_STRING_TRAVERSAL)) :
-        collection_->names()->GetName(reference_name);
-
     filler_->SetNamedReference(type,
                                parent_obj,
                                parent_entry,
-                               name,
+                               collection_->names()->GetName(reference_name),
                                child_obj,
                                child_entry);
     IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
@@ -2539,21 +2368,12 @@
 }
 
 
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
-  filler_->SetIndexedAutoIndexReference(
-      HeapGraphEdge::kElement,
-      kGcRootsObject, snapshot_->gc_roots(),
-      GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
-}
-
-
-void V8HeapExplorer::SetGcSubrootReference(
-    VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedAutoIndexReference(
-        is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
-        GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
+        HeapGraphEdge::kElement,
+        kGcRootsObject, snapshot_->gc_roots(),
         child_obj, child_entry);
   }
 }
@@ -2564,6 +2384,7 @@
       !obj->IsOddball() &&
       obj != heap_->raw_unchecked_empty_byte_array() &&
       obj != heap_->raw_unchecked_empty_fixed_array() &&
+      obj != heap_->raw_unchecked_empty_fixed_double_array() &&
       obj != heap_->raw_unchecked_empty_descriptor_array()) {
     objects_tags_.SetTag(obj, tag);
   }
@@ -2596,7 +2417,6 @@
 
 // Modifies heap. Must not be run during heap traversal.
 void V8HeapExplorer::TagGlobalObjects() {
-  HandleScope scope;
   Isolate* isolate = Isolate::Current();
   GlobalObjectsEnumerator enumerator;
   isolate->global_handles()->IterateAllRoots(&enumerator);
@@ -2607,7 +2427,6 @@
   const char** urls = NewArray<const char*>(enumerator.count());
   for (int i = 0, l = enumerator.count(); i < l; ++i) {
     urls[i] = NULL;
-    HandleScope scope;
     Handle<JSGlobalObject> global_obj = enumerator.at(i);
     Object* obj_document;
     if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
@@ -2645,43 +2464,9 @@
   NativeObjectsExplorer* explorer_;
 };
 
-
-class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
- public:
-  BasicHeapEntriesAllocator(
-      HeapSnapshot* snapshot,
-      HeapEntry::Type entries_type)
-    : snapshot_(snapshot),
-      collection_(snapshot_->collection()),
-      entries_type_(entries_type) {
-  }
-  virtual HeapEntry* AllocateEntry(
-      HeapThing ptr, int children_count, int retainers_count);
- private:
-  HeapSnapshot* snapshot_;
-  HeapSnapshotsCollection* collection_;
-  HeapEntry::Type entries_type_;
-};
-
-
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(
-    HeapThing ptr, int children_count, int retainers_count) {
-  v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
-  intptr_t elements = info->GetElementCount();
-  intptr_t size = info->GetSizeInBytes();
-  return snapshot_->AddEntry(
-      entries_type_,
-      elements != -1 ?
-          collection_->names()->GetFormatted(
-              "%s / %" V8_PTR_PREFIX "d entries",
-              info->GetLabel(),
-              info->GetElementCount()) :
-          collection_->names()->GetCopy(info->GetLabel()),
-      HeapObjectsMap::GenerateId(info),
-      size != -1 ? static_cast<int>(size) : 0,
-      children_count,
-      retainers_count);
-}
+HeapThing const NativeObjectsExplorer::kNativesRootObject =
+    reinterpret_cast<HeapThing>(
+        static_cast<intptr_t>(HeapObjectsMap::kNativesRootObjectId));
 
 
 NativeObjectsExplorer::NativeObjectsExplorer(
@@ -2691,12 +2476,7 @@
       progress_(progress),
       embedder_queried_(false),
       objects_by_info_(RetainedInfosMatch),
-      native_groups_(StringsMatch),
       filler_(NULL) {
-  synthetic_entries_allocator_ =
-      new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
-  native_entries_allocator_ =
-      new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
 }
 
 
@@ -2711,15 +2491,37 @@
         reinterpret_cast<List<HeapObject*>* >(p->value);
     delete objects;
   }
-  for (HashMap::Entry* p = native_groups_.Start();
-       p != NULL;
-       p = native_groups_.Next(p)) {
+}
+
+
+HeapEntry* NativeObjectsExplorer::AllocateEntry(
+    HeapThing ptr, int children_count, int retainers_count) {
+  if (ptr == kNativesRootObject) {
+    return snapshot_->AddNativesRootEntry(children_count, retainers_count);
+  } else {
     v8::RetainedObjectInfo* info =
-        reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
-    info->Dispose();
+        reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
+    intptr_t elements = info->GetElementCount();
+    intptr_t size = info->GetSizeInBytes();
+    return snapshot_->AddEntry(
+        HeapEntry::kNative,
+        elements != -1 ?
+            collection_->names()->GetFormatted(
+                "%s / %" V8_PTR_PREFIX "d entries",
+                info->GetLabel(),
+                info->GetElementCount()) :
+                collection_->names()->GetCopy(info->GetLabel()),
+        HeapObjectsMap::GenerateId(info),
+        size != -1 ? static_cast<int>(size) : 0,
+        children_count,
+        retainers_count);
   }
-  delete synthetic_entries_allocator_;
-  delete native_entries_allocator_;
+}
+
+
+void NativeObjectsExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+  if (EstimateObjectsCount() <= 0) return;
+  filler->AddEntry(kNativesRootObject, this);
 }
 
 
@@ -2754,29 +2556,6 @@
   embedder_queried_ = true;
 }
 
-void NativeObjectsExplorer::FillImplicitReferences() {
-  Isolate* isolate = Isolate::Current();
-  List<ImplicitRefGroup*>* groups =
-      isolate->global_handles()->implicit_ref_groups();
-  for (int i = 0; i < groups->length(); ++i) {
-    ImplicitRefGroup* group = groups->at(i);
-    HeapObject* parent = *group->parent_;
-    HeapEntry* parent_entry =
-        filler_->FindOrAddEntry(parent, native_entries_allocator_);
-    ASSERT(parent_entry != NULL);
-    Object*** children = group->children_;
-    for (size_t j = 0; j < group->length_; ++j) {
-      Object* child = *children[j];
-      HeapEntry* child_entry =
-          filler_->FindOrAddEntry(child, native_entries_allocator_);
-      filler_->SetNamedReference(
-          HeapGraphEdge::kInternal,
-          parent, parent_entry,
-          "native",
-          child, child_entry);
-    }
-  }
-}
 
 List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
     v8::RetainedObjectInfo* info) {
@@ -2793,82 +2572,34 @@
 
 bool NativeObjectsExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
+  if (EstimateObjectsCount() <= 0) return true;
   filler_ = filler;
   FillRetainedObjects();
-  FillImplicitReferences();
-  if (EstimateObjectsCount() > 0) {
-    for (HashMap::Entry* p = objects_by_info_.Start();
-         p != NULL;
-         p = objects_by_info_.Next(p)) {
-      v8::RetainedObjectInfo* info =
-          reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
-      SetNativeRootReference(info);
-      List<HeapObject*>* objects =
-          reinterpret_cast<List<HeapObject*>* >(p->value);
-      for (int i = 0; i < objects->length(); ++i) {
-        SetWrapperNativeReferences(objects->at(i), info);
-      }
+  for (HashMap::Entry* p = objects_by_info_.Start();
+       p != NULL;
+       p = objects_by_info_.Next(p)) {
+    v8::RetainedObjectInfo* info =
+        reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+    SetNativeRootReference(info);
+    List<HeapObject*>* objects =
+        reinterpret_cast<List<HeapObject*>* >(p->value);
+    for (int i = 0; i < objects->length(); ++i) {
+      SetWrapperNativeReferences(objects->at(i), info);
     }
-    SetRootNativeRootsReference();
   }
+  SetRootNativesRootReference();
   filler_ = NULL;
   return true;
 }
 
 
-class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
- public:
-  explicit NativeGroupRetainedObjectInfo(const char* label)
-      : disposed_(false),
-        hash_(reinterpret_cast<intptr_t>(label)),
-        label_(label) {
-  }
-
-  virtual ~NativeGroupRetainedObjectInfo() {}
-  virtual void Dispose() {
-    CHECK(!disposed_);
-    disposed_ = true;
-    delete this;
-  }
-  virtual bool IsEquivalent(RetainedObjectInfo* other) {
-    return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
-  }
-  virtual intptr_t GetHash() { return hash_; }
-  virtual const char* GetLabel() { return label_; }
-
- private:
-  bool disposed_;
-  intptr_t hash_;
-  const char* label_;
-};
-
-
-NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
-    const char* label) {
-  const char* label_copy = collection_->names()->GetCopy(label);
-  uint32_t hash = HashSequentialString(label_copy,
-                                       static_cast<int>(strlen(label_copy)),
-                                       HEAP->HashSeed());
-  HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
-                                                hash, true);
-  if (entry->value == NULL)
-    entry->value = new NativeGroupRetainedObjectInfo(label);
-  return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
-}
-
-
 void NativeObjectsExplorer::SetNativeRootReference(
     v8::RetainedObjectInfo* info) {
-  HeapEntry* child_entry =
-      filler_->FindOrAddEntry(info, native_entries_allocator_);
+  HeapEntry* child_entry = filler_->FindOrAddEntry(info, this);
   ASSERT(child_entry != NULL);
-  NativeGroupRetainedObjectInfo* group_info =
-      FindOrAddGroupInfo(info->GetGroupLabel());
-  HeapEntry* group_entry =
-      filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
-  filler_->SetNamedAutoIndexReference(
-      HeapGraphEdge::kInternal,
-      group_info, group_entry,
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      kNativesRootObject, snapshot_->natives_root(),
       info, child_entry);
 }
 
@@ -2877,8 +2608,7 @@
     HeapObject* wrapper, v8::RetainedObjectInfo* info) {
   HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
   ASSERT(wrapper_entry != NULL);
-  HeapEntry* info_entry =
-      filler_->FindOrAddEntry(info, native_entries_allocator_);
+  HeapEntry* info_entry = filler_->FindOrAddEntry(info, this);
   ASSERT(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
                              wrapper, wrapper_entry,
@@ -2890,20 +2620,11 @@
 }
 
 
-void NativeObjectsExplorer::SetRootNativeRootsReference() {
-  for (HashMap::Entry* entry = native_groups_.Start();
-       entry;
-       entry = native_groups_.Next(entry)) {
-    NativeGroupRetainedObjectInfo* group_info =
-        static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
-    HeapEntry* group_entry =
-        filler_->FindOrAddEntry(group_info, native_entries_allocator_);
-    ASSERT(group_entry != NULL);
-    filler_->SetIndexedAutoIndexReference(
-        HeapGraphEdge::kElement,
-        V8HeapExplorer::kInternalRootObject, snapshot_->root(),
-        group_info, group_entry);
-  }
+void NativeObjectsExplorer::SetRootNativesRootReference() {
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      V8HeapExplorer::kInternalRootObject, snapshot_->root(),
+      kNativesRootObject, snapshot_->natives_root());
 }
 
 
@@ -2917,6 +2638,15 @@
 }
 
 
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+                                             v8::ActivityControl* control)
+    : snapshot_(snapshot),
+      control_(control),
+      v8_heap_explorer_(snapshot_, this),
+      dom_explorer_(snapshot_, this) {
+}
+
+
 class SnapshotCounter : public SnapshotFillerInterface {
  public:
   explicit SnapshotCounter(HeapEntriesMap* entries) : entries_(entries) { }
@@ -3041,72 +2771,27 @@
 };
 
 
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
-                                             v8::ActivityControl* control)
-    : snapshot_(snapshot),
-      control_(control),
-      v8_heap_explorer_(snapshot_, this),
-      dom_explorer_(snapshot_, this) {
-}
-
-
 bool HeapSnapshotGenerator::GenerateSnapshot() {
   v8_heap_explorer_.TagGlobalObjects();
 
-  // TODO(1562) Profiler assumes that any object that is in the heap after
-  // full GC is reachable from the root when computing dominators.
-  // This is not true for weakly reachable objects.
-  // As a temporary solution we call GC twice.
-  Isolate::Current()->heap()->CollectAllGarbage(
-      Heap::kMakeHeapIterableMask,
-      "HeapSnapshotGenerator::GenerateSnapshot");
-  Isolate::Current()->heap()->CollectAllGarbage(
-      Heap::kMakeHeapIterableMask,
-      "HeapSnapshotGenerator::GenerateSnapshot");
-
-#ifdef DEBUG
-  Heap* debug_heap = Isolate::Current()->heap();
-  ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
-  ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
-  ASSERT(!debug_heap->code_space()->was_swept_conservatively());
-  ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
-  ASSERT(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
-  // The following code uses heap iterators, so we want the heap to be
-  // stable. It should follow TagGlobalObjects as that can allocate.
   AssertNoAllocation no_alloc;
 
-#ifdef DEBUG
-  debug_heap->Verify();
-#endif
-
-  SetProgressTotal(2);  // 2 passes.
-
-#ifdef DEBUG
-  debug_heap->Verify();
-#endif
+  SetProgressTotal(4);  // 2 passes + dominators + sizes.
 
   // Pass 1. Iterate heap contents to count entries and references.
   if (!CountEntriesAndReferences()) return false;
 
-#ifdef DEBUG
-  debug_heap->Verify();
-#endif
-
-  // Allocate memory for entries and references.
+  // Allocate and fill entries in the snapshot, allocate references.
   snapshot_->AllocateEntries(entries_.entries_count(),
                              entries_.total_children_count(),
                              entries_.total_retainers_count());
-
-  // Allocate heap objects to entries hash map.
   entries_.AllocateEntries();
 
   // Pass 2. Fill references.
   if (!FillReferences()) return false;
 
   if (!SetEntriesDominators()) return false;
-  if (!CalculateRetainedSizes()) return false;
+  if (!ApproximateRetainedSizes()) return false;
 
   progress_counter_ = progress_total_;
   if (!ProgressReport(true)) return false;
@@ -3133,9 +2818,8 @@
 
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   progress_total_ = (
-      v8_heap_explorer_.EstimateObjectsCount(&iterator) +
+      v8_heap_explorer_.EstimateObjectsCount() +
       dom_explorer_.EstimateObjectsCount()) * iterations_count;
   progress_counter_ = 0;
 }
@@ -3144,22 +2828,18 @@
 bool HeapSnapshotGenerator::CountEntriesAndReferences() {
   SnapshotCounter counter(&entries_);
   v8_heap_explorer_.AddRootEntries(&counter);
-  return v8_heap_explorer_.IterateAndExtractReferences(&counter)
-      && dom_explorer_.IterateAndExtractReferences(&counter);
+  dom_explorer_.AddRootEntries(&counter);
+  return
+      v8_heap_explorer_.IterateAndExtractReferences(&counter) &&
+      dom_explorer_.IterateAndExtractReferences(&counter);
 }
 
 
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  // IterateAndExtractReferences cannot set object names because
-  // it makes call to JSObject::LocalLookupRealNamedProperty which
-  // in turn may relocate objects in property maps thus changing the heap
-  // layout and affecting retainer counts. This is not acceptable because
-  // number of retainers must not change between count and fill passes.
-  // To avoid this there's a separate postpass that set object names.
-  return v8_heap_explorer_.IterateAndExtractReferences(&filler)
-      && dom_explorer_.IterateAndExtractReferences(&filler)
-      && v8_heap_explorer_.IterateAndSetObjectNames(&filler);
+  return
+      v8_heap_explorer_.IterateAndExtractReferences(&filler) &&
+      dom_explorer_.IterateAndExtractReferences(&filler);
 }
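Note on the pair above: CountEntriesAndReferences and FillReferences drive the same traversal twice against different sinks — a counting pass that sizes the flat entry and edge arrays, then a filling pass that writes into them. A minimal standalone sketch of that count-then-fill pattern, with a hypothetical Sink interface (not V8's actual types):

#include <utility>
#include <vector>

class Sink {
 public:
  virtual ~Sink() {}
  virtual void AddEdge(int from, int to) = 0;
};

class Counter : public Sink {
 public:
  Counter() : edges_(0) {}
  virtual void AddEdge(int, int) { ++edges_; }
  int edges() const { return edges_; }
 private:
  int edges_;
};

class Filler : public Sink {
 public:
  explicit Filler(std::vector<std::pair<int, int> >* out) : out_(out) {}
  virtual void AddEdge(int from, int to) {
    out_->push_back(std::make_pair(from, to));
  }
 private:
  std::vector<std::pair<int, int> >* out_;
};

// The traversal is written once and parameterized by the sink, so the
// two passes cannot diverge in what they visit.
void Traverse(Sink* sink) {
  sink->AddEdge(0, 1);  // stand-in for the real heap iteration
  sink->AddEdge(1, 2);
}

Running Traverse with a Counter, reserving edges() slots, then re-running it with a Filler mirrors the shape of CountEntriesAndReferences followed by FillReferences.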
 
 
@@ -3169,7 +2849,7 @@
   int current_entry = 0;
   List<HeapEntry*> nodes_to_visit;
   nodes_to_visit.Add(snapshot_->root());
-  snapshot_->root()->paint();
+  snapshot_->root()->paint_reachable();
   while (!nodes_to_visit.is_empty()) {
     HeapEntry* entry = nodes_to_visit.last();
     Vector<HeapGraphEdge> children = entry->children();
@@ -3177,9 +2857,9 @@
     for (int i = 0; i < children.length(); ++i) {
       if (children[i].type() == HeapGraphEdge::kShortcut) continue;
       HeapEntry* child = children[i].to();
-      if (!child->painted()) {
+      if (!child->painted_reachable()) {
         nodes_to_visit.Add(child);
-        child->paint();
+        child->paint_reachable();
         has_new_edges = true;
       }
     }
@@ -3189,76 +2869,63 @@
       nodes_to_visit.RemoveLast();
     }
   }
-  ASSERT_EQ(current_entry, entries->length());
+  entries->Truncate(current_entry);
 }
 
 
-static int Intersect(int i1, int i2, const Vector<int>& dominators) {
+static int Intersect(int i1, int i2, const Vector<HeapEntry*>& dominators) {
   int finger1 = i1, finger2 = i2;
   while (finger1 != finger2) {
-    while (finger1 < finger2) finger1 = dominators[finger1];
-    while (finger2 < finger1) finger2 = dominators[finger2];
+    while (finger1 < finger2) finger1 = dominators[finger1]->ordered_index();
+    while (finger2 < finger1) finger2 = dominators[finger2]->ordered_index();
   }
   return finger1;
 }
 
-
 // The algorithm is based on the article:
 // K. Cooper, T. Harvey and K. Kennedy "A Simple, Fast Dominance Algorithm"
 // Softw. Pract. Exper. 4 (2001), pp. 1-10.
 bool HeapSnapshotGenerator::BuildDominatorTree(
     const Vector<HeapEntry*>& entries,
-    Vector<int>* dominators) {
+    Vector<HeapEntry*>* dominators) {
   if (entries.length() == 0) return true;
   const int entries_length = entries.length(), root_index = entries_length - 1;
-  static const int kNoDominator = -1;
-  for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
-  (*dominators)[root_index] = root_index;
-
-  // The affected array is used to mark entries which dominators
-  // have to be racalculated because of changes in their retainers.
-  ScopedVector<bool> affected(entries_length);
-  for (int i = 0; i < affected.length(); ++i) affected[i] = false;
-  // Mark the root direct children as affected.
-  Vector<HeapGraphEdge> children = entries[root_index]->children();
-  for (int i = 0; i < children.length(); ++i) {
-    affected[children[i].to()->ordered_index()] = true;
-  }
-
-  bool changed = true;
-  while (changed) {
-    changed = false;
-    if (!ProgressReport(true)) return false;
+  for (int i = 0; i < root_index; ++i) (*dominators)[i] = NULL;
+  (*dominators)[root_index] = entries[root_index];
+  int changed = 1;
+  const int base_progress_counter = progress_counter_;
+  while (changed != 0) {
+    changed = 0;
     for (int i = root_index - 1; i >= 0; --i) {
-      if (!affected[i]) continue;
-      affected[i] = false;
-      // If dominator of the entry has already been set to root,
-      // then it can't propagate any further.
-      if ((*dominators)[i] == root_index) continue;
-      int new_idom_index = kNoDominator;
+      HeapEntry* new_idom = NULL;
       Vector<HeapGraphEdge*> rets = entries[i]->retainers();
-      for (int j = 0; j < rets.length(); ++j) {
+      int j = 0;
+      for (; j < rets.length(); ++j) {
         if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
-        int ret_index = rets[j]->From()->ordered_index();
-        if (dominators->at(ret_index) != kNoDominator) {
-          new_idom_index = new_idom_index == kNoDominator
-              ? ret_index
-              : Intersect(ret_index, new_idom_index, *dominators);
-          // If idom has already reached the root, it doesn't make sense
-          // to check other retainers.
-          if (new_idom_index == root_index) break;
+        HeapEntry* ret = rets[j]->From();
+        if (dominators->at(ret->ordered_index()) != NULL) {
+          new_idom = ret;
+          break;
         }
       }
-      if (new_idom_index != kNoDominator
-          && dominators->at(i) != new_idom_index) {
-        (*dominators)[i] = new_idom_index;
-        changed = true;
-        Vector<HeapGraphEdge> children = entries[i]->children();
-        for (int j = 0; j < children.length(); ++j) {
-          affected[children[j].to()->ordered_index()] = true;
+      for (++j; j < rets.length(); ++j) {
+        if (rets[j]->type() == HeapGraphEdge::kShortcut) continue;
+        HeapEntry* ret = rets[j]->From();
+        if (dominators->at(ret->ordered_index()) != NULL) {
+          new_idom = entries[Intersect(ret->ordered_index(),
+                                       new_idom->ordered_index(),
+                                       *dominators)];
         }
       }
+      if (new_idom != NULL && dominators->at(i) != new_idom) {
+        (*dominators)[i] = new_idom;
+        ++changed;
+      }
     }
+    int remaining = entries_length - changed;
+    if (remaining < 0) remaining = 0;
+    progress_counter_ = base_progress_counter + remaining;
+    if (!ProgressReport(true)) return false;
   }
   return true;
 }
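A note on the rewritten BuildDominatorTree: both sides of this hunk follow the Cooper-Harvey-Kennedy scheme cited above, over entries numbered in reverse postorder with the root at the highest index. A standalone sketch of that algorithm under the same conventions (names are illustrative, and the two retainer loops above are folded into one here):

#include <vector>

// Entries are numbered in reverse postorder; the root has the highest
// index, matching FillReversePostorderIndexes above.
static int Intersect(int f1, int f2, const std::vector<int>& idom) {
  while (f1 != f2) {
    while (f1 < f2) f1 = idom[f1];
    while (f2 < f1) f2 = idom[f2];
  }
  return f1;
}

// preds[i] lists the retainers of node i by reverse-postorder index.
std::vector<int> BuildIdoms(const std::vector<std::vector<int> >& preds) {
  const int n = static_cast<int>(preds.size());
  const int root = n - 1;
  std::vector<int> idom(n, -1);
  idom[root] = root;
  bool changed = true;
  while (changed) {
    changed = false;
    for (int i = root - 1; i >= 0; --i) {
      int new_idom = -1;
      for (size_t j = 0; j < preds[i].size(); ++j) {
        int p = preds[i][j];
        if (idom[p] == -1) continue;  // skip unprocessed retainers
        new_idom = (new_idom == -1) ? p : Intersect(p, new_idom, idom);
      }
      if (new_idom != -1 && idom[i] != new_idom) {
        idom[i] = new_idom;
        changed = true;
      }
    }
  }
  return idom;
}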
@@ -3268,49 +2935,40 @@
   // This array is used for maintaining reverse postorder of nodes.
   ScopedVector<HeapEntry*> ordered_entries(snapshot_->entries()->length());
   FillReversePostorderIndexes(&ordered_entries);
-  ScopedVector<int> dominators(ordered_entries.length());
+  ScopedVector<HeapEntry*> dominators(ordered_entries.length());
   if (!BuildDominatorTree(ordered_entries, &dominators)) return false;
   for (int i = 0; i < ordered_entries.length(); ++i) {
-    ASSERT(dominators[i] >= 0);
-    ordered_entries[i]->set_dominator(ordered_entries[dominators[i]]);
+    ASSERT(dominators[i] != NULL);
+    ordered_entries[i]->set_dominator(dominators[i]);
   }
   return true;
 }
 
 
-bool HeapSnapshotGenerator::CalculateRetainedSizes() {
+bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
   // As for the dominators tree we only know parent nodes, not
   // children, to sum up total sizes we "bubble" node's self size
   // adding it to all of its parents.
-  List<HeapEntry*>& entries = *snapshot_->entries();
-  for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
+  for (int i = 0; i < snapshot_->entries()->length(); ++i) {
+    HeapEntry* entry = snapshot_->entries()->at(i);
     entry->set_retained_size(entry->self_size());
   }
-  for (int i = 0; i < entries.length(); ++i) {
-    HeapEntry* entry = entries[i];
+  for (int i = 0;
+       i < snapshot_->entries()->length();
+       ++i, ProgressStep()) {
+    HeapEntry* entry = snapshot_->entries()->at(i);
     int entry_size = entry->self_size();
     for (HeapEntry* dominator = entry->dominator();
          dominator != entry;
          entry = dominator, dominator = entry->dominator()) {
       dominator->add_retained_size(entry_size);
     }
+    if (!ProgressReport()) return false;
   }
   return true;
 }
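The bubbling loop above is O(n x tree depth): each node walks its dominator chain once, adding its self size along the way. A standalone sketch of the same computation over an idom[] array (hypothetical names; assumes a well-formed dominator tree whose root, at the highest index, dominates itself):

#include <vector>

std::vector<int> RetainedSizes(const std::vector<int>& self_size,
                               const std::vector<int>& idom) {
  std::vector<int> retained(self_size);  // start from self sizes
  const int root = static_cast<int>(idom.size()) - 1;
  for (int i = 0; i < root; ++i) {
    // Credit this node's self size to every dominator up to and
    // including the root; idom[root] == root terminates the walk.
    for (int d = idom[i]; ; d = idom[d]) {
      retained[d] += self_size[i];
      if (d == root) break;
    }
  }
  return retained;
}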
 
 
-template<int bytes> struct MaxDecimalDigitsIn;
-template<> struct MaxDecimalDigitsIn<4> {
-  static const int kSigned = 11;
-  static const int kUnsigned = 10;
-};
-template<> struct MaxDecimalDigitsIn<8> {
-  static const int kSigned = 20;
-  static const int kUnsigned = 20;
-};
-
-
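For reference, the MaxDecimalDigitsIn<> helper deleted here computed, at compile time, the worst-case decimal width of an integer so stack buffers could be sized exactly. A standalone equivalent:

template <int bytes> struct MaxDecimalDigits;
template <> struct MaxDecimalDigits<4> {
  static const int kSigned = 11;    // strlen("-2147483648")
  static const int kUnsigned = 10;  // strlen("4294967295")
};
template <> struct MaxDecimalDigits<8> {
  static const int kSigned = 20;    // strlen("-9223372036854775808")
  static const int kUnsigned = 20;  // strlen("18446744073709551615")
};

// Enough room for any int of this width plus the trailing '\0'.
char buffer[MaxDecimalDigits<sizeof(int)>::kSigned + 1];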
 class OutputStreamWriter {
  public:
   explicit OutputStreamWriter(v8::OutputStream* stream)
@@ -3360,34 +3018,23 @@
  private:
   template<typename T>
   void AddNumberImpl(T n, const char* format) {
-    // Buffer for the longest value plus trailing \0
-    static const int kMaxNumberSize =
-        MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
-    if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
-      int result = OS::SNPrintF(
-          chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
-      ASSERT(result != -1);
-      chunk_pos_ += result;
-      MaybeWriteChunk();
-    } else {
-      EmbeddedVector<char, kMaxNumberSize> buffer;
-      int result = OS::SNPrintF(buffer, format, n);
-      USE(result);
-      ASSERT(result != -1);
-      AddString(buffer.start());
-    }
+    ScopedVector<char> buffer(32);
+    int result = OS::SNPrintF(buffer, format, n);
+    USE(result);
+    ASSERT(result != -1);
+    AddString(buffer.start());
   }
   void MaybeWriteChunk() {
     ASSERT(chunk_pos_ <= chunk_size_);
     if (chunk_pos_ == chunk_size_) {
       WriteChunk();
+      chunk_pos_ = 0;
     }
   }
   void WriteChunk() {
     if (aborted_) return;
     if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
         v8::OutputStream::kAbort) aborted_ = true;
-    chunk_pos_ = 0;
   }
 
   v8::OutputStream* stream_;
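Note the moved reset in the hunk above: after the revert, chunk_pos_ is cleared in MaybeWriteChunk once a chunk fills, not unconditionally in WriteChunk. A toy model of that chunking discipline (standalone, hypothetical class, not V8's writer):

#include <cstdio>

class ChunkWriter {
 public:
  ChunkWriter() : pos_(0) {}
  void AddString(const char* s) {
    for (; *s != '\0'; ++s) {
      buf_[pos_++] = *s;
      MaybeFlush();  // flush exactly when the chunk fills
    }
  }
  void Finish() { if (pos_ > 0) Flush(); }  // final partial chunk
 private:
  void MaybeFlush() {
    if (pos_ == static_cast<int>(sizeof(buf_))) {
      Flush();
      pos_ = 0;  // reset here, mirroring MaybeWriteChunk above
    }
  }
  void Flush() { std::fwrite(buf_, 1, pos_, stdout); }
  char buf_[1024];
  int pos_;
};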
@@ -3397,14 +3044,15 @@
   bool aborted_;
 };
 
+const int HeapSnapshotJSONSerializer::kMaxSerializableSnapshotRawSize =
+    256 * MB;
 
 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
   ASSERT(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
 
   HeapSnapshot* original_snapshot = NULL;
-  if (snapshot_->raw_entries_size() >=
-      SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
+  if (snapshot_->raw_entries_size() >= kMaxSerializableSnapshotRawSize) {
     // The snapshot is too big. Serialize a fake snapshot.
     original_snapshot = snapshot_;
     snapshot_ = CreateFakeSnapshot();
@@ -3431,14 +3079,8 @@
                                           snapshot_->uid());
   result->AllocateEntries(2, 1, 0);
   HeapEntry* root = result->AddRootEntry(1);
-  const char* text = snapshot_->collection()->names()->GetFormatted(
-      "The snapshot is too big. "
-      "Maximum snapshot size is %"  V8_PTR_PREFIX "u MB. "
-      "Actual snapshot size is %"  V8_PTR_PREFIX "u MB.",
-      SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
-      (snapshot_->raw_entries_size() + MB - 1) / MB);
   HeapEntry* message = result->AddEntry(
-      HeapEntry::kString, text, 0, 4, 0, 0);
+      HeapEntry::kString, "The snapshot is too big", 0, 4, 0, 0);
   root->SetUnidirElementReference(0, 1, message);
   result->SetDominatorsToSelf();
   return result;
@@ -3503,51 +3145,37 @@
 
 
 void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
-  // The buffer needs space for 3 ints, 3 commas and \0
-  static const int kBufferSize =
-      MaxDecimalDigitsIn<sizeof(int)>::kSigned * 3 + 3 + 1;  // NOLINT
-  EmbeddedVector<char, kBufferSize> buffer;
-  int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
-      || edge->type() == HeapGraphEdge::kHidden
-      || edge->type() == HeapGraphEdge::kWeak
-      ? edge->index() : GetStringId(edge->name());
-  STATIC_CHECK(sizeof(int) == sizeof(edge->type()));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(edge_name_or_index));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(edge->to())));  // NOLINT
-  int result = OS::SNPrintF(buffer, ",%d,%d,%d",
-      edge->type(), edge_name_or_index, GetNodeId(edge->to()));
-  USE(result);
-  ASSERT(result != -1);
-  writer_->AddString(buffer.start());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(edge->type());
+  writer_->AddCharacter(',');
+  if (edge->type() == HeapGraphEdge::kElement
+      || edge->type() == HeapGraphEdge::kHidden) {
+    writer_->AddNumber(edge->index());
+  } else {
+    writer_->AddNumber(GetStringId(edge->name()));
+  }
+  writer_->AddCharacter(',');
+  writer_->AddNumber(GetNodeId(edge->to()));
 }
 
 
 void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
-  // The buffer needs space for 6 ints, 1 uint32_t, 7 commas, \n and \0
-  static const int kBufferSize =
-      6 * MaxDecimalDigitsIn<sizeof(int)>::kSigned  // NOLINT
-      + MaxDecimalDigitsIn<sizeof(uint32_t)>::kUnsigned  // NOLINT
-      + 7 + 1 + 1;
-  EmbeddedVector<char, kBufferSize> buffer;
+  writer_->AddCharacter('\n');
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->type());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(GetStringId(entry->name()));
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->id());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->self_size());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(entry->RetainedSize(false));
+  writer_->AddCharacter(',');
+  writer_->AddNumber(GetNodeId(entry->dominator()));
   Vector<HeapGraphEdge> children = entry->children();
-  STATIC_CHECK(sizeof(int) == sizeof(entry->type()));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(GetStringId(entry->name())));  // NOLINT
-  STATIC_CHECK(sizeof(unsigned) == sizeof(entry->id()));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(entry->self_size()));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(entry->retained_size()));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(GetNodeId(entry->dominator())));  // NOLINT
-  STATIC_CHECK(sizeof(int) == sizeof(children.length()));  // NOLINT
-  int result = OS::SNPrintF(buffer, "\n,%d,%d,%u,%d,%d,%d,%d",
-      entry->type(),
-      GetStringId(entry->name()),
-      entry->id(),
-      entry->self_size(),
-      entry->retained_size(),
-      GetNodeId(entry->dominator()),
-      children.length());
-  USE(result);
-  ASSERT(result != -1);
-  writer_->AddString(buffer.start());
+  writer_->AddCharacter(',');
+  writer_->AddNumber(children.length());
   for (int i = 0; i < children.length(); ++i) {
     SerializeEdge(&children[i]);
     if (writer_->aborted()) return;
@@ -3582,8 +3210,7 @@
             "," JSON_S("closure")
             "," JSON_S("regexp")
             "," JSON_S("number")
-            "," JSON_S("native")
-            "," JSON_S("synthetic"))
+            "," JSON_S("native"))
         "," JSON_S("string")
         "," JSON_S("number")
         "," JSON_S("number")
@@ -3602,8 +3229,7 @@
                     "," JSON_S("property")
                     "," JSON_S("internal")
                     "," JSON_S("hidden")
-                    "," JSON_S("shortcut")
-                    "," JSON_S("weak"))
+                    "," JSON_S("shortcut"))
                 "," JSON_S("string_or_number")
                 "," JSON_S("node"))))));
 #undef JSON_S
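Taken together, the SerializeNode and SerializeEdge versions above emit one comma-separated tuple per node, with each edge appended in place as ",type,name_or_index,to_node". An illustration of the row shape with made-up values (the field order comes from the code above; the concrete numbers are hypothetical):

#include <cstdio>

int main() {
  // Node row: type, name string id, object id, self size,
  // retained size, dominator node id, children count.
  std::printf("\n,%d,%d,%u,%d,%d,%d,%d", 3, 17, 42u, 24, 24, 0, 1);
  // One edge: type, index-or-name-id, target node id.
  std::printf(",%d,%d,%d", 1, 0, 7);
  std::printf("\n");
  return 0;
}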
diff --git a/src/profile-generator.h b/src/profile-generator.h
index d9a1319..0beb109 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,8 +35,6 @@
 namespace v8 {
 namespace internal {
 
-typedef uint32_t SnapshotObjectId;
-
 class TokenEnumerator {
  public:
   TokenEnumerator();
@@ -76,8 +74,6 @@
   inline const char* GetFunctionName(const char* name);
 
  private:
-  static const int kMaxNameSize = 1024;
-
   INLINE(static bool StringsMatch(void* key1, void* key2)) {
     return strcmp(reinterpret_cast<char*>(key1),
                   reinterpret_cast<char*>(key2)) == 0;
@@ -261,7 +257,7 @@
     typedef Address Key;
     typedef CodeEntryInfo Value;
     static const Key kNoKey;
-    static const Value NoValue() { return CodeEntryInfo(NULL, 0); }
+    static const Value kNoValue;
     static int Compare(const Key& a, const Key& b) {
       return a < b ? -1 : (a > b ? 1 : 0);
     }
@@ -457,8 +453,7 @@
     kProperty = v8::HeapGraphEdge::kProperty,
     kInternal = v8::HeapGraphEdge::kInternal,
     kHidden = v8::HeapGraphEdge::kHidden,
-    kShortcut = v8::HeapGraphEdge::kShortcut,
-    kWeak = v8::HeapGraphEdge::kWeak
+    kShortcut = v8::HeapGraphEdge::kShortcut
   };
 
   HeapGraphEdge() { }
@@ -468,7 +463,7 @@
 
   Type type() { return static_cast<Type>(type_); }
   int index() {
-    ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
+    ASSERT(type_ == kElement || type_ == kHidden);
     return index_;
   }
   const char* name() {
@@ -527,15 +522,14 @@
     kClosure = v8::HeapGraphNode::kClosure,
     kRegExp = v8::HeapGraphNode::kRegExp,
     kHeapNumber = v8::HeapGraphNode::kHeapNumber,
-    kNative = v8::HeapGraphNode::kNative,
-    kSynthetic = v8::HeapGraphNode::kSynthetic
+    kNative = v8::HeapGraphNode::kNative
   };
 
   HeapEntry() { }
   void Init(HeapSnapshot* snapshot,
             Type type,
             const char* name,
-            SnapshotObjectId id,
+            uint64_t id,
             int self_size,
             int children_count,
             int retainers_count);
@@ -543,8 +537,7 @@
   HeapSnapshot* snapshot() { return snapshot_; }
   Type type() { return static_cast<Type>(type_); }
   const char* name() { return name_; }
-  void set_name(const char* name) { name_ = name; }
-  inline SnapshotObjectId id() { return id_; }
+  inline uint64_t id();
   int self_size() { return self_size_; }
   int retained_size() { return retained_size_; }
   void add_retained_size(int size) { retained_size_ += size; }
@@ -557,13 +550,23 @@
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
   HeapEntry* dominator() { return dominator_; }
-  void set_dominator(HeapEntry* entry) {
-    ASSERT(entry != NULL);
-    dominator_ = entry;
+  void set_dominator(HeapEntry* entry) { dominator_ = entry; }
+
+  void clear_paint() { painted_ = kUnpainted; }
+  bool painted_reachable() { return painted_ == kPainted; }
+  void paint_reachable() {
+    ASSERT(painted_ == kUnpainted);
+    painted_ = kPainted;
   }
-  void clear_paint() { painted_ = false; }
-  bool painted() { return painted_; }
-  void paint() { painted_ = true; }
+  bool not_painted_reachable_from_others() {
+    return painted_ != kPaintedReachableFromOthers;
+  }
+  void paint_reachable_from_others() {
+    painted_ = kPaintedReachableFromOthers;
+  }
+  template<class Visitor>
+  void ApplyAndPaintAllReachable(Visitor* visitor);
+  void PaintAllReachable();
 
   void SetIndexedReference(HeapGraphEdge::Type type,
                            int child_index,
@@ -577,18 +580,16 @@
                          int retainer_index);
   void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
 
-  size_t EntrySize() {
-    return EntriesSize(1, children_count_, retainers_count_);
-  }
+  int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
+  int RetainedSize(bool exact);
 
-  void Print(
-      const char* prefix, const char* edge_name, int max_depth, int indent);
+  void Print(int max_depth, int indent);
 
   Handle<HeapObject> GetHeapObject();
 
-  static size_t EntriesSize(int entries_count,
-                            int children_count,
-                            int retainers_count);
+  static int EntriesSize(int entries_count,
+                         int children_count,
+                         int retainers_count);
 
  private:
   HeapGraphEdge* children_arr() {
@@ -597,22 +598,33 @@
   HeapGraphEdge** retainers_arr() {
     return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
   }
+  void CalculateExactRetainedSize();
   const char* TypeAsString();
 
-  unsigned painted_: 1;
+  unsigned painted_: 2;
   unsigned type_: 4;
-  int children_count_: 27;
+  int children_count_: 26;
   int retainers_count_;
   int self_size_;
   union {
     int ordered_index_;  // Used during dominator tree building.
     int retained_size_;  // At that moment, there is no retained size yet.
   };
-  SnapshotObjectId id_;
   HeapEntry* dominator_;
   HeapSnapshot* snapshot_;
+  struct Id {
+    uint32_t id1_;
+    uint32_t id2_;
+  } id_;  // This is to avoid extra padding of 64-bit value.
   const char* name_;
 
+  // Paints used for exact retained sizes calculation.
+  static const unsigned kUnpainted = 0;
+  static const unsigned kPainted = 1;
+  static const unsigned kPaintedReachableFromOthers = 2;
+
+  static const int kExactRetainedSizeTag = 1;
+
   DISALLOW_COPY_AND_ASSIGN(HeapEntry);
 };
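The two-uint32 Id struct above is a packing trick: a plain uint64_t member would force 8-byte alignment of the whole entry and pad the neighboring 4-byte fields. A standalone illustration (the sizes shown are typical for common 64-bit ABIs, not guaranteed):

#include <cstdio>
#include <stdint.h>

struct WithU64 { int32_t a; uint64_t id; int32_t b; };        // typically 24 bytes
struct SplitId { int32_t a; uint32_t id1, id2; int32_t b; };  // typically 16 bytes

int main() {
  std::printf("%lu %lu\n",
              static_cast<unsigned long>(sizeof(WithU64)),
              static_cast<unsigned long>(sizeof(SplitId)));
  return 0;
}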
 
@@ -644,26 +656,22 @@
   HeapEntry* root() { return root_entry_; }
   HeapEntry* gc_roots() { return gc_roots_entry_; }
   HeapEntry* natives_root() { return natives_root_entry_; }
-  HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
   List<HeapEntry*>* entries() { return &entries_; }
-  size_t raw_entries_size() { return raw_entries_size_; }
+  int raw_entries_size() { return raw_entries_size_; }
 
   void AllocateEntries(
       int entries_count, int children_count, int retainers_count);
   HeapEntry* AddEntry(HeapEntry::Type type,
                       const char* name,
-                      SnapshotObjectId id,
+                      uint64_t id,
                       int size,
                       int children_count,
                       int retainers_count);
   HeapEntry* AddRootEntry(int children_count);
   HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
-  HeapEntry* AddGcSubrootEntry(int tag,
-                               int children_count,
-                               int retainers_count);
   HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
   void ClearPaint();
-  HeapEntry* GetEntryById(SnapshotObjectId id);
+  HeapEntry* GetEntryById(uint64_t id);
   List<HeapEntry*>* GetSortedEntriesList();
   template<class Visitor>
   void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
@@ -682,11 +690,10 @@
   HeapEntry* root_entry_;
   HeapEntry* gc_roots_entry_;
   HeapEntry* natives_root_entry_;
-  HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
   char* raw_entries_;
   List<HeapEntry*> entries_;
   bool entries_sorted_;
-  size_t raw_entries_size_;
+  int raw_entries_size_;
 
   friend class HeapSnapshotTester;
 
@@ -700,31 +707,26 @@
   ~HeapObjectsMap();
 
   void SnapshotGenerationFinished();
-  SnapshotObjectId FindObject(Address addr);
+  uint64_t FindObject(Address addr);
   void MoveObject(Address from, Address to);
 
-  static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
-  static inline SnapshotObjectId GetNthGcSubrootId(int delta);
+  static uint64_t GenerateId(v8::RetainedObjectInfo* info);
 
-  static const int kObjectIdStep = 2;
-  static const SnapshotObjectId kInternalRootObjectId;
-  static const SnapshotObjectId kGcRootsObjectId;
-  static const SnapshotObjectId kNativesRootObjectId;
-  static const SnapshotObjectId kGcRootsFirstSubrootId;
-  static const SnapshotObjectId kFirstAvailableObjectId;
+  static const uint64_t kInternalRootObjectId;
+  static const uint64_t kGcRootsObjectId;
+  static const uint64_t kNativesRootObjectId;
+  static const uint64_t kFirstAvailableObjectId;
 
  private:
   struct EntryInfo {
-    explicit EntryInfo(SnapshotObjectId id) : id(id), accessed(true) { }
-    EntryInfo(SnapshotObjectId id, bool accessed)
-      : id(id),
-        accessed(accessed) { }
-    SnapshotObjectId id;
+    explicit EntryInfo(uint64_t id) : id(id), accessed(true) { }
+    EntryInfo(uint64_t id, bool accessed) : id(id), accessed(accessed) { }
+    uint64_t id;
     bool accessed;
   };
 
-  void AddEntry(Address addr, SnapshotObjectId id);
-  SnapshotObjectId FindEntry(Address addr);
+  void AddEntry(Address addr, uint64_t id);
+  uint64_t FindEntry(Address addr);
   void RemoveDeadEntries();
 
   static bool AddressesMatch(void* key1, void* key2) {
@@ -738,7 +740,7 @@
   }
 
   bool initial_fill_mode_;
-  SnapshotObjectId next_id_;
+  uint64_t next_id_;
   HashMap entries_map_;
   List<EntryInfo>* entries_;
 
@@ -763,8 +765,8 @@
   StringsStorage* names() { return &names_; }
   TokenEnumerator* token_enumerator() { return token_enumerator_; }
 
-  SnapshotObjectId GetObjectId(Address addr) { return ids_.FindObject(addr); }
-  Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
+  uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
+  Handle<HeapObject> FindHeapObjectById(uint64_t id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
 
  private:
@@ -817,7 +819,7 @@
   int total_children_count() { return total_children_count_; }
   int total_retainers_count() { return total_retainers_count_; }
 
-  static HeapEntry* const kHeapEntryPlaceholder;
+  static HeapEntry *const kHeapEntryPlaceholder;
 
  private:
   struct EntryInfo {
@@ -920,9 +922,8 @@
   virtual HeapEntry* AllocateEntry(
       HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
-  int EstimateObjectsCount(HeapIterator* iterator);
+  int EstimateObjectsCount();
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-  bool IterateAndSetObjectNames(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
   static String* GetConstructorName(JSObject* object);
@@ -947,10 +948,6 @@
                            HeapEntry* parent,
                            String* reference_name,
                            Object* child);
-  void SetNativeBindReference(HeapObject* parent_obj,
-                              HeapEntry* parent,
-                              const char* reference_name,
-                              Object* child);
   void SetElementReference(HeapObject* parent_obj,
                            HeapEntry* parent,
                            int index,
@@ -969,16 +966,10 @@
                           HeapEntry* parent,
                           int index,
                           Object* child);
-  void SetWeakReference(HeapObject* parent_obj,
-                        HeapEntry* parent_entry,
-                        int index,
-                        Object* child_obj,
-                        int field_offset);
   void SetPropertyReference(HeapObject* parent_obj,
                             HeapEntry* parent,
                             String* reference_name,
                             Object* child,
-                            const char* name_format_string = NULL,
                             int field_offset = -1);
   void SetPropertyShortcutReference(HeapObject* parent_obj,
                                     HeapEntry* parent,
@@ -986,17 +977,11 @@
                                     Object* child);
   void SetRootShortcutReference(Object* child);
   void SetRootGcRootsReference();
-  void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
-  void SetGcSubrootReference(
-      VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
-  void SetObjectName(HeapObject* object);
+  void SetGcRootsReference(Object* child);
   void TagObject(Object* obj, const char* tag);
 
   HeapEntry* GetEntry(Object* obj);
 
-  static inline HeapObject* GetNthGcSubrootObject(int delta);
-  static inline int GetGcSubrootOrder(HeapObject* subroot);
-
   Heap* heap_;
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -1005,36 +990,31 @@
   HeapObjectsSet objects_tags_;
 
   static HeapObject* const kGcRootsObject;
-  static HeapObject* const kFirstGcSubrootObject;
-  static HeapObject* const kLastGcSubrootObject;
 
   friend class IndexedReferencesExtractor;
-  friend class GcSubrootsEnumerator;
   friend class RootsReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
 };
 
 
-class NativeGroupRetainedObjectInfo;
-
-
 // An implementation of retained native objects extractor.
-class NativeObjectsExplorer {
+class NativeObjectsExplorer : public HeapEntriesAllocator {
  public:
   NativeObjectsExplorer(HeapSnapshot* snapshot,
                       SnapshottingProgressReportingInterface* progress);
   virtual ~NativeObjectsExplorer();
+  virtual HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
   int EstimateObjectsCount();
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
 
  private:
   void FillRetainedObjects();
-  void FillImplicitReferences();
   List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
   void SetNativeRootReference(v8::RetainedObjectInfo* info);
-  void SetRootNativeRootsReference();
+  void SetRootNativesRootReference();
   void SetWrapperNativeReferences(HeapObject* wrapper,
                                       v8::RetainedObjectInfo* info);
   void VisitSubtreeWrapper(Object** p, uint16_t class_id);
@@ -1048,12 +1028,6 @@
         (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
             reinterpret_cast<v8::RetainedObjectInfo*>(key2));
   }
-  INLINE(static bool StringsMatch(void* key1, void* key2)) {
-    return strcmp(reinterpret_cast<char*>(key1),
-                  reinterpret_cast<char*>(key2)) == 0;
-  }
-
-  NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
 
   HeapSnapshot* snapshot_;
   HeapSnapshotsCollection* collection_;
@@ -1062,9 +1036,6 @@
   HeapObjectsSet in_groups_;
   // RetainedObjectInfo* -> List<HeapObject*>*
   HashMap objects_by_info_;
-  HashMap native_groups_;
-  HeapEntriesAllocator* synthetic_entries_allocator_;
-  HeapEntriesAllocator* native_entries_allocator_;
   // Used during references extraction.
   SnapshotFillerInterface* filler_;
 
@@ -1083,9 +1054,9 @@
   bool GenerateSnapshot();
 
  private:
+  bool ApproximateRetainedSizes();
   bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
-                          Vector<int>* dominators);
-  bool CalculateRetainedSizes();
+                          Vector<HeapEntry*>* dominators);
   bool CountEntriesAndReferences();
   bool FillReferences();
   void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
diff --git a/src/property-details.h b/src/property-details.h
deleted file mode 100644
index c79aa96..0000000
--- a/src/property-details.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_PROPERTY_DETAILS_H_
-#define V8_PROPERTY_DETAILS_H_
-
-#include "../include/v8.h"
-#include "allocation.h"
-#include "utils.h"
-
-// Ecma-262 3rd 8.6.1
-enum PropertyAttributes {
-  NONE              = v8::None,
-  READ_ONLY         = v8::ReadOnly,
-  DONT_ENUM         = v8::DontEnum,
-  DONT_DELETE       = v8::DontDelete,
-  ABSENT            = 16  // Used in runtime to indicate a property is absent.
-  // ABSENT can never be stored in or returned from a descriptor's attributes
-  // bitfield.  It is only used as a return value meaning the attributes of
-  // a non-existent property.
-};
-
-
-namespace v8 {
-namespace internal {
-
-class Smi;
-
-// Type of properties.
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in mirror-debugger.js.
-enum PropertyType {
-  NORMAL                    = 0,  // only in slow mode
-  FIELD                     = 1,  // only in fast mode
-  CONSTANT_FUNCTION         = 2,  // only in fast mode
-  CALLBACKS                 = 3,
-  HANDLER                   = 4,  // only in lookup results, not in descriptors
-  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
-  // All properties before MAP_TRANSITION are real.
-  MAP_TRANSITION            = 6,  // only in fast mode
-  ELEMENTS_TRANSITION       = 7,
-  CONSTANT_TRANSITION       = 8,  // only in fast mode
-  NULL_DESCRIPTOR           = 9,  // only in fast mode
-  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
-  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
-  // nonexistent properties.
-  NONEXISTENT = NULL_DESCRIPTOR
-};
-
-
-// PropertyDetails captures type and attributes for a property.
-// They are used both in property dictionaries and instance descriptors.
-class PropertyDetails BASE_EMBEDDED {
- public:
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  int index = 0) {
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(index));
-
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(index);
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(index == this->index());
-  }
-
-  // Conversion for storing details as Object*.
-  explicit inline PropertyDetails(Smi* smi);
-  inline Smi* AsSmi();
-
-  PropertyType type() { return TypeField::decode(value_); }
-
-  PropertyAttributes attributes() { return AttributesField::decode(value_); }
-
-  int index() { return StorageField::decode(value_); }
-
-  inline PropertyDetails AsDeleted();
-
-  static bool IsValidIndex(int index) {
-    return StorageField::is_valid(index);
-  }
-
-  bool IsReadOnly() { return (attributes() & READ_ONLY) != 0; }
-  bool IsDontDelete() { return (attributes() & DONT_DELETE) != 0; }
-  bool IsDontEnum() { return (attributes() & DONT_ENUM) != 0; }
-  bool IsDeleted() { return DeletedField::decode(value_) != 0;}
-
-  // Bit fields in value_ (type, shift, size). Must be public so the
-  // constants can be embedded in generated code.
-  class TypeField:       public BitField<PropertyType,       0, 4> {};
-  class AttributesField: public BitField<PropertyAttributes, 4, 3> {};
-  class DeletedField:    public BitField<uint32_t,           7, 1> {};
-  class StorageField:    public BitField<uint32_t,           8, 32-8> {};
-
-  static const int kInitialIndex = 1;
-
- private:
-  uint32_t value_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_PROPERTY_DETAILS_H_
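The deleted header's layout is easy to restate: type in bits 0-3, attributes in bits 4-6, the deleted flag in bit 7, and the storage index in bits 8-31. A standalone sketch using plain shifts instead of V8's BitField templates (the deleted flag is left zero for brevity):

#include <cassert>
#include <stdint.h>

static uint32_t Encode(uint32_t type, uint32_t attrs, uint32_t index) {
  assert(type < 16u && attrs < 8u && index < (1u << 24));
  return type | (attrs << 4) | (index << 8);
}
static uint32_t DecodeType(uint32_t v)  { return v & 0xFu; }
static uint32_t DecodeAttrs(uint32_t v) { return (v >> 4) & 0x7u; }
static uint32_t DecodeIndex(uint32_t v) { return v >> 8; }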
diff --git a/src/property.cc b/src/property.cc
index 78f237d..7cc2df5 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,15 +31,6 @@
 namespace internal {
 
 
-void LookupResult::Iterate(ObjectVisitor* visitor) {
-  LookupResult* current = this;  // Could be NULL.
-  while (current != NULL) {
-    visitor->VisitPointer(BitCast<Object**>(&current->holder_));
-    current = current->next_;
-  }
-}
-
-
 #ifdef OBJECT_PRINT
 void LookupResult::Print(FILE* out) {
   if (!IsFound()) {
@@ -91,9 +82,6 @@
       break;
     case CONSTANT_TRANSITION:
       PrintF(out, " -type = constant property transition\n");
-      PrintF(out, " -map:\n");
-      GetTransitionMap()->Print(out);
-      PrintF(out, "\n");
       break;
     case NULL_DESCRIPTOR:
       PrintF(out, " =type = null descriptor\n");
@@ -114,28 +102,4 @@
 #endif
 
 
-bool Descriptor::ContainsTransition() {
-  switch (details_.type()) {
-    case MAP_TRANSITION:
-    case CONSTANT_TRANSITION:
-    case ELEMENTS_TRANSITION:
-      return true;
-    case CALLBACKS: {
-      if (!value_->IsAccessorPair()) return false;
-      AccessorPair* accessors = AccessorPair::cast(value_);
-      return accessors->getter()->IsMap() || accessors->setter()->IsMap();
-    }
-    case NORMAL:
-    case FIELD:
-    case CONSTANT_FUNCTION:
-    case HANDLER:
-    case INTERCEPTOR:
-    case NULL_DESCRIPTOR:
-      return false;
-  }
-  UNREACHABLE();  // Keep the compiler happy.
-  return false;
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/property.h b/src/property.h
index 04f78b2..e7d9fc5 100644
--- a/src/property.h
+++ b/src/property.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,8 +49,11 @@
 
   MUST_USE_RESULT MaybeObject* KeyToSymbol() {
     if (!StringShape(key_).IsSymbol()) {
-      MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
-      if (!maybe_result->To(&key_)) return maybe_result;
+      Object* result;
+      { MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
+        if (!maybe_result->ToObject(&result)) return maybe_result;
+      }
+      key_ = String::cast(result);
     }
     return key_;
   }
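Both sides of this hunk use V8's MaybeObject protocol: an allocating call returns either a value or a failure sentinel that the caller must return upward untouched. A stripped-down standalone model of that idiom (not V8's real types):

#include <cstddef>

struct MaybeObj {
  const char* value;  // NULL encodes an allocation failure / GC retry
  bool To(const char** out) const {
    if (value == NULL) return false;
    *out = value;
    return true;
  }
};

// Stand-in for HEAP->LookupSymbol(); a real version could fail.
static MaybeObj Intern(const char* key) {
  MaybeObj result = { key };
  return result;
}

static MaybeObj KeyToSymbol(const char** key) {
  MaybeObj maybe = Intern(*key);
  if (!maybe.To(key)) return maybe;  // propagate the failure verbatim
  return maybe;
}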
@@ -68,8 +71,6 @@
     details_ = PropertyDetails(details_.attributes(), details_.type(), index);
   }
 
-  bool ContainsTransition();
-
  private:
   String* key_;
   Object* value_;
@@ -114,9 +115,11 @@
 class ElementsTransitionDescriptor: public Descriptor {
  public:
   ElementsTransitionDescriptor(String* key,
-                               Object* map_or_array)
-      : Descriptor(key, map_or_array, PropertyDetails(NONE,
-                                                      ELEMENTS_TRANSITION)) { }
+                               Map* map,
+                               ElementsKind elements_kind)
+      : Descriptor(key, map, PropertyDetails(NONE,
+                                             ELEMENTS_TRANSITION,
+                                             elements_kind)) { }
 };
 
 // Marks a field name in a map so that adding the field is guaranteed
@@ -161,51 +164,12 @@
 };
 
 
-template <class T>
-bool IsPropertyDescriptor(T* desc) {
-  switch (desc->type()) {
-    case NORMAL:
-    case FIELD:
-    case CONSTANT_FUNCTION:
-    case HANDLER:
-    case INTERCEPTOR:
-      return true;
-    case CALLBACKS: {
-      Object* callback_object = desc->GetCallbackObject();
-      // Non-JavaScript (i.e. native) accessors are always a property, otherwise
-      // either the getter or the setter must be an accessor. Put another way:
-      // If we only see map transitions and holes in a pair, this is not a
-      // property.
-      return (!callback_object->IsAccessorPair() ||
-              AccessorPair::cast(callback_object)->ContainsAccessor());
-    }
-    case MAP_TRANSITION:
-    case ELEMENTS_TRANSITION:
-    case CONSTANT_TRANSITION:
-    case NULL_DESCRIPTOR:
-      return false;
-  }
-  UNREACHABLE();  // keep the compiler happy
-  return false;
-}
-
-
 class LookupResult BASE_EMBEDDED {
  public:
-  explicit LookupResult(Isolate* isolate)
-      : isolate_(isolate),
-        next_(isolate->top_lookup_result()),
-        lookup_type_(NOT_FOUND),
-        holder_(NULL),
+  LookupResult()
+      : lookup_type_(NOT_FOUND),
         cacheable_(true),
-        details_(NONE, NORMAL) {
-    isolate->SetTopLookupResult(this);
-  }
-
-  ~LookupResult() {
-    ASSERT(isolate_->top_lookup_result() == this);
-    isolate_->SetTopLookupResult(next_);
-  }
+        details_(NONE, NORMAL) {}
 
   void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
     lookup_type_ = DESCRIPTOR_TYPE;
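The constructor/destructor pair removed above threads every live LookupResult onto a stack owned by the isolate, which is what made LookupResult::Iterate (deleted in property.cc earlier) possible. A generic standalone sketch of that intrusive RAII chaining (hypothetical names):

struct Node;

struct Registry {
  Registry() : top(0) {}
  Node* top;  // head of the chain of live nodes
};

struct Node {
  explicit Node(Registry* r) : registry_(r), next_(r->top) {
    r->top = this;  // push on construction
  }
  ~Node() {
    // Destruction must be strictly LIFO (true for stack objects)
    // for this unlink to be correct.
    registry_->top = next_;
  }
  Registry* registry_;
  Node* next_;
};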
@@ -238,9 +202,9 @@
     number_ = entry;
   }
 
-  void HandlerResult(JSProxy* proxy) {
+  void HandlerResult() {
     lookup_type_ = HANDLER_TYPE;
-    holder_ = proxy;
+    holder_ = NULL;
     details_ = PropertyDetails(NONE, HANDLER);
     cacheable_ = false;
   }
@@ -253,17 +217,11 @@
 
   void NotFound() {
     lookup_type_ = NOT_FOUND;
-    holder_ = NULL;
   }
 
   JSObject* holder() {
     ASSERT(IsFound());
-    return JSObject::cast(holder_);
-  }
-
-  JSProxy* proxy() {
-    ASSERT(IsFound());
-    return JSProxy::cast(holder_);
+    return holder_;
   }
 
   PropertyType type() {
@@ -287,9 +245,15 @@
   bool IsFound() { return lookup_type_ != NOT_FOUND; }
   bool IsHandler() { return lookup_type_ == HANDLER_TYPE; }
 
-  // Is the result is a property excluding transitions and the null descriptor?
+  // Is the result a property excluding transitions and the null
+  // descriptor?
   bool IsProperty() {
-    return IsFound() && IsPropertyDescriptor(this);
+    return IsFound() && (type() < FIRST_PHANTOM_PROPERTY_TYPE);
+  }
+
+  // Is the result a property or a transition?
+  bool IsPropertyOrTransition() {
+    return IsFound() && (type() != NULL_DESCRIPTOR);
   }
 
   bool IsCacheable() { return cacheable_; }
@@ -314,12 +278,10 @@
     }
   }
 
-
   Map* GetTransitionMap() {
     ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
-    ASSERT(type() == MAP_TRANSITION ||
-           type() == ELEMENTS_TRANSITION ||
-           type() == CONSTANT_TRANSITION);
+    ASSERT(type() == MAP_TRANSITION || type() == CONSTANT_TRANSITION ||
+           type() == ELEMENTS_TRANSITION);
     return Map::cast(GetValue());
   }
 
@@ -381,12 +343,7 @@
     return holder()->GetNormalizedProperty(this);
   }
 
-  void Iterate(ObjectVisitor* visitor);
-
  private:
-  Isolate* isolate_;
-  LookupResult* next_;
-
   // Where did we find the result;
   enum {
     NOT_FOUND,
@@ -397,7 +354,7 @@
     CONSTANT_TYPE
   } lookup_type_;
 
-  JSReceiver* holder_;
+  JSObject* holder_;
   int number_;
   bool cacheable_;
   PropertyDetails details_;
diff --git a/src/proxy.js b/src/proxy.js
index 4e86c88..4e44cd4 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-"use strict";
-
 global.Proxy = new $Object();
 
 var $Proxy = global.Proxy
@@ -34,10 +32,7 @@
 $Proxy.create = function(handler, proto) {
   if (!IS_SPEC_OBJECT(handler))
     throw MakeTypeError("handler_non_object", ["create"])
-  if (IS_UNDEFINED(proto))
-    proto = null
-  else if (!(IS_SPEC_OBJECT(proto) || proto === null))
-    throw MakeTypeError("proto_non_object", ["create"])
+  if (!IS_SPEC_OBJECT(proto)) proto = null  // Mozilla does this...
   return %CreateJSProxy(handler, proto)
 }
 
@@ -47,14 +42,8 @@
   if (!IS_SPEC_FUNCTION(callTrap))
     throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
   if (IS_UNDEFINED(constructTrap)) {
-    constructTrap = DerivedConstructTrap(callTrap)
-  } else if (IS_SPEC_FUNCTION(constructTrap)) {
-    // Make sure the trap receives 'undefined' as this.
-    var construct = constructTrap
-    constructTrap = function() {
-      return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength());
-    }
-  } else {
+    constructTrap = callTrap
+  } else if (!IS_SPEC_FUNCTION(constructTrap)) {
     throw MakeTypeError("trap_function_expected",
                         ["createFunction", "construct"])
   }
@@ -68,17 +57,6 @@
 // Builtins
 ////////////////////////////////////////////////////////////////////////////////
 
-function DerivedConstructTrap(callTrap) {
-  return function() {
-    var proto = this.prototype
-    if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
-    var obj = new $Object()
-    obj.__proto__ = proto
-    var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
-    return IS_SPEC_OBJECT(result) ? result : obj
-  }
-}
-
 function DelegateCallAndConstruct(callTrap, constructTrap) {
   return function() {
     return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
@@ -158,32 +136,9 @@
   var enumerableNames = []
   for (var i = 0, count = 0; i < names.length; ++i) {
     var name = names[i]
-    var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
-    if (!IS_UNDEFINED(desc) && desc.enumerable) {
+    if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) {
       enumerableNames[count++] = names[i]
     }
   }
   return enumerableNames
 }
-
-function DerivedEnumerateTrap() {
-  var names = this.getPropertyNames()
-  var enumerableNames = []
-  for (var i = 0, count = 0; i < names.length; ++i) {
-    var name = names[i]
-    var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
-    if (!IS_UNDEFINED(desc) && desc.enumerable) {
-      enumerableNames[count++] = names[i]
-    }
-  }
-  return enumerableNames
-}
-
-function ProxyEnumerate(proxy) {
-  var handler = %GetHandler(proxy)
-  if (IS_UNDEFINED(handler.enumerate)) {
-    return %Apply(DerivedEnumerateTrap, handler, [], 0, 0)
-  } else {
-    return ToStringArray(handler.enumerate(), "enumerate")
-  }
-}
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index f843278..b32d71d 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 5);
-  const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
+  ASSERT(type < 4);
+  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
 
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index b6fb3c5..f91ea93 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -81,7 +81,7 @@
   if (subject->IsAsciiRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
-      const char* data = ExternalAsciiString::cast(subject)->GetChars();
+      const char* data = ExternalAsciiString::cast(subject)->resource()->data();
       address = reinterpret_cast<const byte*>(data);
     } else {
       ASSERT(subject->IsSeqAsciiString());
@@ -92,7 +92,7 @@
   }
   const uc16* data;
   if (StringShape(subject).IsExternal()) {
-    data = ExternalTwoByteString::cast(subject)->GetChars();
+    data = ExternalTwoByteString::cast(subject)->resource()->data();
   } else {
     ASSERT(subject->IsSeqTwoByteString());
     data = SeqTwoByteString::cast(subject)->GetChars();
@@ -133,7 +133,7 @@
     subject_ptr = slice->parent();
     slice_offset = slice->offset();
   }
-  // Ensure that an underlying string has the same ASCII-ness.
+  // Ensure that an underlying string has the same ascii-ness.
   bool is_ascii = subject_ptr->IsAsciiRepresentation();
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
diff --git a/src/regexp.js b/src/regexp.js
index bc9508d..38d4496 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,7 +28,7 @@
 // Expect $Object = global.Object;
 // Expect $Array = global.Array;
 
-var $RegExp = global.RegExp;
+const $RegExp = global.RegExp;
 
 // A recursive descent parser for Patterns according to the grammar of
 // ECMA-262 15.10.1, with deviations noted below.
@@ -95,11 +95,12 @@
   }
 }
 
+
 // Deprecated RegExp.prototype.compile method.  We behave like the constructor
 // were called again.  In SpiderMonkey, this method returns the regexp object.
 // In JSC, it returns undefined.  For compatibility with JSC, we match their
 // behavior.
-function RegExpCompile(pattern, flags) {
+function CompileRegExp(pattern, flags) {
   // Both JSC and SpiderMonkey treat a missing pattern argument as the
   // empty subject string, and an actual undefined value passed as the
   // pattern as the string 'undefined'.  Note that JSC is inconsistent
@@ -107,11 +108,6 @@
   // RegExp.prototype.compile and in the constructor, where they are
   // the empty string.  For compatibility with JSC, we match their
   // behavior.
-  if (this == $RegExp.prototype) {
-    // We don't allow recompiling RegExp.prototype.
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['RegExp.prototype.compile', this]);
-  }
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
     DoConstructRegExp(this, 'undefined', flags);
   } else {
@@ -174,6 +170,13 @@
                         ['RegExp.prototype.exec', this]);
   }
 
+  if (%_ArgumentsLength() === 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
   string = TO_STRING_INLINE(string);
   var lastIndex = this.lastIndex;
 
@@ -222,6 +225,14 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['RegExp.prototype.test', this]);
   }
+  if (%_ArgumentsLength() == 0) {
+    var regExpInput = LAST_INPUT(lastMatchInfo);
+    if (IS_UNDEFINED(regExpInput)) {
+      throw MakeError('no_input_to_regexp', [this]);
+    }
+    string = regExpInput;
+  }
+
   string = TO_STRING_INLINE(string);
 
   var lastIndex = this.lastIndex;
@@ -250,32 +261,29 @@
     // Remove irrelevant preceding '.*' in a non-global test regexp.
     // The expression checks whether this.source starts with '.*' and
     // that the third char is not a '?'.
-    var regexp = this;
-    if (%_StringCharCodeAt(regexp.source, 0) == 46 &&  // '.'
-        %_StringCharCodeAt(regexp.source, 1) == 42 &&  // '*'
-        %_StringCharCodeAt(regexp.source, 2) != 63) {  // '?'
-      regexp = TrimRegExp(regexp);
+    if (%_StringCharCodeAt(this.source, 0) == 46 &&  // '.'
+        %_StringCharCodeAt(this.source, 1) == 42 &&  // '*'
+        %_StringCharCodeAt(this.source, 2) != 63) {  // '?'
+      if (!%_ObjectEquals(regexp_key, this)) {
+        regexp_key = this;
+        regexp_val = new $RegExp(SubString(this.source, 2, this.source.length),
+                                 (!this.ignoreCase
+                                  ? !this.multiline ? "" : "m"
+                                  : !this.multiline ? "i" : "im"));
+      }
+      if (%_RegExpExec(regexp_val, string, 0, lastMatchInfo) === null) {
+        return false;
+      }
     }
-    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
+    %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]);
     // matchIndices is either null or the lastMatchInfo array.
-    var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
+    var matchIndices = %_RegExpExec(this, string, 0, lastMatchInfo);
     if (matchIndices === null) return false;
     lastMatchInfoOverride = null;
     return true;
   }
 }
 
-function TrimRegExp(regexp) {
-  if (!%_ObjectEquals(regexp_key, regexp)) {
-    regexp_key = regexp;
-    regexp_val =
-      new $RegExp(SubString(regexp.source, 2, regexp.source.length),
-                  (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
-                                     : regexp.multiline ? "m" : ""));
-  }
-  return regexp_val;
-}
-
 
 function RegExpToString() {
   // If this.source is an empty string, output /(?:)/.
@@ -400,6 +408,7 @@
 function SetUpRegExp() {
   %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
+  %FunctionSetPrototype($RegExp, new $Object());
   %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
   %SetCode($RegExp, RegExpConstructor);
 
@@ -407,7 +416,7 @@
     "exec", RegExpExec,
     "test", RegExpTest,
     "toString", RegExpToString,
-    "compile", RegExpCompile
+    "compile", CompileRegExp
   ));
 
   // The length of compile is 1 in SpiderMonkey.
@@ -416,67 +425,63 @@
   // The properties input, $input, and $_ are aliases for each other.  When this
   // value is set the value it is set to is coerced to a string.
   // Getter and setter for the input.
-  var RegExpGetInput = function() {
+  function RegExpGetInput() {
     var regExpInput = LAST_INPUT(lastMatchInfo);
     return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
-  };
-  var RegExpSetInput = function(string) {
+  }
+  function RegExpSetInput(string) {
     LAST_INPUT(lastMatchInfo) = ToString(string);
   };
 
-  %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
-                                    RegExpSetInput, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
-                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$input', RegExpGetInput,
-                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE);
 
   // The properties multiline and $* are aliases for each other.  When this
   // value is set in SpiderMonkey, the value it is set to is coerced to a
   // boolean.  We mimic that behavior with a slight difference: in SpiderMonkey
   // the value of the expression 'RegExp.multiline = null' (for instance) is the
-  // boolean false (i.e., the value after coercion), while in V8 it is the value
-  // null (i.e., the value before coercion).
+  // boolean false (ie, the value after coercion), while in V8 it is the value
+  // null (ie, the value before coercion).
 
   // Getter and setter for multiline.
   var multiline = false;
-  var RegExpGetMultiline = function() { return multiline; };
-  var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
+  function RegExpGetMultiline() { return multiline; };
+  function RegExpSetMultiline(flag) { multiline = flag ? true : false; };
 
-  %DefineOrRedefineAccessorProperty($RegExp, 'multiline', RegExpGetMultiline,
-                                    RegExpSetMultiline, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$*', RegExpGetMultiline,
-                                    RegExpSetMultiline,
-                                    DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE);
 
 
-  var NoOpSetter = function(ignored) {};
+  function NoOpSetter(ignored) {}
 
 
   // Static properties set by a successful match.
-  %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', RegExpGetLastMatch,
-                                    NoOpSetter, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$&', RegExpGetLastMatch,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', RegExpGetLastParen,
-                                    NoOpSetter, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$+', RegExpGetLastParen,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'leftContext',
-                                    RegExpGetLeftContext, NoOpSetter,
-                                    DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$`', RegExpGetLeftContext,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'rightContext',
-                                    RegExpGetRightContext, NoOpSetter,
-                                    DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, "$'", RegExpGetRightContext,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE);
+  %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE);
+  %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE);
+  %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE);
 
   for (var i = 1; i < 10; ++i) {
-    %DefineOrRedefineAccessorProperty($RegExp, '$' + i,
-                                      RegExpMakeCaptureGetter(i), NoOpSetter,
-                                      DONT_DELETE);
+    %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE);
+    %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE);
   }
 }
 
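This rollback restores the older %DefineAccessor protocol (one runtime call per getter or setter) for the nonstandard static properties; their JS-visible behavior is unchanged. A quick tour, assuming a d8 shell:

    /(\w+) (\w+)/.test('John Smith');
    print(RegExp.$1);            // "John"       (captures $1..$9; setters are no-ops)
    print(RegExp.lastMatch);     // "John Smith" (alias RegExp['$&'])
    print(RegExp.input);         // "John Smith" (aliases RegExp.$_ and RegExp.$input)
    print(RegExp.rightContext);  // ""           (alias RegExp["$'"])
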
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 55f93ee..3d4c2dc 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,18 +42,12 @@
       : result_(result),
         result_assigned_(false),
         is_set_(false),
-        in_try_(false),
-        factory_(isolate()) { }
-
-  virtual ~Processor() { }
+        in_try_(false) {
+  }
 
   void Process(ZoneList<Statement*>* statements);
   bool result_assigned() const { return result_assigned_; }
 
-  AstNodeFactory<AstNullVisitor>* factory() {
-    return &factory_;
-  }
-
  private:
   Variable* result_;
 
@@ -70,13 +64,15 @@
   bool is_set_;
   bool in_try_;
 
-  AstNodeFactory<AstNullVisitor> factory_;
-
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
-    VariableProxy* result_proxy = factory()->NewVariableProxy(result_);
-    return factory()->NewAssignment(
-        Token::ASSIGN, result_proxy, value, RelocInfo::kNoPosition);
+    Zone* zone = isolate()->zone();
+    VariableProxy* result_proxy = new(zone) VariableProxy(isolate(), result_);
+    return new(zone) Assignment(isolate(),
+                                Token::ASSIGN,
+                                result_proxy,
+                                value,
+                                RelocInfo::kNoPosition);
   }
 
   // Node visitors.
@@ -209,15 +205,7 @@
 
 
 // Do nothing:
-void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
-void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
-void Processor::VisitModuleDeclaration(ModuleDeclaration* node) {}
-void Processor::VisitImportDeclaration(ImportDeclaration* node) {}
-void Processor::VisitExportDeclaration(ExportDeclaration* node) {}
-void Processor::VisitModuleLiteral(ModuleLiteral* node) {}
-void Processor::VisitModuleVariable(ModuleVariable* node) {}
-void Processor::VisitModulePath(ModulePath* node) {}
-void Processor::VisitModuleUrl(ModuleUrl* node) {}
+void Processor::VisitDeclaration(Declaration* node) {}
 void Processor::VisitEmptyStatement(EmptyStatement* node) {}
 void Processor::VisitReturnStatement(ReturnStatement* node) {}
 void Processor::VisitDebuggerStatement(DebuggerStatement* node) {}
@@ -248,21 +236,10 @@
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
-      ASSERT(function->end_position() != RelocInfo::kNoPosition);
-      // Set the position of the assignment statement one character past the
-      // source code, such that it definitely is not in the source code range
-      // of an immediate inner scope. For example in
-      //   eval('with ({x:1}) x = 1');
-      // the end position of the function generated for executing the eval code
-      // coincides with the end of the with scope which is the position of '1'.
-      int position = function->end_position();
-      VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
-          result->name(), false, position);
-      result_proxy->BindTo(result);
-      Statement* result_statement =
-          processor.factory()->NewReturnStatement(result_proxy);
-      result_statement->set_statement_pos(position);
-      body->Add(result_statement);
+      Isolate* isolate = info->isolate();
+      Zone* zone = isolate->zone();
+      VariableProxy* result_proxy = new(zone) VariableProxy(isolate, result);
+      body->Add(new(zone) ReturnStatement(result_proxy));
     }
   }
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 6ed4ff4..26d8846 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,7 +35,6 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
-#include "isolate-inl.h"
 #include "mark-compact.h"
 #include "platform.h"
 #include "scopeinfo.h"
@@ -46,8 +45,6 @@
 
 // Optimization sampler constants.
 static const int kSamplerFrameCount = 2;
-
-// Constants for statistical profiler.
 static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
 
 static const int kSamplerTicksBetweenThresholdAdjustment = 32;
@@ -60,24 +57,14 @@
 
 static const int kSizeLimit = 1500;
 
-// Constants for counter based profiler.
-
-// Number of times a function has to be seen on the stack before it is
-// optimized.
-static const int kProfilerTicksBeforeOptimization = 2;
-
-// Maximum size in bytes of generated code for a function to be optimized
-// the very first time it is seen on the stack.
-static const int kMaxSizeEarlyOpt = 500;
-
 
 Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
+// TODO(isolates): Create the semaphore lazily and clean it up when no
+// longer required.
+Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0);
 
 #ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
+bool RuntimeProfiler::has_been_globally_setup_ = false;
 #endif
 bool RuntimeProfiler::enabled_ = false;
 
@@ -94,45 +81,21 @@
 
 
 void RuntimeProfiler::GlobalSetup() {
-  ASSERT(!has_been_globally_set_up_);
+  ASSERT(!has_been_globally_setup_);
   enabled_ = V8::UseCrankshaft() && FLAG_opt;
 #ifdef DEBUG
-  has_been_globally_set_up_ = true;
+  has_been_globally_setup_ = true;
 #endif
 }
 
 
-static void GetICCounts(JSFunction* function,
-                        int* ic_with_typeinfo_count,
-                        int* ic_total_count,
-                        int* percentage) {
-  *ic_total_count = 0;
-  *ic_with_typeinfo_count = 0;
-  Object* raw_info =
-      function->shared()->code()->type_feedback_info();
-  if (raw_info->IsTypeFeedbackInfo()) {
-    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
-    *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
-    *ic_total_count = info->ic_total_count();
-  }
-  *percentage = *ic_total_count > 0
-      ? 100 * *ic_with_typeinfo_count / *ic_total_count
-      : 100;
-}
-
-
-void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
+void RuntimeProfiler::Optimize(JSFunction* function) {
   ASSERT(function->IsOptimizable());
   if (FLAG_trace_opt) {
     PrintF("[marking ");
     function->PrintName();
     PrintF(" 0x%" V8PRIxPTR, reinterpret_cast<intptr_t>(function->address()));
-    PrintF(" for recompilation, reason: %s", reason);
-    if (FLAG_type_info_threshold > 0) {
-      int typeinfo, total, percentage;
-      GetICCounts(function, &typeinfo, &total, &percentage);
-      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
-    }
+    PrintF(" for recompilation");
     PrintF("]\n");
   }
 
@@ -171,26 +134,15 @@
 
   // Get the stack check stub code object to match against.  We aren't
   // prepared to generate it, but we don't expect to have to.
-  bool found_code = false;
-  Code* stack_check_code = NULL;
-#if defined(V8_TARGET_ARCH_IA32) || \
-    defined(V8_TARGET_ARCH_ARM) || \
-    defined(V8_TARGET_ARCH_MIPS)
-  if (FLAG_count_based_interrupts) {
-    InterruptStub interrupt_stub;
-    found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
-  } else  // NOLINT
-#endif
-  {  // NOLINT
-    StackCheckStub check_stub;
-    found_code = check_stub.FindCodeInCache(&stack_check_code);
-  }
-  if (found_code) {
+  StackCheckStub check_stub;
+  Object* check_code;
+  MaybeObject* maybe_check_code = check_stub.TryGetCode();
+  if (maybe_check_code->ToObject(&check_code)) {
     Code* replacement_code =
         isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
     Code* unoptimized_code = shared->code();
     Deoptimizer::PatchStackCheckCode(unoptimized_code,
-                                     stack_check_code,
+                                     Code::cast(check_code),
                                      replacement_code);
   }
 }
@@ -234,33 +186,28 @@
   JSFunction* samples[kSamplerFrameCount];
   int sample_count = 0;
   int frame_count = 0;
-  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
-                                                 : kSamplerFrameCount;
   for (JavaScriptFrameIterator it(isolate_);
-       frame_count++ < frame_count_limit && !it.done();
+       frame_count++ < kSamplerFrameCount && !it.done();
        it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = JSFunction::cast(frame->function());
 
-    if (!FLAG_watch_ic_patching) {
-      // Adjust threshold each time we have processed
-      // a certain number of ticks.
-      if (sampler_ticks_until_threshold_adjustment_ > 0) {
-        sampler_ticks_until_threshold_adjustment_--;
-        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
-          // If the threshold is not already at the minimum
-          // modify and reset the ticks until next adjustment.
-          if (sampler_threshold_ > kSamplerThresholdMin) {
-            sampler_threshold_ -= kSamplerThresholdDelta;
-            sampler_ticks_until_threshold_adjustment_ =
-                kSamplerTicksBetweenThresholdAdjustment;
-          }
+    // Adjust threshold each time we have processed
+    // a certain number of ticks.
+    if (sampler_ticks_until_threshold_adjustment_ > 0) {
+      sampler_ticks_until_threshold_adjustment_--;
+      if (sampler_ticks_until_threshold_adjustment_ <= 0) {
+        // If the threshold is not already at the minimum
+        // modify and reset the ticks until next adjustment.
+        if (sampler_threshold_ > kSamplerThresholdMin) {
+          sampler_threshold_ -= kSamplerThresholdDelta;
+          sampler_ticks_until_threshold_adjustment_ =
+              kSamplerTicksBetweenThresholdAdjustment;
         }
       }
     }
 
-    if (function->IsMarkedForLazyRecompilation() &&
-        function->shared()->code()->kind() == Code::FUNCTION) {
+    if (function->IsMarkedForLazyRecompilation()) {
       Code* unoptimized = function->shared()->code();
       int nesting = unoptimized->allow_osr_at_loop_nesting_level();
       if (nesting == 0) AttemptOnStackReplacement(function);
@@ -270,101 +217,37 @@
 
     // Do not record non-optimizable functions.
     if (!function->IsOptimizable()) continue;
-    if (function->shared()->optimization_disabled()) continue;
+    samples[sample_count++] = function;
 
-    // Only record top-level code on top of the execution stack and
-    // avoid optimizing excessively large scripts since top-level code
-    // will be executed only once.
-    const int kMaxToplevelSourceSize = 10 * 1024;
-    if (function->shared()->is_toplevel()
-        && (frame_count > 1
-            || function->shared()->SourceSize() > kMaxToplevelSourceSize)) {
-      continue;
-    }
+    int function_size = function->shared()->SourceSize();
+    int threshold_size_factor = (function_size > kSizeLimit)
+        ? sampler_threshold_size_factor_
+        : 1;
 
-    if (FLAG_watch_ic_patching) {
-      int ticks = function->shared()->profiler_ticks();
+    int threshold = sampler_threshold_ * threshold_size_factor;
 
-      if (ticks >= kProfilerTicksBeforeOptimization) {
-        int typeinfo, total, percentage;
-        GetICCounts(function, &typeinfo, &total, &percentage);
-        if (percentage >= FLAG_type_info_threshold) {
-          // If this particular function hasn't had any ICs patched for enough
-          // ticks, optimize it now.
-          Optimize(function, "hot and stable");
-        } else if (ticks >= 100) {
-          // If this function does not have enough type info, but has
-          // seen a huge number of ticks, optimize it as it is.
-          Optimize(function, "not much type info but very hot");
-        } else {
-          function->shared()->set_profiler_ticks(ticks + 1);
-          if (FLAG_trace_opt_verbose) {
-            PrintF("[not yet optimizing ");
-            function->PrintName();
-            PrintF(", not enough type info: %d/%d (%d%%)]\n",
-                   typeinfo, total, percentage);
-          }
-        }
-      } else if (!any_ic_changed_ &&
-          function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
-        // If no IC was patched since the last tick and this function is very
-        // small, optimistically optimize it now.
-        Optimize(function, "small function");
-      } else if (!code_generated_ &&
-          !any_ic_changed_ &&
-          total_code_generated_ > 0 &&
-          total_code_generated_ < 2000) {
-        // If no code was generated and no IC was patched since the last tick,
-        // but a little code has already been generated since last Reset(),
-        // then type info might already be stable and we can optimize now.
-        Optimize(function, "stable on startup");
-      } else {
-        function->shared()->set_profiler_ticks(ticks + 1);
-      }
-    } else {  // !FLAG_watch_ic_patching
-      samples[sample_count++] = function;
-
-      int function_size = function->shared()->SourceSize();
-      int threshold_size_factor = (function_size > kSizeLimit)
-          ? sampler_threshold_size_factor_
-          : 1;
-
-      int threshold = sampler_threshold_ * threshold_size_factor;
-
-      if (LookupSample(function) >= threshold) {
-        Optimize(function, "sampler window lookup");
-      }
+    if (LookupSample(function) >= threshold) {
+      Optimize(function);
     }
   }
-  if (FLAG_watch_ic_patching) {
-    any_ic_changed_ = false;
-    code_generated_ = false;
-  } else {  // !FLAG_watch_ic_patching
-    // Add the collected functions as samples. It's important not to do
-    // this as part of collecting them because this will interfere with
-    // the sample lookup in case of recursive functions.
-    for (int i = 0; i < sample_count; i++) {
-      AddSample(samples[i], kSamplerFrameWeight[i]);
-    }
+
+  // Add the collected functions as samples. It's important not to do
+  // this as part of collecting them because this will interfere with
+  // the sample lookup in case of recursive functions.
+  for (int i = 0; i < sample_count; i++) {
+    AddSample(samples[i], kSamplerFrameWeight[i]);
   }
 }
 
 
 void RuntimeProfiler::NotifyTick() {
-#if defined(V8_TARGET_ARCH_IA32) || \
-    defined(V8_TARGET_ARCH_ARM) || \
-    defined(V8_TARGET_ARCH_MIPS)
-  if (FLAG_count_based_interrupts) return;
-#endif
   isolate_->stack_guard()->RequestRuntimeProfilerTick();
 }
 
 
-void RuntimeProfiler::SetUp() {
-  ASSERT(has_been_globally_set_up_);
-  if (!FLAG_watch_ic_patching) {
-    ClearSampleBuffer();
-  }
+void RuntimeProfiler::Setup() {
+  ASSERT(has_been_globally_setup_);
+  ClearSampleBuffer();
   // If the ticker hasn't already started, make sure to do so to get
   // the ticks for the runtime profiler.
   if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
@@ -372,14 +255,10 @@
 
 
 void RuntimeProfiler::Reset() {
-  if (FLAG_watch_ic_patching) {
-    total_code_generated_ = 0;
-  } else {  // !FLAG_watch_ic_patching
-    sampler_threshold_ = kSamplerThresholdInit;
-    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
-    sampler_ticks_until_threshold_adjustment_ =
-        kSamplerTicksBetweenThresholdAdjustment;
-  }
+  sampler_threshold_ = kSamplerThresholdInit;
+  sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
+  sampler_ticks_until_threshold_adjustment_ =
+      kSamplerTicksBetweenThresholdAdjustment;
 }
 
 
@@ -416,7 +295,7 @@
   // undid the decrement done by the profiler thread. Increment again
   // to get the right count of active isolates.
   NoBarrier_AtomicIncrement(&state_, 1);
-  semaphore.Pointer()->Signal();
+  semaphore_->Signal();
 }
 
 
@@ -429,7 +308,7 @@
   Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
   ASSERT(old_state >= -1);
   if (old_state != 0) return false;
-  semaphore.Pointer()->Wait();
+  semaphore_->Wait();
   return true;
 }
 
@@ -445,7 +324,7 @@
   if (new_state == 0) {
     // The profiler thread is waiting. Wake it up. It must check for
     // stop conditions before attempting to wait again.
-    semaphore.Pointer()->Signal();
+    semaphore_->Signal();
   }
   thread->Join();
   // The profiler thread is now stopped. Undo the increment in case it
@@ -459,8 +338,7 @@
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];
-    if (function != NULL &&
-        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
+    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
       sampler_window_[i] = NULL;
     }
   }
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index e338849..15c2097 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,7 +46,7 @@
   static void GlobalSetup();
 
   static inline bool IsEnabled() {
-    ASSERT(has_been_globally_set_up_);
+    ASSERT(has_been_globally_setup_);
     return enabled_;
   }
 
@@ -54,22 +54,13 @@
 
   void NotifyTick();
 
-  void SetUp();
+  void Setup();
   void Reset();
   void TearDown();
 
   Object** SamplerWindowAddress();
   int SamplerWindowSize();
 
-  void NotifyICChanged() { any_ic_changed_ = true; }
-
-  void NotifyCodeGenerated(int generated_code_size) {
-    if (FLAG_watch_ic_patching) {
-      code_generated_ = true;
-      total_code_generated_ += generated_code_size;
-    }
-  }
-
   // Rate limiting support.
 
   // VM thread interface.
@@ -101,14 +92,14 @@
   void RemoveDeadSamples();
   void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
 
-  void AttemptOnStackReplacement(JSFunction* function);
-
  private:
   static const int kSamplerWindowSize = 16;
 
   static void HandleWakeUp(Isolate* isolate);
 
-  void Optimize(JSFunction* function, const char* reason);
+  void Optimize(JSFunction* function);
+
+  void AttemptOnStackReplacement(JSFunction* function);
 
   void ClearSampleBuffer();
 
@@ -128,17 +119,14 @@
   int sampler_window_position_;
   int sampler_window_weight_[kSamplerWindowSize];
 
-  bool any_ic_changed_;
-  bool code_generated_;
-  int total_code_generated_;
-
   // Possible state values:
   //   -1            => the profiler thread is waiting on the semaphore
   //   0 or positive => the number of isolates running JavaScript code.
   static Atomic32 state_;
+  static Semaphore* semaphore_;
 
 #ifdef DEBUG
-  static bool has_been_globally_set_up_;
+  static bool has_been_globally_setup_;
 #endif
   static bool enabled_;
 };
diff --git a/src/runtime.cc b/src/runtime.cc
index 320ab59..b1c4c10 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -40,10 +40,8 @@
 #include "dateparser-inl.h"
 #include "debug.h"
 #include "deoptimizer.h"
-#include "date.h"
 #include "execution.h"
 #include "global-handles.h"
-#include "isolate-inl.h"
 #include "jsregexp.h"
 #include "json-parser.h"
 #include "liveedit.h"
@@ -70,20 +68,20 @@
 // Cast the given object to a value of the specified type and store
 // it in a variable with the given name.  If the object is not of the
 // expected type call IllegalOperation and return.
-#define CONVERT_ARG_CHECKED(Type, name, index)                       \
-  RUNTIME_ASSERT(args[index]->Is##Type());                           \
-  Type* name = Type::cast(args[index]);
+#define CONVERT_CHECKED(Type, name, obj)                             \
+  RUNTIME_ASSERT(obj->Is##Type());                                   \
+  Type* name = Type::cast(obj);
 
-#define CONVERT_ARG_HANDLE_CHECKED(Type, name, index)                \
+#define CONVERT_ARG_CHECKED(Type, name, index)                       \
   RUNTIME_ASSERT(args[index]->Is##Type());                           \
   Handle<Type> name = args.at<Type>(index);
 
 // Cast the given object to a boolean and store it in a variable with
 // the given name.  If the object is not a boolean call IllegalOperation
 // and return.
-#define CONVERT_BOOLEAN_ARG_CHECKED(name, index)                     \
-  RUNTIME_ASSERT(args[index]->IsBoolean());                          \
-  bool name = args[index]->IsTrue();
+#define CONVERT_BOOLEAN_CHECKED(name, obj)                            \
+  RUNTIME_ASSERT(obj->IsBoolean());                                   \
+  bool name = (obj)->IsTrue();
 
 // Cast the given argument to a Smi and store its value in an int variable
 // with the given name.  If the argument is not a Smi call IllegalOperation
@@ -107,35 +105,6 @@
   type name = NumberTo##Type(obj);
 
 
-// Cast the given argument to PropertyDetails and store its value in a
-// variable with the given name.  If the argument is not a Smi call
-// IllegalOperation and return.
-#define CONVERT_PROPERTY_DETAILS_CHECKED(name, index)                \
-  RUNTIME_ASSERT(args[index]->IsSmi());                              \
-  PropertyDetails name = PropertyDetails(Smi::cast(args[index]));
-
-
-// Assert that the given argument has a valid value for a StrictModeFlag
-// and store it in a StrictModeFlag variable with the given name.
-#define CONVERT_STRICT_MODE_ARG_CHECKED(name, index)                 \
-  RUNTIME_ASSERT(args[index]->IsSmi());                              \
-  RUNTIME_ASSERT(args.smi_at(index) == kStrictMode ||                \
-                 args.smi_at(index) == kNonStrictMode);              \
-  StrictModeFlag name =                                              \
-      static_cast<StrictModeFlag>(args.smi_at(index));
-
-
-// Assert that the given argument has a valid value for a LanguageMode
-// and store it in a LanguageMode variable with the given name.
-#define CONVERT_LANGUAGE_MODE_ARG(name, index)                       \
-  ASSERT(args[index]->IsSmi());                                      \
-  ASSERT(args.smi_at(index) == CLASSIC_MODE ||                       \
-         args.smi_at(index) == STRICT_MODE ||                        \
-         args.smi_at(index) == EXTENDED_MODE);                       \
-  LanguageMode name =                                                \
-      static_cast<LanguageMode>(args.smi_at(index));
-
-
 MUST_USE_RESULT static MaybeObject* DeepCopyBoilerplate(Isolate* isolate,
                                                    JSObject* boilerplate) {
   StackLimitCheck check(isolate);
@@ -174,7 +143,7 @@
     }
   } else {
     { MaybeObject* maybe_result =
-          heap->AllocateFixedArray(copy->NumberOfLocalProperties());
+          heap->AllocateFixedArray(copy->NumberOfLocalProperties(NONE));
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     FixedArray* names = FixedArray::cast(result);
@@ -208,7 +177,6 @@
   // Pixel elements cannot be created using an object literal.
   ASSERT(!copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
       if (elements->map() == heap->fixed_cow_array_map()) {
@@ -221,9 +189,6 @@
       } else {
         for (int i = 0; i < elements->length(); i++) {
           Object* value = elements->get(i);
-          ASSERT(value->IsSmi() ||
-                 value->IsTheHole() ||
-                 (copy->GetElementsKind() == FAST_ELEMENTS));
           if (value->IsJSObject()) {
             JSObject* js_object = JSObject::cast(value);
             { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -275,6 +240,18 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneLiteralBoilerplate) {
+  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
+  return DeepCopyBoilerplate(isolate, boilerplate);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CloneShallowLiteralBoilerplate) {
+  CONVERT_CHECKED(JSObject, boilerplate, args[0]);
+  return isolate->heap()->CopyJSObject(boilerplate);
+}
+
+
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context,
     Handle<FixedArray> constant_properties,
@@ -282,43 +259,45 @@
   Isolate* isolate = context->GetIsolate();
   int properties_length = constant_properties->length();
   int number_of_properties = properties_length / 2;
-  // Check that there are only symbols and array indices among keys.
-  int number_of_symbol_keys = 0;
-  for (int p = 0; p != properties_length; p += 2) {
-    Object* key = constant_properties->get(p);
-    uint32_t element_index = 0;
-    if (key->IsSymbol()) {
-      number_of_symbol_keys++;
-    } else if (key->ToArrayIndex(&element_index)) {
-      // An index key does not require space in the property backing store.
-      number_of_properties--;
-    } else {
-      // Bail out as a non-symbol non-index key makes caching impossible.
-      // ASSERT to make sure that the if condition after the loop is false.
-      ASSERT(number_of_symbol_keys != number_of_properties);
-      break;
-    }
-  }
-  // If we only have symbols and array indices among keys then we can
-  // use the map cache in the global context.
-  const int kMaxKeys = 10;
-  if ((number_of_symbol_keys == number_of_properties) &&
-      (number_of_symbol_keys < kMaxKeys)) {
-    // Create the fixed array with the key.
-    Handle<FixedArray> keys =
-        isolate->factory()->NewFixedArray(number_of_symbol_keys);
-    if (number_of_symbol_keys > 0) {
-      int index = 0;
-      for (int p = 0; p < properties_length; p += 2) {
-        Object* key = constant_properties->get(p);
-        if (key->IsSymbol()) {
-          keys->set(index++, key);
-        }
+  if (FLAG_canonicalize_object_literal_maps) {
+    // Check that there are only symbols and array indices among keys.
+    int number_of_symbol_keys = 0;
+    for (int p = 0; p != properties_length; p += 2) {
+      Object* key = constant_properties->get(p);
+      uint32_t element_index = 0;
+      if (key->IsSymbol()) {
+        number_of_symbol_keys++;
+      } else if (key->ToArrayIndex(&element_index)) {
+        // An index key does not require space in the property backing store.
+        number_of_properties--;
+      } else {
+        // Bail out as a non-symbol non-index key makes caching impossible.
+        // ASSERT to make sure that the if condition after the loop is false.
+        ASSERT(number_of_symbol_keys != number_of_properties);
+        break;
       }
-      ASSERT(index == number_of_symbol_keys);
     }
-    *is_result_from_cache = true;
-    return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
+    // If we only have symbols and array indices among keys then we can
+    // use the map cache in the global context.
+    const int kMaxKeys = 10;
+    if ((number_of_symbol_keys == number_of_properties) &&
+        (number_of_symbol_keys < kMaxKeys)) {
+      // Create the fixed array with the key.
+      Handle<FixedArray> keys =
+          isolate->factory()->NewFixedArray(number_of_symbol_keys);
+      if (number_of_symbol_keys > 0) {
+        int index = 0;
+        for (int p = 0; p < properties_length; p += 2) {
+          Object* key = constant_properties->get(p);
+          if (key->IsSymbol()) {
+            keys->set(index++, key);
+          }
+        }
+        ASSERT(index == number_of_symbol_keys);
+      }
+      *is_result_from_cache = true;
+      return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
+    }
   }
   *is_result_from_cache = false;
   return isolate->factory()->CopyMap(
@@ -362,7 +341,7 @@
   Handle<JSObject> boilerplate = isolate->factory()->NewJSObjectFromMap(map);
 
   // Normalize the elements of the boilerplate to save space if needed.
-  if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
+  if (!should_have_fast_elements) NormalizeElements(boilerplate);
 
   // Add the constant properties to the boilerplate.
   int length = constant_properties->length();
@@ -372,8 +351,7 @@
     // Normalize the properties of the object to avoid n^2 behavior
     // when extending the object with multiple properties. Indicate the number of
     // properties to be added.
-    JSObject::NormalizeProperties(
-        boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
+    NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
   }
 
   for (int index = 0; index < length; index +=2) {
@@ -391,18 +369,22 @@
     if (key->IsSymbol()) {
       if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
         // Array index as string (uint32).
-        result = JSObject::SetOwnElement(
-            boilerplate, element_index, value, kNonStrictMode);
+        result = SetOwnElement(boilerplate,
+                               element_index,
+                               value,
+                               kNonStrictMode);
       } else {
         Handle<String> name(String::cast(*key));
         ASSERT(!name->AsArrayIndex(&element_index));
-        result = JSObject::SetLocalPropertyIgnoreAttributes(
-            boilerplate, name, value, NONE);
+        result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
+                                                  value, NONE);
       }
     } else if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
-      result = JSObject::SetOwnElement(
-          boilerplate, element_index, value, kNonStrictMode);
+      result = SetOwnElement(boilerplate,
+                             element_index,
+                             value,
+                             kNonStrictMode);
     } else {
       // Non-uint32 number.
       ASSERT(key->IsNumber());
@@ -412,8 +394,8 @@
       const char* str = DoubleToCString(num, buffer);
       Handle<String> name =
           isolate->factory()->NewStringFromAscii(CStrVector(str));
-      result = JSObject::SetLocalPropertyIgnoreAttributes(
-          boilerplate, name, value, NONE);
+      result = SetLocalPropertyIgnoreAttributes(boilerplate, name,
+                                                value, NONE);
     }
     // If setting the property on the boilerplate throws an
     // exception, the exception is converted to an empty handle in
@@ -427,110 +409,52 @@
   // computed properties have been assigned so that we can generate
   // constant function properties.
   if (should_transform && !has_function_literal) {
-    JSObject::TransformToFastProperties(
-        boilerplate, boilerplate->map()->unused_property_fields());
+    TransformToFastProperties(boilerplate,
+                              boilerplate->map()->unused_property_fields());
   }
 
   return boilerplate;
 }
 
 
-MaybeObject* TransitionElements(Handle<Object> object,
-                                ElementsKind to_kind,
-                                Isolate* isolate) {
-  HandleScope scope(isolate);
-  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
-  ElementsKind from_kind =
-      Handle<JSObject>::cast(object)->map()->elements_kind();
-  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
-    Handle<Object> result = JSObject::TransitionElementsKind(
-        Handle<JSObject>::cast(object), to_kind);
-    if (result.is_null()) return isolate->ThrowIllegalOperation();
-    return *result;
-  }
-  return isolate->ThrowIllegalOperation();
-}
-
-
-static const int kSmiOnlyLiteralMinimumLength = 1024;
-
-
-Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
+static Handle<Object> CreateArrayLiteralBoilerplate(
     Isolate* isolate,
     Handle<FixedArray> literals,
     Handle<FixedArray> elements) {
   // Create the JSArray.
   Handle<JSFunction> constructor(
       JSFunction::GlobalContextFromLiterals(*literals)->array_function());
-  Handle<JSArray> object =
-      Handle<JSArray>::cast(isolate->factory()->NewJSObject(constructor));
+  Handle<Object> object = isolate->factory()->NewJSObject(constructor);
 
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(elements->get(1)));
+  const bool is_cow =
+      (elements->map() == isolate->heap()->fixed_cow_array_map());
+  Handle<FixedArray> copied_elements =
+      is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
 
-  Context* global_context = isolate->context()->global_context();
-  if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    object->set_map(Map::cast(global_context->smi_js_array_map()));
-  } else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
-    object->set_map(Map::cast(global_context->double_js_array_map()));
-  } else {
-    object->set_map(Map::cast(global_context->object_js_array_map()));
-  }
-
-  Handle<FixedArrayBase> copied_elements_values;
-  if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
-    ASSERT(FLAG_smi_only_arrays);
-    copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
-        Handle<FixedDoubleArray>::cast(constant_elements_values));
-  } else {
-    ASSERT(constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           constant_elements_kind == FAST_ELEMENTS);
-    const bool is_cow =
-        (constant_elements_values->map() ==
-         isolate->heap()->fixed_cow_array_map());
-    if (is_cow) {
-      copied_elements_values = constant_elements_values;
-#if DEBUG
-      Handle<FixedArray> fixed_array_values =
-          Handle<FixedArray>::cast(copied_elements_values);
-      for (int i = 0; i < fixed_array_values->length(); i++) {
-        ASSERT(!fixed_array_values->get(i)->IsFixedArray());
-      }
+  Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
+  if (is_cow) {
+#ifdef DEBUG
+    // Copy-on-write arrays must be shallow (and simple).
+    for (int i = 0; i < content->length(); i++) {
+      ASSERT(!content->get(i)->IsFixedArray());
+    }
 #endif
-    } else {
-      Handle<FixedArray> fixed_array_values =
-          Handle<FixedArray>::cast(constant_elements_values);
-      Handle<FixedArray> fixed_array_values_copy =
-          isolate->factory()->CopyFixedArray(fixed_array_values);
-      copied_elements_values = fixed_array_values_copy;
-      for (int i = 0; i < fixed_array_values->length(); i++) {
-        Object* current = fixed_array_values->get(i);
-        if (current->IsFixedArray()) {
-          // The value contains the constant_properties of a
-          // simple object or array literal.
-          Handle<FixedArray> fa(FixedArray::cast(fixed_array_values->get(i)));
-          Handle<Object> result =
-              CreateLiteralBoilerplate(isolate, literals, fa);
-          if (result.is_null()) return result;
-          fixed_array_values_copy->set(i, *result);
-        }
+  } else {
+    for (int i = 0; i < content->length(); i++) {
+      if (content->get(i)->IsFixedArray()) {
+        // The value contains the constant_properties of a
+        // simple object or array literal.
+        Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
+        Handle<Object> result =
+            CreateLiteralBoilerplate(isolate, literals, fa);
+        if (result.is_null()) return result;
+        content->set(i, *result);
       }
     }
   }
-  object->set_elements(*copied_elements_values);
-  object->set_length(Smi::FromInt(copied_elements_values->length()));
 
-  //  Ensure that the boilerplate object has FAST_ELEMENTS, unless the flag is
-  //  on or the object is larger than the threshold.
-  if (!FLAG_smi_only_arrays &&
-      constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) {
-    if (object->GetElementsKind() != FAST_ELEMENTS) {
-      CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
-    }
-  }
-
+  // Set the elements.
+  Handle<JSArray>::cast(object)->SetContent(*content);
   return object;
 }
 
@@ -555,8 +479,7 @@
                                             false,
                                             kHasNoFunctionLiteral);
     case CompileTimeValue::ARRAY_LITERAL:
-      return Runtime::CreateArrayLiteralBoilerplate(
-          isolate, literals, elements);
+      return CreateArrayLiteralBoilerplate(isolate, literals, elements);
     default:
       UNREACHABLE();
       return Handle<Object>::null();
@@ -564,12 +487,34 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralBoilerplate) {
+  // Takes a FixedArray of elements containing the literal elements of
+  // the array literal and produces a JSArray with those elements.
+  // Additionally takes the literals array of the surrounding function
+  // which contains the context from which to get the Array function
+  // to use for creating the array literal.
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
+  CONVERT_SMI_ARG_CHECKED(literals_index, 1);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
+
+  Handle<Object> object =
+      CreateArrayLiteralBoilerplate(isolate, literals, elements);
+  if (object.is_null()) return Failure::Exception();
+
+  // Update the functions literal and return the boilerplate.
+  literals->set(literals_index, *object);
+  return *object;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -593,9 +538,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_ARG_CHECKED(FixedArray, constant_properties, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0;
@@ -619,15 +564,14 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index), isolate);
   if (*boilerplate == isolate->heap()->undefined_value()) {
-    boilerplate =
-        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
+    boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -639,16 +583,14 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_CHECKED(FixedArray, elements, 2);
 
   // Check if boilerplate exists. If not, create it first.
   Handle<Object> boilerplate(literals->get(literals_index), isolate);
   if (*boilerplate == isolate->heap()->undefined_value()) {
-    ASSERT(*elements != isolate->heap()->empty_fixed_array());
-    boilerplate =
-        Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements);
+    boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
     if (boilerplate.is_null()) return Failure::Exception();
     // Update the functions literal and return the boilerplate.
     literals->set(literals_index, *boilerplate);
@@ -700,113 +642,37 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+  CONVERT_CHECKED(JSProxy, proxy, args[0]);
   return proxy->handler();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
+  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
   return proxy->call_trap();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
+  CONVERT_CHECKED(JSFunctionProxy, proxy, args[0]);
   return proxy->construct_trap();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
+  CONVERT_CHECKED(JSProxy, proxy, args[0]);
   proxy->Fix();
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
-  Handle<ObjectHashSet> table = isolate->factory()->NewObjectHashSet(0);
-  holder->set_table(*table);
-  return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
-  Handle<Object> key(args[1]);
-  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
-  table = ObjectHashSetAdd(table, key);
-  holder->set_table(*table);
-  return isolate->heap()->undefined_symbol();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
-  Handle<Object> key(args[1]);
-  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
-  return isolate->heap()->ToBoolean(table->Contains(*key));
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
-  Handle<Object> key(args[1]);
-  Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
-  table = ObjectHashSetRemove(table, key);
-  holder->set_table(*table);
-  return isolate->heap()->undefined_symbol();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
-  Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
-  holder->set_table(*table);
-  return *holder;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
-  Handle<Object> key(args[1]);
-  return ObjectHashTable::cast(holder->table())->Lookup(*key);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
-  Handle<Object> key(args[1]);
-  Handle<Object> value(args[2]);
-  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
-  Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
-  holder->set_table(*new_table);
-  return *value;
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
   ASSERT(weakmap->map()->inobject_properties() == 0);
   Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
   weakmap->set_table(*table);
@@ -818,19 +684,22 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
-  return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
+  // because they cannot be cast to JSObject to get an identity hash code.
+  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  return weakmap->table()->Lookup(*key);
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
+  CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
+  // TODO(mstarzinger): See Runtime_WeakMapGet above.
+  CONVERT_ARG_CHECKED(JSObject, key, 1);
   Handle<Object> value(args[2]);
-  Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
+  Handle<ObjectHashTable> table(weakmap->table());
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   weakmap->set_table(*new_table);
   return *value;
@@ -849,7 +718,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
+  CONVERT_CHECKED(JSReceiver, input_obj, args[0]);
   Object* obj = input_obj;
   // We don't expect access checks to be needed on JSProxy objects.
   ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
@@ -883,6 +752,57 @@
 }
 
 
+// Inserts an object as the hidden prototype of another object.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHiddenPrototype) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSObject, jsobject, args[0]);
+  CONVERT_CHECKED(JSObject, proto, args[1]);
+
+  // Sanity checks.  The old prototype (that we are replacing) could
+  // theoretically be null, but if it is not null then check that we
+  // didn't already install a hidden prototype here.
+  RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
+    !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
+  RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
+
+  // Allocate up front before we start altering state in case we get a GC.
+  Object* map_or_failure;
+  { MaybeObject* maybe_map_or_failure = proto->map()->CopyDropTransitions();
+    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
+      return maybe_map_or_failure;
+    }
+  }
+  Map* new_proto_map = Map::cast(map_or_failure);
+
+  { MaybeObject* maybe_map_or_failure = jsobject->map()->CopyDropTransitions();
+    if (!maybe_map_or_failure->ToObject(&map_or_failure)) {
+      return maybe_map_or_failure;
+    }
+  }
+  Map* new_map = Map::cast(map_or_failure);
+
+  // Set proto's prototype to be the old prototype of the object.
+  new_proto_map->set_prototype(jsobject->GetPrototype());
+  proto->set_map(new_proto_map);
+  new_proto_map->set_is_hidden_prototype();
+
+  // Set the object's prototype to proto.
+  new_map->set_prototype(proto);
+  jsobject->set_map(new_map);
+
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsConstructCall) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  return isolate->heap()->ToBoolean(it.frame()->IsConstructor());
+}
+
+
 // Recursively traverses hidden prototypes if property is not found
 static void GetOwnPropertyImplementation(JSObject* obj,
                                          String* name,
@@ -996,14 +916,23 @@
   DESCRIPTOR_SIZE
 };
 
-
-static MaybeObject* GetOwnProperty(Isolate* isolate,
-                                   Handle<JSObject> obj,
-                                   Handle<String> name) {
+// Returns an array with the property description:
+//  if args[1] is not a property on args[0]
+//          returns undefined
+//  if args[1] is a data property on args[0]
+//         [false, value, Writeable, Enumerable, Configurable]
+//  if args[1] is an accessor on args[0]
+//         [true, GetFunction, SetFunction, Enumerable, Configurable]
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
+  ASSERT(args.length() == 2);
   Heap* heap = isolate->heap();
+  HandleScope scope(isolate);
   Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
   Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
-  LookupResult result(isolate);
+  LookupResult result;
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+
   // This could be an element.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
@@ -1023,7 +952,7 @@
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
         elms->set(VALUE_INDEX, *substr);
         elms->set(WRITABLE_INDEX, heap->false_value());
-        elms->set(ENUMERABLE_INDEX,  heap->true_value());
+        elms->set(ENUMERABLE_INDEX,  heap->false_value());
         elms->set(CONFIGURABLE_INDEX, heap->false_value());
         return *desc;
       }
@@ -1031,7 +960,7 @@
       case JSObject::INTERCEPTED_ELEMENT:
       case JSObject::FAST_ELEMENT: {
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-        Handle<Object> value = Object::GetElement(obj, index);
+        Handle<Object> value = GetElement(obj, index);
         RETURN_IF_EMPTY_HANDLE(isolate, value);
         elms->set(VALUE_INDEX, *value);
         elms->set(WRITABLE_INDEX, heap->true_value());
@@ -1061,21 +990,21 @@
         switch (details.type()) {
           case CALLBACKS: {
             // This is an accessor property with getter and/or setter.
-            AccessorPair* accessors =
-                AccessorPair::cast(dictionary->ValueAt(entry));
+            FixedArray* callbacks =
+                FixedArray::cast(dictionary->ValueAt(entry));
             elms->set(IS_ACCESSOR_INDEX, heap->true_value());
             if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
-              elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
+              elms->set(GETTER_INDEX, callbacks->get(0));
             }
             if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
-              elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
+              elms->set(SETTER_INDEX, callbacks->get(1));
             }
             break;
           }
           case NORMAL: {
             // This is a data property.
             elms->set(IS_ACCESSOR_INDEX, heap->false_value());
-            Handle<Object> value = Object::GetElement(obj, index);
+            Handle<Object> value = GetElement(obj, index);
             ASSERT(!value.is_null());
             elms->set(VALUE_INDEX, *value);
             elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
@@ -1107,18 +1036,18 @@
   elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
 
   bool is_js_accessor = (result.type() == CALLBACKS) &&
-                        (result.GetCallbackObject()->IsAccessorPair());
+                        (result.GetCallbackObject()->IsFixedArray());
 
   if (is_js_accessor) {
     // __defineGetter__/__defineSetter__ callback.
     elms->set(IS_ACCESSOR_INDEX, heap->true_value());
 
-    AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
+    FixedArray* structure = FixedArray::cast(result.GetCallbackObject());
     if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
-      elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
+      elms->set(GETTER_INDEX, structure->get(0));
     }
     if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
-      elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
+      elms->set(SETTER_INDEX, structure->get(1));
     }
   } else {
     elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1137,32 +1066,16 @@
 }
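// For illustration (not part of this revert): the five-element array built
// above is what v8natives.js is expected to repackage into the descriptor
// object returned by Object.getOwnPropertyDescriptor:
//
//   var o = {};
//   Object.defineProperty(o, 'x', { value: 1, writable: true });
//   // Runtime_GetOwnProperty yields [false, 1, true, false, false]
//   // (is_accessor, value, writable, enumerable, configurable), surfaced as:
//   Object.getOwnPropertyDescriptor(o, 'x');
//   // => { value: 1, writable: true, enumerable: false, configurable: false }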
 
 
-// Returns an array with the property description:
-//  if args[1] is not a property on args[0]
-//          returns undefined
-//  if args[1] is a data property on args[0]
-//         [false, value, Writeable, Enumerable, Configurable]
-//  if args[1] is an accessor on args[0]
-//         [true, GetFunction, SetFunction, Enumerable, Configurable]
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) {
-  ASSERT(args.length() == 2);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-  return GetOwnProperty(isolate, obj, name);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
   return obj->PreventExtensions();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
   if (obj->IsJSGlobalProxy()) {
     Object* proto = obj->GetPrototype();
     if (proto->IsNull()) return isolate->heap()->false_value();
@@ -1176,9 +1089,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+  CONVERT_ARG_CHECKED(JSRegExp, re, 0);
+  CONVERT_ARG_CHECKED(String, pattern, 1);
+  CONVERT_ARG_CHECKED(String, flags, 2);
   Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
   if (result.is_null()) return Failure::Exception();
   return *result;
@@ -1188,7 +1101,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
+  CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
   return *isolate->factory()->CreateApiFunction(data);
 }
 
@@ -1203,8 +1116,9 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(HeapObject, templ, 0);
-  CONVERT_SMI_ARG_CHECKED(index, 1)
+  CONVERT_CHECKED(HeapObject, templ, args[0]);
+  CONVERT_CHECKED(Smi, field, args[1]);
+  int index = field->value();
   int offset = index * kPointerSize + HeapObject::kHeaderSize;
   InstanceType type = templ->map()->instance_type();
   RUNTIME_ASSERT(type ==  FUNCTION_TEMPLATE_INFO_TYPE ||
@@ -1221,7 +1135,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(HeapObject, object, 0);
+  CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
   bool needs_access_checks = old_map->is_access_check_needed();
   if (needs_access_checks) {
@@ -1240,7 +1154,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(HeapObject, object, 0);
+  CONVERT_CHECKED(HeapObject, object, args[0]);
   Map* old_map = object->map();
   if (!old_map->is_access_check_needed()) {
     // Copy map so it won't interfere constructor's initial map.
@@ -1276,7 +1190,7 @@
       isolate->context()->global());
 
   Handle<Context> context = args.at<Context>(0);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
+  CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
   CONVERT_SMI_ARG_CHECKED(flags, 2);
 
   // Traverse the name/value pairs and set the properties.
@@ -1294,20 +1208,49 @@
     if (value->IsUndefined() || is_const_property) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
-      LookupResult lookup(isolate);
+      LookupResult lookup;
       global->Lookup(*name, &lookup);
       if (lookup.IsProperty()) {
-        // We found an existing property. Unless it was an interceptor
-        // that claims the property is absent, skip this declaration.
-        if (lookup.type() != INTERCEPTOR) {
-          continue;
-        }
+        // Determine if the property is local by comparing the holder
+        // against the global object. The information will be used to
+        // avoid throwing re-declaration errors when declaring
+        // variables or constants that exist in the prototype chain.
+        bool is_local = (*global == lookup.holder());
+        // Get the property attributes and determine if the property is
+        // read-only.
         PropertyAttributes attributes = global->GetPropertyAttribute(*name);
-        if (attributes != ABSENT) {
+        bool is_read_only = (attributes & READ_ONLY) != 0;
+        if (lookup.type() == INTERCEPTOR) {
+          // If the interceptor says the property is there, we
+          // just return undefined without overwriting the property.
+          // Otherwise, we proceed to set the property.
+          if (attributes != ABSENT) {
+            // Check if the existing property conflicts with regards to const.
+            if (is_local && (is_read_only || is_const_property)) {
+              const char* type = (is_read_only) ? "const" : "var";
+              return ThrowRedeclarationError(isolate, type, name);
+            }
+            // The property already exists without conflicting: Go to
+            // the next declaration.
+            continue;
+          }
+          // Fall-through and introduce the absent property by using
+          // SetProperty.
+        } else {
+          // For const properties, we treat a callback with this name
+          // even in the prototype as a conflicting declaration.
+          if (is_const_property && (lookup.type() == CALLBACKS)) {
+            return ThrowRedeclarationError(isolate, "const", name);
+          }
+          // Otherwise, we check for locally conflicting declarations.
+          if (is_local && (is_read_only || is_const_property)) {
+            const char* type = (is_read_only) ? "const" : "var";
+            return ThrowRedeclarationError(isolate, type, name);
+          }
+          // The property already exists without conflicting: Go to
+          // the next declaration.
           continue;
         }
-        // Fall-through and introduce the absent property by using
-        // SetProperty.
       }
     } else {
       is_function_declaration = true;
@@ -1321,24 +1264,36 @@
       value = function;
     }
 
-    LookupResult lookup(isolate);
+    LookupResult lookup;
     global->LocalLookup(*name, &lookup);
 
+    // There's a local property that we need to overwrite because
+    // we're either declaring a function or there's an interceptor
+    // that claims the property is absent.
+    //
+    // Check for conflicting re-declarations. We cannot have
+    // conflicting types in case of intercepted properties because
+    // they are absent.
+    if (lookup.IsProperty() &&
+        (lookup.type() != INTERCEPTOR) &&
+        (lookup.IsReadOnly() || is_const_property)) {
+      const char* type = (lookup.IsReadOnly()) ? "const" : "var";
+      return ThrowRedeclarationError(isolate, type, name);
+    }
+
     // Compute the property attributes. According to ECMA-262, section
     // 13, page 71, the property must be read-only and
     // non-deletable. However, neither SpiderMonkey nor KJS creates the
     // property as read-only, so we don't either.
     int attr = NONE;
-    if (!DeclareGlobalsEvalFlag::decode(flags)) {
+    if ((flags & kDeclareGlobalsEvalFlag) == 0) {
       attr |= DONT_DELETE;
     }
-    bool is_native = DeclareGlobalsNativeFlag::decode(flags);
+    bool is_native = (flags & kDeclareGlobalsNativeFlag) != 0;
     if (is_const_property || (is_native && is_function_declaration)) {
       attr |= READ_ONLY;
     }
 
-    LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
-
     // Safari does not allow the invocation of callback setters for
     // function declarations. To mimic this behavior, we do not allow
     // the invocation of setters for function values. This makes a
@@ -1346,33 +1301,28 @@
     // handlers such as "function onload() {}". Firefox does call the
     // onload setter in those case and Safari does not. We follow
     // Safari for compatibility.
-    if (is_function_declaration) {
+    if (value->IsJSFunction()) {
+      // Do not change DONT_DELETE to false from true.
       if (lookup.IsProperty() && (lookup.type() != INTERCEPTOR)) {
-        // Do not overwrite READ_ONLY properties.
-        if (lookup.GetAttributes() & READ_ONLY) {
-          if (language_mode != CLASSIC_MODE) {
-            Handle<Object> args[] = { name };
-            return isolate->Throw(*isolate->factory()->NewTypeError(
-                "strict_cannot_assign", HandleVector(args, ARRAY_SIZE(args))));
-          }
-          continue;
-        }
-        // Do not change DONT_DELETE to false from true.
         attr |= lookup.GetAttributes() & DONT_DELETE;
       }
       PropertyAttributes attributes = static_cast<PropertyAttributes>(attr);
 
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          JSObject::SetLocalPropertyIgnoreAttributes(global, name, value,
-                                                     attributes));
+      RETURN_IF_EMPTY_HANDLE(isolate,
+                             SetLocalPropertyIgnoreAttributes(global,
+                                                              name,
+                                                              value,
+                                                              attributes));
     } else {
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          JSReceiver::SetProperty(global, name, value,
-                                  static_cast<PropertyAttributes>(attr),
-                                  language_mode == CLASSIC_MODE
-                                      ? kNonStrictMode : kStrictMode));
+      StrictModeFlag strict_mode =
+          ((flags & kDeclareGlobalsStrictModeFlag) != 0) ? kStrictMode
+                                                         : kNonStrictMode;
+      RETURN_IF_EMPTY_HANDLE(isolate,
+                             SetProperty(global,
+                                         name,
+                                         value,
+                                         static_cast<PropertyAttributes>(attr),
+                                         strict_mode));
     }
   }
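// For illustration (not part of this revert): the kind of conflict the
// read-only checks above guard against in this classic-mode build (sketch;
// the exact error text comes from ThrowRedeclarationError):
//
//   const x = 1;     // global const: a read-only global property
//   eval("var x;");  // local and read-only, so a redeclaration error is thrown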
 
@@ -1385,17 +1335,15 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
-  // Declarations are always made in a function or global context.  In the
-  // case of eval code, the context passed is the context of the caller,
-  // which may be some nested context and not the declaration context.
-  RUNTIME_ASSERT(args[0]->IsContext());
-  Handle<Context> context(Context::cast(args[0])->declaration_context());
-
+  CONVERT_ARG_CHECKED(Context, context, 0);
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
   RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3], isolate);
 
+  // Declarations are always done in a function or global context.
+  context = Handle<Context>(context->declaration_context());
+
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1404,7 +1352,9 @@
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting re-declarations.
+    // The name was declared before; check for conflicting
+    // re-declarations: This is similar to the code in parser.cc in
+    // the AstBuildingParser::Declare function.
     if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
       // Functions are not read-only.
       ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1415,42 +1365,53 @@
     // Initialize it if necessary.
     if (*initial_value != NULL) {
       if (index >= 0) {
-        ASSERT(holder.is_identical_to(context));
-        if (((attributes & READ_ONLY) == 0) ||
-            context->get(index)->IsTheHole()) {
-          context->set(index, *initial_value);
+        // The variable or constant context slot should always be in
+        // the function context or the arguments object.
+        if (holder->IsContext()) {
+          ASSERT(holder.is_identical_to(context));
+          if (((attributes & READ_ONLY) == 0) ||
+              context->get(index)->IsTheHole()) {
+            context->set(index, *initial_value);
+          }
+        } else {
+          // The holder is an arguments object.
+          Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+          Handle<Object> result = SetElement(arguments, index, initial_value,
+                                             kNonStrictMode);
+          if (result.is_null()) return Failure::Exception();
         }
       } else {
-        // Slow case: The property is in the context extension object of a
-        // function context or the global object of a global context.
-        Handle<JSObject> object = Handle<JSObject>::cast(holder);
+        // Slow case: The property is not in the FixedArray part of the context.
+        Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
         RETURN_IF_EMPTY_HANDLE(
             isolate,
-            JSReceiver::SetProperty(object, name, initial_value, mode,
-                                    kNonStrictMode));
+            SetProperty(context_ext, name, initial_value,
+                        mode, kNonStrictMode));
       }
     }
 
   } else {
     // The property is not in the function context. It needs to be
-    // "declared" in the function context's extension context or as a
-    // property of the the global object.
-    Handle<JSObject> object;
+    // "declared" in the function context's extension context, or in the
+    // global context.
+    Handle<JSObject> context_ext;
     if (context->has_extension()) {
-      object = Handle<JSObject>(JSObject::cast(context->extension()));
+      // The function context's extension context exists - use it.
+      context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
     } else {
-      // Context extension objects are allocated lazily.
-      ASSERT(context->IsFunctionContext());
-      object = isolate->factory()->NewJSObject(
+      // The function context's extension context does not exist - allocate
+      // it.
+      context_ext = isolate->factory()->NewJSObject(
           isolate->context_extension_function());
-      context->set_extension(*object);
+      // And store it in the extension slot.
+      context->set_extension(*context_ext);
     }
-    ASSERT(*object != NULL);
+    ASSERT(*context_ext != NULL);
 
     // Declare the property by setting it to the initial value if provided,
     // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
     // constant declarations).
-    ASSERT(!object->HasLocalProperty(*name));
+    ASSERT(!context_ext->HasLocalProperty(*name));
     Handle<Object> value(isolate->heap()->undefined_value(), isolate);
     if (*initial_value != NULL) value = initial_value;
     // Declaring a const context slot is a conflicting declaration if
@@ -1460,16 +1421,16 @@
     // SetProperty and no setters are invoked for those since they are
     // not real JSObjects.
     if (initial_value->IsTheHole() &&
-        !object->IsJSContextExtensionObject()) {
-      LookupResult lookup(isolate);
-      object->Lookup(*name, &lookup);
-      if (lookup.IsFound() && (lookup.type() == CALLBACKS)) {
+        !context_ext->IsJSContextExtensionObject()) {
+      LookupResult lookup;
+      context_ext->Lookup(*name, &lookup);
+      if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
         return ThrowRedeclarationError(isolate, "const", name);
       }
     }
-    RETURN_IF_EMPTY_HANDLE(
-        isolate,
-        JSReceiver::SetProperty(object, name, value, mode, kNonStrictMode));
+    RETURN_IF_EMPTY_HANDLE(isolate,
+                           SetProperty(context_ext, name, value, mode,
+                                       kNonStrictMode));
   }
 
   return isolate->heap()->undefined_value();
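// For illustration (not part of this revert): why the lazily allocated
// extension object above exists. eval can introduce bindings into a running
// function context:
//
//   function f() {
//     eval("var fresh = 1");  // declared through this runtime entry and
//     return fresh;           // stored on the context extension object
//   }
//   f();  // 1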
@@ -1479,7 +1440,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
   NoHandleAllocation nha;
   // args[0] == name
-  // args[1] == language_mode
+  // args[1] == strict_mode
   // args[2] == value (optional)
 
   // Determine if we need to assign to the variable if it already
@@ -1487,12 +1448,11 @@
   RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
   bool assign = args.length() == 3;
 
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_CHECKED(String, name, 0);
   GlobalObject* global = isolate->context()->global();
   RUNTIME_ASSERT(args[1]->IsSmi());
-  CONVERT_LANGUAGE_MODE_ARG(language_mode, 1);
-  StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE)
-      ? kNonStrictMode : kStrictMode;
+  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(1));
+  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
 
   // According to ECMA-262, section 12.2, page 62, the property must
   // not be deletable.
@@ -1505,35 +1465,67 @@
   // to assign to the property.
   // Note that objects can have hidden prototypes, so we need to traverse
   // the whole chain of hidden prototypes to do a 'local' lookup.
-  Object* object = global;
-  LookupResult lookup(isolate);
-  while (object->IsJSObject() &&
-         JSObject::cast(object)->map()->is_hidden_prototype()) {
-    JSObject* raw_holder = JSObject::cast(object);
-    raw_holder->LocalLookup(*name, &lookup);
-    if (lookup.IsFound() && lookup.type() == INTERCEPTOR) {
-      HandleScope handle_scope(isolate);
-      Handle<JSObject> holder(raw_holder);
-      PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
-      // Update the raw pointer in case it's changed due to GC.
-      raw_holder = *holder;
-      if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
-        // Found an interceptor that's not read only.
-        if (assign) {
-          return raw_holder->SetProperty(
-              &lookup, *name, args[2], attributes, strict_mode_flag);
-        } else {
-          return isolate->heap()->undefined_value();
+  JSObject* real_holder = global;
+  LookupResult lookup;
+  while (true) {
+    real_holder->LocalLookup(*name, &lookup);
+    if (lookup.IsProperty()) {
+      // Determine if this is a redeclaration of something read-only.
+      if (lookup.IsReadOnly()) {
+        // If we found readonly property on one of hidden prototypes,
+        // just shadow it.
+        if (real_holder != isolate->context()->global()) break;
+        return ThrowRedeclarationError(isolate, "const", name);
+      }
+
+      // Determine if this is a redeclaration of an intercepted read-only
+      // property and figure out if the property exists at all.
+      bool found = true;
+      PropertyType type = lookup.type();
+      if (type == INTERCEPTOR) {
+        HandleScope handle_scope(isolate);
+        Handle<JSObject> holder(real_holder);
+        PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+        real_holder = *holder;
+        if (intercepted == ABSENT) {
+          // The interceptor claims the property isn't there. We need to
+          // make sure to introduce it.
+          found = false;
+        } else if ((intercepted & READ_ONLY) != 0) {
+          // The property is present, but read-only. Since we're trying to
+          // overwrite it with a variable declaration we must throw a
+          // re-declaration error.  However if we found readonly property
+          // on one of hidden prototypes, just shadow it.
+          if (real_holder != isolate->context()->global()) break;
+          return ThrowRedeclarationError(isolate, "const", name);
         }
       }
+
+      if (found && !assign) {
+        // The global property is there and we're not assigning any value
+        // to it. Just return.
+        return isolate->heap()->undefined_value();
+      }
+
+      // Assign the value (or undefined) to the property.
+      Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
+      return real_holder->SetProperty(
+          &lookup, *name, value, attributes, strict_mode);
     }
-    object = raw_holder->GetPrototype();
+
+    Object* proto = real_holder->GetPrototype();
+    if (!proto->IsJSObject())
+      break;
+
+    if (!JSObject::cast(proto)->map()->is_hidden_prototype())
+      break;
+
+    real_holder = JSObject::cast(proto);
   }
 
-  // Reload global in case the loop above performed a GC.
   global = isolate->context()->global();
   if (assign) {
-    return global->SetProperty(*name, args[2], attributes, strict_mode_flag);
+    return global->SetProperty(*name, args[2], attributes, strict_mode);
   }
   return isolate->heap()->undefined_value();
 }
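// For illustration (not part of this revert): both global 'var' forms are
// expected to funnel into Runtime_InitializeVarGlobal above:
//
//   var y = 10;  // three-argument form: declare and assign
//   var y;       // two-argument form: y keeps its value, still 10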
@@ -1544,7 +1536,7 @@
   // of the constant is the first argument and the initial value
   // is the second.
   RUNTIME_ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+  CONVERT_ARG_CHECKED(String, name, 0);
   Handle<Object> value = args.at<Object>(1);
 
   // Get the current global object from top.
@@ -1560,7 +1552,7 @@
   // add it as a local property even in case of callbacks in the
   // prototype chain (this rules out using SetProperty).
   // We use SetLocalPropertyIgnoreAttributes instead
-  LookupResult lookup(isolate);
+  LookupResult lookup;
   global->LocalLookup(*name, &lookup);
   if (!lookup.IsProperty()) {
     return global->SetLocalPropertyIgnoreAttributes(*name,
@@ -1568,9 +1560,25 @@
                                                     attributes);
   }
 
+  // Determine if this is a redeclaration of something not
+  // read-only. In case the result is hidden behind an interceptor we
+  // need to ask it for the property attributes.
   if (!lookup.IsReadOnly()) {
+    if (lookup.type() != INTERCEPTOR) {
+      return ThrowRedeclarationError(isolate, "var", name);
+    }
+
+    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
+
+    // Throw re-declaration error if the intercepted property is present
+    // but not read-only.
+    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+      return ThrowRedeclarationError(isolate, "var", name);
+    }
+
     // Restore global object from context (in case of GC) and continue
-    // with setting the value.
+    // with setting the value because the property is either absent or
+    // read-only. We also have to redo the lookup.
     HandleScope handle_scope(isolate);
     Handle<GlobalObject> global(isolate->context()->global());
 
@@ -1578,27 +1586,28 @@
     // property through an interceptor and only do it if it's
     // uninitialized, e.g. the hole. Nirk...
     // Passing non-strict mode because the property is writable.
-    RETURN_IF_EMPTY_HANDLE(
-        isolate,
-        JSReceiver::SetProperty(global, name, value, attributes,
-                                kNonStrictMode));
+    RETURN_IF_EMPTY_HANDLE(isolate,
+                           SetProperty(global,
+                                       name,
+                                       value,
+                                       attributes,
+                                       kNonStrictMode));
     return *value;
   }
 
-  // Set the value, but only if we're assigning the initial value to a
+  // Set the value, but only if we're assigning the initial value to a
   // constant. For now, we determine this by checking if the
   // current value is the hole.
-  // Strict mode handling not needed (const is disallowed in strict mode).
+  // Strict mode handling not needed (const disallowed in strict mode).
   PropertyType type = lookup.type();
   if (type == FIELD) {
     FixedArray* properties = global->properties();
     int index = lookup.GetFieldIndex();
-    if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
+    if (properties->get(index)->IsTheHole()) {
       properties->set(index, *value);
     }
   } else if (type == NORMAL) {
-    if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
-        !lookup.IsReadOnly()) {
+    if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
       global->SetNormalizedProperty(&lookup, *value);
     }
   } else {
@@ -1618,12 +1627,11 @@
 
   Handle<Object> value(args[0], isolate);
   ASSERT(!value->IsTheHole());
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  Handle<String> name(String::cast(args[2]));
 
   // Initializations are always done in a function or global context.
-  RUNTIME_ASSERT(args[1]->IsContext());
-  Handle<Context> context(Context::cast(args[1])->declaration_context());
-
-  Handle<String> name(String::cast(args[2]));
+  context = Handle<Context>(context->declaration_context());
 
   int index;
   PropertyAttributes attributes;
@@ -1632,64 +1640,72 @@
   Handle<Object> holder =
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
-  if (index >= 0) {
-    ASSERT(holder->IsContext());
-    // Property was found in a context.  Perform the assignment if we
-    // found some non-constant or an uninitialized constant.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
-      context->set(index, *value);
-    }
-    return *value;
-  }
-
-  // The property could not be found, we introduce it as a property of the
-  // global object.
-  if (attributes == ABSENT) {
-    Handle<JSObject> global = Handle<JSObject>(
-        isolate->context()->global());
-    // Strict mode not needed (const disallowed in strict mode).
-    RETURN_IF_EMPTY_HANDLE(
-        isolate,
-        JSReceiver::SetProperty(global, name, value, NONE, kNonStrictMode));
-    return *value;
-  }
-
-  // The property was present in some function's context extension object,
-  // as a property on the subject of a with, or as a property of the global
-  // object.
-  //
-  // In most situations, eval-introduced consts should still be present in
-  // the context extension object.  However, because declaration and
-  // initialization are separate, the property might have been deleted
+  // In most situations, the property introduced by the const
+  // declaration should be present in the context extension object.
+  // However, because declaration and initialization are separate, the
+  // property might have been deleted (if it was introduced by eval)
   // before we reach the initialization point.
   //
   // Example:
   //
   //    function f() { eval("delete x; const x;"); }
   //
-  // In that case, the initialization behaves like a normal assignment.
-  Handle<JSObject> object = Handle<JSObject>::cast(holder);
+  // In that case, the initialization behaves like a normal assignment
+  // to property 'x'.
+  if (index >= 0) {
+    if (holder->IsContext()) {
+      // Property was found in a context.  Perform the assignment if we
+      // found some non-constant or an uninitialized constant.
+      Handle<Context> context = Handle<Context>::cast(holder);
+      if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+        context->set(index, *value);
+      }
+    } else {
+      // The holder is an arguments object.
+      ASSERT((attributes & READ_ONLY) == 0);
+      Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
+      RETURN_IF_EMPTY_HANDLE(
+          isolate,
+          SetElement(arguments, index, value, kNonStrictMode));
+    }
+    return *value;
+  }
 
-  if (*object == context->extension()) {
-    // This is the property that was introduced by the const declaration.
-    // Set it if it hasn't been set before.  NOTE: We cannot use
-    // GetProperty() to get the current value as it 'unholes' the value.
-    LookupResult lookup(isolate);
-    object->LocalLookupRealNamedProperty(*name, &lookup);
-    ASSERT(lookup.IsFound());  // the property was declared
+  // The property could not be found, so we introduce it in the global
+  // context.
+  if (attributes == ABSENT) {
+    Handle<JSObject> global = Handle<JSObject>(
+        isolate->context()->global());
+    // Strict mode not needed (const disallowed in strict mode).
+    RETURN_IF_EMPTY_HANDLE(
+        isolate,
+        SetProperty(global, name, value, NONE, kNonStrictMode));
+    return *value;
+  }
+
+  // The property was present in a context extension object.
+  Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+
+  if (*context_ext == context->extension()) {
+    // This is the property that was introduced by the const
+    // declaration.  Set it if it hasn't been set before.  NOTE: We
+    // cannot use GetProperty() to get the current value as it
+    // 'unholes' the value.
+    LookupResult lookup;
+    context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+    ASSERT(lookup.IsProperty());  // the property was declared
     ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
 
     PropertyType type = lookup.type();
     if (type == FIELD) {
-      FixedArray* properties = object->properties();
+      FixedArray* properties = context_ext->properties();
       int index = lookup.GetFieldIndex();
       if (properties->get(index)->IsTheHole()) {
         properties->set(index, *value);
       }
     } else if (type == NORMAL) {
-      if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
-        object->SetNormalizedProperty(&lookup, *value);
+      if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
+        context_ext->SetNormalizedProperty(&lookup, *value);
       }
     } else {
       // We should not reach here. Any real, named property should be
@@ -1697,14 +1713,13 @@
       UNREACHABLE();
     }
   } else {
-    // The property was found on some other object.  Set it if it is not a
-    // read-only property.
+    // The property was found in a different context extension object.
+    // Set it if it is not a read-only property.
     if ((attributes & READ_ONLY) == 0) {
       // Strict mode not needed (const disallowed in strict mode).
       RETURN_IF_EMPTY_HANDLE(
           isolate,
-          JSReceiver::SetProperty(object, name, value, attributes,
-                                  kNonStrictMode));
+          SetProperty(context_ext, name, value, attributes, kNonStrictMode));
     }
   }
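// For illustration (not part of this revert): fleshing out the
// "delete x; const x;" example from the comment above:
//
//   function f() {
//     eval("delete x; const x = 7;");
//     // 'x' is deleted between declaration and initialization, so the
//     // initialization behaves like a plain assignment and introduces
//     // 'x' on the global object.
//   }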
 
@@ -1716,10 +1731,10 @@
                  Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(properties, 1);
   if (object->HasFastProperties()) {
-    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+    NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
   }
   return *object;
 }
@@ -1728,12 +1743,12 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(String, subject, 1);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
   CONVERT_SMI_ARG_CHECKED(index, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
   RUNTIME_ASSERT(last_match_info->HasFastElements());
   RUNTIME_ASSERT(index >= 0);
   RUNTIME_ASSERT(index <= subject->length());
@@ -1785,8 +1800,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
   AssertNoAllocation no_alloc;
   ASSERT(args.length() == 5);
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_CHECKED(String, source, 1);
+  CONVERT_CHECKED(JSRegExp, regexp, args[0]);
+  CONVERT_CHECKED(String, source, args[1]);
 
   Object* global = args[2];
   if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -1803,17 +1818,14 @@
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
-    // Both true and false are immovable immortal objects so no need for write
-    // barrier.
-    regexp->InObjectPropertyAtPut(
-        JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
-    regexp->InObjectPropertyAtPut(
-        JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
-    regexp->InObjectPropertyAtPut(
-        JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
+    // TODO(lrn): Consider skipping write barrier on booleans as well.
+    // Both true and false should be in oldspace at all times.
+    regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
+    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
+    regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0),
-                                  SKIP_WRITE_BARRIER);  // It's a Smi.
+                                  SKIP_WRITE_BARRIER);
     return regexp;
   }
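// For illustration (not part of this revert): the in-object fields written
// on the fast path above, as observed from JS:
//
//   var re = /ab/gi;
//   re.source;      // "ab"
//   re.global;      // true
//   re.ignoreCase;  // true
//   re.multiline;   // false
//   re.lastIndex;   // 0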
 
@@ -1854,7 +1866,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
+  CONVERT_ARG_CHECKED(JSArray, prototype, 0);
   // This is necessary to enable fast checks for absence of elements
   // on Array.prototype and below.
   prototype->set_elements(isolate->heap()->empty_fixed_array());
@@ -1875,7 +1887,7 @@
                                       code,
                                       false);
   optimized->shared()->DontAdaptArguments();
-  JSReceiver::SetProperty(holder, key, optimized, NONE, kStrictMode);
+  SetProperty(holder, key, optimized, NONE, kStrictMode);
   return optimized;
 }
 
@@ -1883,7 +1895,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, holder, 0);
+  CONVERT_ARG_CHECKED(JSObject, holder, 0);
 
   InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
   InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
@@ -1898,21 +1910,11 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
+  NoHandleAllocation handle_free;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
-
-  if (!callable->IsJSFunction()) {
-    HandleScope scope(isolate);
-    bool threw = false;
-    Handle<Object> delegate =
-        Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
-    if (threw) return Failure::Exception();
-    callable = JSFunction::cast(*delegate);
-  }
-  JSFunction* function = JSFunction::cast(callable);
-
+  CONVERT_CHECKED(JSFunction, function, args[0]);
   SharedFunctionInfo* shared = function->shared();
-  if (shared->native() || !shared->is_classic_mode()) {
+  if (shared->native() || shared->strict_mode()) {
     return isolate->heap()->undefined_value();
   }
   // Returns undefined for strict or native functions, or
@@ -1927,7 +1929,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+  CONVERT_ARG_CHECKED(FixedArray, literals, 0);
   int index = args.smi_at(1);
   Handle<String> pattern = args.at<String>(2);
   Handle<String> flags = args.at<String>(3);
@@ -1958,7 +1960,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   return f->shared()->name();
 }
 
@@ -1967,8 +1969,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
   f->shared()->set_name(name);
   return isolate->heap()->undefined_value();
 }
@@ -1977,7 +1979,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   return isolate->heap()->ToBoolean(
       f->shared()->name_should_print_as_anonymous());
 }
@@ -1986,17 +1988,26 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   f->shared()->set_name_should_print_as_anonymous(true);
   return isolate->heap()->undefined_value();
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetBound) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  fun->shared()->set_bound(true);
+  return isolate->heap()->undefined_value();
+}
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   Object* obj = f->RemovePrototype();
   if (obj->IsFailure()) return obj;
 
@@ -2008,7 +2019,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
   Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
   if (!script->IsScript()) return isolate->heap()->undefined_value();
 
@@ -2017,12 +2028,11 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
-  HandleScope scope(isolate);
+  NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
-  Handle<SharedFunctionInfo> shared(f->shared());
-  return *shared->GetSourceCode();
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->shared()->GetSourceCode();
 }
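// For illustration (not part of this revert): GetSourceCode above is what
// backs Function.prototype.toString in the JS natives:
//
//   function sq(n) { return n * n; }
//   sq.toString();  // "function sq(n) { return n * n; }"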
 
 
@@ -2030,7 +2040,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
   int pos = fun->shared()->start_position();
   return Smi::FromInt(pos);
 }
@@ -2039,7 +2049,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(Code, code, 0);
+  CONVERT_CHECKED(Code, code, args[0]);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
 
   RUNTIME_ASSERT(0 <= offset && offset < code->Size());
@@ -2053,8 +2063,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
   fun->SetInstanceClassName(name);
   return isolate->heap()->undefined_value();
 }
@@ -2064,10 +2074,28 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
-  CONVERT_SMI_ARG_CHECKED(length, 1);
-  fun->shared()->set_length(length);
-  return isolate->heap()->undefined_value();
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Smi, length, args[1]);
+  fun->shared()->set_length(length->value());
+  return length;
+}
+
+
+// Creates a local, read-only property called length with the correct
+// length (when read by the user). This effectively overwrites the
+// interceptor normally used to provide the length.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionSetLength) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
+  CONVERT_CHECKED(Smi, length, args[1]);
+  MaybeObject* maybe_name =
+      isolate->heap()->AllocateStringFromAscii(CStrVector("length"));
+  String* name;
+  if (!maybe_name->To(&name)) return maybe_name;
+  PropertyAttributes attr =
+      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+  return fun->AddProperty(name, length, attr, kNonStrictMode);
 }
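// For illustration (not part of this revert): how the bind() implementation
// in v8natives.js is expected to use Runtime_FunctionSetBound (earlier) and
// Runtime_BoundFunctionSetLength (above):
//
//   function add(a, b, c) { return a + b + c; }
//   var add1 = add.bind(null, 1);
//   add1.length;  // 2: a read-only own 'length' replaces the interceptor
//   add1(2, 3);   // 6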
 
 
@@ -2075,7 +2103,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
+  CONVERT_CHECKED(JSFunction, fun, args[0]);
   ASSERT(fun->should_have_prototype());
   Object* obj;
   { MaybeObject* maybe_obj =
@@ -2089,7 +2117,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_CHECKED(JSFunction, function, args[0]);
 
   MaybeObject* maybe_name =
       isolate->heap()->AllocateStringFromAscii(CStrVector("prototype"));
@@ -2145,7 +2173,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
 }
 
@@ -2154,7 +2182,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   return isolate->heap()->ToBoolean(f->IsBuiltin());
 }
 
@@ -2163,7 +2191,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+  CONVERT_ARG_CHECKED(JSFunction, target, 0);
   Handle<Object> code = args.at<Object>(1);
 
   Handle<Context> context(target->context());
@@ -2173,12 +2201,13 @@
     Handle<JSFunction> fun = Handle<JSFunction>::cast(code);
     Handle<SharedFunctionInfo> shared(fun->shared());
 
-    if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
+    if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
       return Failure::Exception();
     }
     // Since we don't store the source for this we should never
     // optimize this.
     shared->code()->set_optimizable(false);
+
     // Set the code, scope info, formal parameter count,
     // and the length of the target function.
     target->shared()->set_code(shared->code());
@@ -2210,7 +2239,9 @@
       literals->set(JSFunction::kLiteralGlobalContextIndex,
                     context->global_context());
     }
-    target->set_literals(*literals);
+    // It's okay to skip the write barrier here because the literals
+    // are guaranteed to be in old space.
+    target->set_literals(*literals, SKIP_WRITE_BARRIER);
     target->set_next_function_link(isolate->heap()->undefined_value());
 
     if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2227,7 +2258,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   CONVERT_SMI_ARG_CHECKED(num, 1);
   RUNTIME_ASSERT(num >= 0);
   SetExpectedNofProperties(function, num);
@@ -2251,7 +2282,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_CHECKED(String, subject, args[0]);
   Object* index = args[1];
   RUNTIME_ASSERT(index->IsNumber());
 
@@ -2294,8 +2325,7 @@
  public:
   explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
       : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
-        length_(0),
-        has_non_smi_elements_(false) {
+        length_(0) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(initial_capacity > 0);
@@ -2303,8 +2333,7 @@
 
   explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
       : array_(backing_store),
-        length_(0),
-        has_non_smi_elements_(false) {
+        length_(0) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(backing_store->length() > 0);
@@ -2332,15 +2361,12 @@
   }
 
   void Add(Object* value) {
-    ASSERT(!value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
-    has_non_smi_elements_ = true;
   }
 
   void Add(Smi* value) {
-    ASSERT(value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
@@ -2365,7 +2391,7 @@
   }
 
   Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
-    FACTORY->SetContent(target_array, array_);
+    target_array->set_elements(*array_);
     target_array->set_length(Smi::FromInt(length_));
     return target_array;
   }
@@ -2373,7 +2399,6 @@
  private:
   Handle<FixedArray> array_;
   int length_;
-  bool has_non_smi_elements_;
 };
 
 
@@ -2868,7 +2893,7 @@
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
-      if (pattern_content.IsAscii()) {
+      if (pattern->IsAsciiRepresentation()) {
         FindStringIndices(isolate,
                           subject_vector,
                           pattern_content.ToAsciiVector(),
@@ -2994,7 +3019,7 @@
 
   // Shortcut for simple non-regexp global replacements
   if (is_global &&
-      regexp_handle->TypeTag() == JSRegExp::ATOM &&
+      regexp->TypeTag() == JSRegExp::ATOM &&
       compiled_replacement.simple_hint()) {
     if (subject_handle->HasOnlyAsciiChars() &&
         replacement_handle->HasOnlyAsciiChars()) {
@@ -3217,9 +3242,6 @@
 
   Address end_of_string = answer->address() + string_size;
   isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
-  if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
-    MemoryChunk::IncrementLiveBytesFromMutator(answer->address(), -delta);
-  }
 
   return *answer;
 }
@@ -3228,7 +3250,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
   ASSERT(args.length() == 4);
 
-  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_CHECKED(String, subject, args[0]);
   if (!subject->IsFlat()) {
     Object* flat_subject;
     { MaybeObject* maybe_flat_subject = subject->TryFlatten();
@@ -3239,7 +3261,7 @@
     subject = String::cast(flat_subject);
   }
 
-  CONVERT_ARG_CHECKED(String, replacement, 2);
+  CONVERT_CHECKED(String, replacement, args[2]);
   if (!replacement->IsFlat()) {
     Object* flat_replacement;
     { MaybeObject* maybe_flat_replacement = replacement->TryFlatten();
@@ -3250,8 +3272,8 @@
     replacement = String::cast(flat_replacement);
   }
 
-  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
+  CONVERT_CHECKED(JSRegExp, regexp, args[1]);
+  CONVERT_CHECKED(JSArray, last_match_info, args[3]);
 
   ASSERT(last_match_info->HasFastElements());
 
@@ -3273,79 +3295,6 @@
 }
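// For illustration (not part of this revert): the JS entry point served by
// Runtime_StringReplaceRegExpWithString, whose body ends above:
//
//   "foo bar".replace(/o/g, "0");  // "f00 bar"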
 
 
-Handle<String> Runtime::StringReplaceOneCharWithString(Isolate* isolate,
-                                                       Handle<String> subject,
-                                                       Handle<String> search,
-                                                       Handle<String> replace,
-                                                       bool* found,
-                                                       int recursion_limit) {
-  if (recursion_limit == 0) return Handle<String>::null();
-  if (subject->IsConsString()) {
-    ConsString* cons = ConsString::cast(*subject);
-    Handle<String> first = Handle<String>(cons->first());
-    Handle<String> second = Handle<String>(cons->second());
-    Handle<String> new_first =
-        StringReplaceOneCharWithString(isolate,
-                                       first,
-                                       search,
-                                       replace,
-                                       found,
-                                       recursion_limit - 1);
-    if (*found) return isolate->factory()->NewConsString(new_first, second);
-    if (new_first.is_null()) return new_first;
-
-    Handle<String> new_second =
-        StringReplaceOneCharWithString(isolate,
-                                       second,
-                                       search,
-                                       replace,
-                                       found,
-                                       recursion_limit - 1);
-    if (*found) return isolate->factory()->NewConsString(first, new_second);
-    if (new_second.is_null()) return new_second;
-
-    return subject;
-  } else {
-    int index = StringMatch(isolate, subject, search, 0);
-    if (index == -1) return subject;
-    *found = true;
-    Handle<String> first = isolate->factory()->NewSubString(subject, 0, index);
-    Handle<String> cons1 = isolate->factory()->NewConsString(first, replace);
-    Handle<String> second =
-        isolate->factory()->NewSubString(subject, index + 1, subject->length());
-    return isolate->factory()->NewConsString(cons1, second);
-  }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) {
-  ASSERT(args.length() == 3);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
-
-  // If the cons string tree is too deep, we simply abort the recursion and
-  // retry with a flattened subject string.
-  const int kRecursionLimit = 0x1000;
-  bool found = false;
-  Handle<String> result =
-      Runtime::StringReplaceOneCharWithString(isolate,
-                                              subject,
-                                              search,
-                                              replace,
-                                              &found,
-                                              kRecursionLimit);
-  if (!result.is_null()) return *result;
-  return *Runtime::StringReplaceOneCharWithString(isolate,
-                                                  FlattenGetString(subject),
-                                                  search,
-                                                  replace,
-                                                  &found,
-                                                  kRecursionLimit);
-}
-
-
 // Perform string match of pattern on subject, starting at start index.
 // Caller must ensure that 0 <= start_index <= sub->length(),
 // and should check that pat->length() + start_index <= sub->length().
@@ -3402,8 +3351,8 @@
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+  CONVERT_ARG_CHECKED(String, sub, 0);
+  CONVERT_ARG_CHECKED(String, pat, 1);
 
   Object* index = args[2];
   uint32_t start_index;
@@ -3454,8 +3403,8 @@
   HandleScope scope(isolate);  // create a new handle scope
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
+  CONVERT_ARG_CHECKED(String, sub, 0);
+  CONVERT_ARG_CHECKED(String, pat, 1);
 
   Object* index = args[2];
   uint32_t start_index;
@@ -3513,8 +3462,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(String, str1, 0);
-  CONVERT_ARG_CHECKED(String, str2, 1);
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
 
   if (str1 == str2) return Smi::FromInt(0);  // Equal.
   int str1_length = str1->length();
@@ -3561,7 +3510,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, value, 0);
+  CONVERT_CHECKED(String, value, args[0]);
   int start, end;
   // We have a fast integer-only case here to avoid a conversion to double in
   // the common case where from and to are Smis.
@@ -3587,9 +3536,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
   ASSERT_EQ(3, args.length());
 
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
+  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 1);
+  CONVERT_ARG_CHECKED(JSArray, regexp_info, 2);
   HandleScope handles;
 
   Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
@@ -3780,7 +3729,7 @@
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
 
-  OffsetsVector registers(required_registers, isolate);
+  OffsetsVector registers(required_registers);
   Vector<int32_t> register_vector(registers.vector(), registers.length());
   int subject_length = subject->length();
   bool first = true;
@@ -3853,7 +3802,7 @@
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (required_registers < 0) return RegExpImpl::RE_EXCEPTION;
 
-  OffsetsVector registers(required_registers, isolate);
+  OffsetsVector registers(required_registers);
   Vector<int32_t> register_vector(registers.vector(), registers.length());
 
   RegExpImpl::IrregexpResult result =
@@ -3872,7 +3821,7 @@
   if (result == RegExpImpl::RE_SUCCESS) {
     // Need to keep a copy of the previous match for creating last_match_info
     // at the end, so we have two vectors that we swap between.
-    OffsetsVector registers2(required_registers, isolate);
+    OffsetsVector registers2(required_registers);
     Vector<int> prev_register_vector(registers2.vector(), registers2.length());
     bool first = true;
     do {
@@ -3980,11 +3929,11 @@
   ASSERT(args.length() == 4);
   HandleScope handles(isolate);
 
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+  CONVERT_ARG_CHECKED(String, subject, 1);
   if (!subject->IsFlat()) FlattenString(subject);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
+  CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_CHECKED(JSArray, last_match_info, 2);
+  CONVERT_ARG_CHECKED(JSArray, result_array, 3);
 
   ASSERT(last_match_info->HasFastElements());
   ASSERT(regexp->GetFlags().is_global());
@@ -4052,13 +4001,13 @@
   // Slow case.
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return *isolate->factory()->nan_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
   if (isinf(value)) {
     if (value < 0) {
-      return *isolate->factory()->minus_infinity_symbol();
+      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
     }
-    return *isolate->factory()->infinity_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
   char* str = DoubleToRadixCString(value, radix);
   MaybeObject* result =
@@ -4074,13 +4023,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return *isolate->factory()->nan_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
   if (isinf(value)) {
     if (value < 0) {
-      return *isolate->factory()->minus_infinity_symbol();
+      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
     }
-    return *isolate->factory()->infinity_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4099,13 +4048,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return *isolate->factory()->nan_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
   if (isinf(value)) {
     if (value < 0) {
-      return *isolate->factory()->minus_infinity_symbol();
+      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
     }
-    return *isolate->factory()->infinity_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4124,13 +4073,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return *isolate->factory()->nan_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
   }
   if (isinf(value)) {
     if (value < 0) {
-      return *isolate->factory()->minus_infinity_symbol();
+      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
     }
-    return *isolate->factory()->infinity_symbol();
+    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4173,9 +4122,15 @@
   }
 
   if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
-    return object->GetPrototype()->GetElement(index);
+    Handle<Object> prototype = GetPrototype(object);
+    return prototype->GetElement(index);
   }
 
+  return GetElement(object, index);
+}
+
+
+MaybeObject* Runtime::GetElement(Handle<Object> object, uint32_t index) {
   return object->GetElement(index);
 }
 
@@ -4248,63 +4203,40 @@
   //
   // Additionally, we need to make sure that we do not cache results
   // for objects that require access checks.
-  if (args[0]->IsJSObject()) {
-    if (!args[0]->IsJSGlobalProxy() &&
-        !args[0]->IsAccessCheckNeeded() &&
-        args[1]->IsString()) {
-      JSObject* receiver = JSObject::cast(args[0]);
-      String* key = String::cast(args[1]);
-      if (receiver->HasFastProperties()) {
-        // Attempt to use lookup cache.
-        Map* receiver_map = receiver->map();
-        KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
-        int offset = keyed_lookup_cache->Lookup(receiver_map, key);
-        if (offset != -1) {
-          Object* value = receiver->FastPropertyAt(offset);
-          return value->IsTheHole()
-              ? isolate->heap()->undefined_value()
-              : value;
-        }
-        // Lookup cache miss.  Perform lookup and update the cache if
-        // appropriate.
-        LookupResult result(isolate);
-        receiver->LocalLookup(key, &result);
-        if (result.IsFound() && result.type() == FIELD) {
-          int offset = result.GetFieldIndex();
-          keyed_lookup_cache->Update(receiver_map, key, offset);
-          return receiver->FastPropertyAt(offset);
-        }
-      } else {
-        // Attempt dictionary lookup.
-        StringDictionary* dictionary = receiver->property_dictionary();
-        int entry = dictionary->FindEntry(key);
-        if ((entry != StringDictionary::kNotFound) &&
-            (dictionary->DetailsAt(entry).type() == NORMAL)) {
-          Object* value = dictionary->ValueAt(entry);
-          if (!receiver->IsGlobalObject()) return value;
-          value = JSGlobalPropertyCell::cast(value)->value();
-          if (!value->IsTheHole()) return value;
-          // If value is the hole do the general lookup.
-        }
+  if (args[0]->IsJSObject() &&
+      !args[0]->IsJSGlobalProxy() &&
+      !args[0]->IsAccessCheckNeeded() &&
+      args[1]->IsString()) {
+    JSObject* receiver = JSObject::cast(args[0]);
+    String* key = String::cast(args[1]);
+    if (receiver->HasFastProperties()) {
+      // Attempt to use lookup cache.
+      Map* receiver_map = receiver->map();
+      KeyedLookupCache* keyed_lookup_cache = isolate->keyed_lookup_cache();
+      int offset = keyed_lookup_cache->Lookup(receiver_map, key);
+      if (offset != -1) {
+        Object* value = receiver->FastPropertyAt(offset);
+        return value->IsTheHole() ? isolate->heap()->undefined_value() : value;
       }
-    } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) {
-      // JSObject without a string key. If the key is a Smi, check for a
-      // definite out-of-bounds access to elements, which is a strong indicator
-      // that subsequent accesses will also call the runtime. Proactively
-      // transition elements to FAST_ELEMENTS to avoid excessive boxing of
-      // doubles for those future calls in the case that the elements would
-      // become FAST_DOUBLE_ELEMENTS.
-      Handle<JSObject> js_object(args.at<JSObject>(0));
-      ElementsKind elements_kind = js_object->GetElementsKind();
-      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-          elements_kind == FAST_DOUBLE_ELEMENTS) {
-        FixedArrayBase* elements = js_object->elements();
-        if (args.at<Smi>(1)->value() >= elements->length()) {
-          MaybeObject* maybe_object = TransitionElements(js_object,
-                                                         FAST_ELEMENTS,
-                                                         isolate);
-          if (maybe_object->IsFailure()) return maybe_object;
-        }
+      // Lookup cache miss.  Perform lookup and update the cache if appropriate.
+      LookupResult result;
+      receiver->LocalLookup(key, &result);
+      if (result.IsProperty() && result.type() == FIELD) {
+        int offset = result.GetFieldIndex();
+        keyed_lookup_cache->Update(receiver_map, key, offset);
+        return receiver->FastPropertyAt(offset);
+      }
+    } else {
+      // Attempt dictionary lookup.
+      StringDictionary* dictionary = receiver->property_dictionary();
+      int entry = dictionary->FindEntry(key);
+      if ((entry != StringDictionary::kNotFound) &&
+          (dictionary->DetailsAt(entry).type() == NORMAL)) {
+        Object* value = dictionary->ValueAt(entry);
+        if (!receiver->IsGlobalObject()) return value;
+        value = JSGlobalPropertyCell::cast(value)->value();
+        if (!value->IsTheHole()) return value;
+        // If value is the hole do the general lookup.
       }
     }
   } else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4324,12 +4256,6 @@
                                     args.at<Object>(1));
 }
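
The fast path restored in the hunk above keys a small direct-mapped cache on (map, name) and stores the in-object field offset, so repeated keyed loads on same-shaped receivers skip the full property lookup; on a miss, the slow lookup runs and, when it finds a FIELD property, updates the cache. A rough sketch of that cache shape, treating a map as an opaque shape id (all names hypothetical):

#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>

// Hypothetical direct-mapped (shape, name) -> field-offset cache.
class KeyedLookupCacheSketch {
 public:
  static const int kNotFound = -1;

  int Lookup(uintptr_t shape_id, const std::string& name) const {
    const Entry& e = entries_[Index(shape_id, name)];
    return (e.shape_id == shape_id && e.name == name) ? e.offset : kNotFound;
  }

  void Update(uintptr_t shape_id, const std::string& name, int offset) {
    Entry& e = entries_[Index(shape_id, name)];  // collisions simply evict
    e.shape_id = shape_id;
    e.name = name;
    e.offset = offset;
  }

 private:
  struct Entry {
    uintptr_t shape_id = 0;
    std::string name;
    int offset = kNotFound;
  };
  static const int kSize = 64;  // power of two, so masking works as modulo

  static size_t Index(uintptr_t shape_id, const std::string& name) {
    return (shape_id ^ std::hash<std::string>()(name)) & (kSize - 1);
  }

  Entry entries_[kSize];
};

Because the cache stores bare offsets, it must never be consulted for receivers that need access checks or that sit behind a global proxy, which is exactly the guard at the top of the runtime function above.
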
 
-
-static bool IsValidAccessor(Handle<Object> obj) {
-  return obj->IsUndefined() || obj->IsSpecFunction() || obj->IsNull();
-}
-
-
 // Implements part of 8.12.9 DefineOwnProperty.
 // There are 3 cases that lead here:
 // Step 4b - define a new accessor property.
@@ -4339,21 +4265,32 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) {
   ASSERT(args.length() == 5);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  RUNTIME_ASSERT(!obj->IsNull());
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
-  RUNTIME_ASSERT(IsValidAccessor(getter));
-  CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
-  RUNTIME_ASSERT(IsValidAccessor(setter));
-  CONVERT_SMI_ARG_CHECKED(unchecked, 4);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag_setter, args[2]);
+  Object* fun = args[3];
+  RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+  CONVERT_CHECKED(Smi, flag_attr, args[4]);
+  int unchecked = flag_attr->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+  RUNTIME_ASSERT(!obj->IsNull());
+  LookupResult result;
+  obj->LocalLookupRealNamedProperty(name, &result);
 
-  bool fast = obj->HasFastProperties();
-  JSObject::DefineAccessor(obj, name, getter, setter, attr);
-  if (fast) JSObject::TransformToFastProperties(obj, 0);
-  return isolate->heap()->undefined_value();
+  PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
+  // If an existing property is either FIELD, NORMAL or CONSTANT_FUNCTION,
+  // delete it to avoid running into trouble in DefineAccessor, which
+  // handles this incorrectly if the property is readonly (it does nothing).
+  if (result.IsProperty() &&
+      (result.type() == FIELD || result.type() == NORMAL
+       || result.type() == CONSTANT_FUNCTION)) {
+    Object* ok;
+    { MaybeObject* maybe_ok =
+          obj->DeleteProperty(name, JSReceiver::NORMAL_DELETION);
+      if (!maybe_ok->ToObject(&ok)) return maybe_ok;
+    }
+  }
+  return obj->DefineAccessor(name, flag_setter->value() == 0, fun, attr);
 }
 
 // Implements part of 8.12.9 DefineOwnProperty.
@@ -4365,36 +4302,71 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
   ASSERT(args.length() == 4);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
-  CONVERT_SMI_ARG_CHECKED(unchecked, 3);
+  CONVERT_ARG_CHECKED(JSObject, js_object, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
+  Handle<Object> obj_value = args.at<Object>(2);
+
+  CONVERT_CHECKED(Smi, flag, args[3]);
+  int unchecked = flag->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
-  LookupResult result(isolate);
+  // Check if this is an element.
+  uint32_t index;
+  bool is_element = name->AsArrayIndex(&index);
+
+  // Special case for elements if any of the flags are true.
+  // If elements are in fast case we always implicitly assume that:
+  // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+  if (((unchecked & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) &&
+      is_element) {
+    // Normalize the elements to enable attributes on the property.
+    if (js_object->IsJSGlobalProxy()) {
+      // We do not need to do access checks here since these have already
+      // been performed by the call to GetOwnProperty.
+      Handle<Object> proto(js_object->GetPrototype());
+      // If proxy is detached, ignore the assignment. Alternatively,
+      // we could throw an exception.
+      if (proto->IsNull()) return *obj_value;
+      js_object = Handle<JSObject>::cast(proto);
+    }
+
+    // Don't allow element properties to be redefined on objects with external
+    // array elements.
+    if (js_object->HasExternalArrayElements()) {
+      Handle<Object> args[2] = { js_object, name };
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("redef_external_array_element",
+                                           HandleVector(args, 2));
+      return isolate->Throw(*error);
+    }
+
+    Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
+    // Make sure that we never go back to fast case.
+    dictionary->set_requires_slow_elements();
+    PropertyDetails details = PropertyDetails(attr, NORMAL);
+    Handle<SeededNumberDictionary> extended_dictionary =
+        SeededNumberDictionarySet(dictionary, index, obj_value, details);
+    if (*extended_dictionary != *dictionary) {
+      if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
+        FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
+      } else {
+        js_object->set_elements(*extended_dictionary);
+      }
+    }
+    return *obj_value;
+  }
+
+  LookupResult result;
   js_object->LocalLookupRealNamedProperty(*name, &result);
 
-  // Special case for callback properties.
-  if (result.IsFound() && result.type() == CALLBACKS) {
-    Object* callback = result.GetCallbackObject();
-    // To be compatible with Safari we do not change the value on API objects
-    // in Object.defineProperty(). Firefox disagrees here, and actually changes
-    // the value.
-    if (callback->IsAccessorInfo()) {
-      return isolate->heap()->undefined_value();
-    }
-    // Avoid redefining foreign callback as data property, just use the stored
-    // setter to update the value instead.
-    // TODO(mstarzinger): So far this only works if property attributes don't
-    // change, this should be fixed once we cleanup the underlying code.
-    if (callback->IsForeign() && result.GetAttributes() == attr) {
-      return js_object->SetPropertyWithCallback(callback,
-                                                *name,
-                                                *obj_value,
-                                                result.holder(),
-                                                kStrictMode);
-    }
+  // To be compatible with Safari we do not change the value on API objects
+  // in defineProperty. Firefox disagrees here, and actually changes the value.
+  if (result.IsProperty() &&
+      (result.type() == CALLBACKS) &&
+      result.GetCallbackObject()->IsAccessorInfo()) {
+    return isolate->heap()->undefined_value();
   }
 
   // Take special care when attributes are different and there is already
@@ -4411,7 +4383,7 @@
       // we don't have to check for null.
       js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
     }
-    JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
     // Use IgnoreAttributes version since a readonly property may be
     // overridden and SetProperty does not allow this.
     return js_object->SetLocalPropertyIgnoreAttributes(*name,
@@ -4427,13 +4399,34 @@
 }
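
Both define-property entries above validate their attribute argument the same way: the smi may carry only the three attribute bits, so masking out READ_ONLY | DONT_ENUM | DONT_DELETE must leave zero. A worked check of that bit discipline (the one-bit-per-attribute encoding below matches the public V8 API of this era, but treat it as an assumption here):

#include <cassert>

enum PropertyAttributes {
  NONE        = 0,
  READ_ONLY   = 1 << 0,
  DONT_ENUM   = 1 << 1,
  DONT_DELETE = 1 << 2
};

static bool OnlyAttributeBits(int unchecked) {
  // True iff no bit outside the three attribute bits is set.
  return (unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0;
}

int main() {
  assert(OnlyAttributeBits(NONE));
  assert(OnlyAttributeBits(READ_ONLY | DONT_DELETE));  // 0b101: fine
  assert(!OnlyAttributeBits(1 << 3));                  // stray bit: rejected
  return 0;
}
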
 
 
+// Special case for elements if any of the flags are true.
+// If elements are in fast case we always implicitly assume that:
+// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
+static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
+                                              Handle<JSObject> js_object,
+                                              uint32_t index,
+                                              Handle<Object> value,
+                                              PropertyAttributes attr) {
+  // Normalize the elements to enable attributes on the property.
+  Handle<SeededNumberDictionary> dictionary = NormalizeElements(js_object);
+  // Make sure that we never go back to fast case.
+  dictionary->set_requires_slow_elements();
+  PropertyDetails details = PropertyDetails(attr, NORMAL);
+  Handle<SeededNumberDictionary> extended_dictionary =
+      SeededNumberDictionarySet(dictionary, index, value, details);
+  if (*extended_dictionary != *dictionary) {
+    js_object->set_elements(*extended_dictionary);
+  }
+  return *value;
+}
+
+
 MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
                                         Handle<Object> object,
                                         Handle<Object> key,
                                         Handle<Object> value,
                                         PropertyAttributes attr,
                                         StrictModeFlag strict_mode) {
-  SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
   HandleScope scope(isolate);
 
   if (object->IsUndefined() || object->IsNull()) {
@@ -4444,14 +4437,6 @@
     return isolate->Throw(*error);
   }
 
-  if (object->IsJSProxy()) {
-    bool has_pending_exception = false;
-    Handle<Object> name = Execution::ToString(key, &has_pending_exception);
-    if (has_pending_exception) return Failure::Exception();
-    return JSProxy::cast(*object)->SetProperty(
-        String::cast(*name), *value, attr, strict_mode);
-  }
-
   // If the object isn't a JavaScript object, we ignore the store.
   if (!object->IsJSObject()) return *value;
 
@@ -4471,8 +4456,11 @@
       return *value;
     }
 
-    Handle<Object> result = JSObject::SetElement(
-        js_object, index, value, attr, strict_mode, set_mode);
+    if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+      return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
+    }
+
+    Handle<Object> result = SetElement(js_object, index, value, strict_mode);
     if (result.is_null()) return Failure::Exception();
     return *value;
   }
@@ -4480,13 +4468,18 @@
   if (key->IsString()) {
     Handle<Object> result;
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      result = JSObject::SetElement(
-          js_object, index, value, attr, strict_mode, set_mode);
+      if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
+        return NormalizeObjectSetElement(isolate,
+                                         js_object,
+                                         index,
+                                         value,
+                                         attr);
+      }
+      result = SetElement(js_object, index, value, strict_mode);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
-      result = JSReceiver::SetProperty(
-          js_object, key_string, value, attr, strict_mode);
+      result = SetProperty(js_object, key_string, value, attr, strict_mode);
     }
     if (result.is_null()) return Failure::Exception();
     return *value;
@@ -4499,8 +4492,7 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(
-        index, *value, attr, strict_mode, true, set_mode);
+    return js_object->SetElement(index, *value, strict_mode, true);
   } else {
     return js_object->SetProperty(*name, *value, attr, strict_mode);
   }
@@ -4528,14 +4520,12 @@
       return *value;
     }
 
-    return js_object->SetElement(
-        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+    return js_object->SetElement(index, *value, kNonStrictMode, true);
   }
 
   if (key->IsString()) {
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      return js_object->SetElement(
-          index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+      return js_object->SetElement(index, *value, kNonStrictMode, true);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
@@ -4552,8 +4542,7 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(
-        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
+    return js_object->SetElement(index, *value, kNonStrictMode, true);
   } else {
     return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
   }
@@ -4567,7 +4556,7 @@
 
   // Check if the given key is an array index.
   uint32_t index;
-  if (key->ToArrayIndex(&index)) {
+  if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
     // In Firefox/SpiderMonkey, Safari and Opera you can access the
     // characters of a string using [] notation.  In the case of a
     // String object we just need to redirect the deletion to the
@@ -4578,7 +4567,8 @@
       return isolate->heap()->true_value();
     }
 
-    return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
+    return JSObject::cast(*receiver)->DeleteElement(
+        index, JSReceiver::FORCE_DELETION);
   }
 
   Handle<String> key_string;
@@ -4613,8 +4603,10 @@
 
   StrictModeFlag strict_mode = kNonStrictMode;
   if (args.length() == 5) {
-    CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_flag, 4);
-    strict_mode = strict_mode_flag;
+    CONVERT_SMI_ARG_CHECKED(strict_unchecked, 4);
+    RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
+                   strict_unchecked == kNonStrictMode);
+    strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
   }
 
   return Runtime::SetObjectProperty(isolate,
@@ -4626,22 +4618,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
-  NoHandleAllocation ha;
-  RUNTIME_ASSERT(args.length() == 1);
-  Handle<Object> object = args.at<Object>(0);
-  return TransitionElements(object, FAST_DOUBLE_ELEMENTS, isolate);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
-  NoHandleAllocation ha;
-  RUNTIME_ASSERT(args.length() == 1);
-  Handle<Object> object = args.at<Object>(0);
-  return TransitionElements(object, FAST_ELEMENTS, isolate);
-}
-
-
 // Set the native flag on the function.
 // This is used to decide if we should transform null and undefined
 // into the global object when doing call and apply.
@@ -4659,57 +4635,18 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
-  RUNTIME_ASSERT(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_SMI_ARG_CHECKED(store_index, 1);
-  Handle<Object> value = args.at<Object>(2);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
-  CONVERT_SMI_ARG_CHECKED(literal_index, 4);
-  HandleScope scope;
-
-  Object* raw_boilerplate_object = literals->get(literal_index);
-  Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
-#if DEBUG
-  ElementsKind elements_kind = object->GetElementsKind();
-#endif
-  ASSERT(elements_kind <= FAST_DOUBLE_ELEMENTS);
-  // Smis should never trigger transitions.
-  ASSERT(!value->IsSmi());
-
-  if (value->IsNumber()) {
-    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
-    JSObject::TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
-    JSObject::TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
-    ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
-    FixedDoubleArray* double_array =
-        FixedDoubleArray::cast(object->elements());
-    HeapNumber* number = HeapNumber::cast(*value);
-    double_array->set(store_index, number->Number());
-  } else {
-    ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           elements_kind == FAST_DOUBLE_ELEMENTS);
-    JSObject::TransitionElementsKind(object, FAST_ELEMENTS);
-    JSObject::TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
-    FixedArray* object_array =
-        FixedArray::cast(object->elements());
-    object_array->set(store_index, *value);
-  }
-  return *object;
-}
-
-
 // Set a local property, even if it is READ_ONLY.  If the property does not
 // exist, it will be added with attributes NONE.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
   NoHandleAllocation ha;
   RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
   // Compute attributes.
   PropertyAttributes attributes = NONE;
   if (args.length() == 4) {
-    CONVERT_SMI_ARG_CHECKED(unchecked_value, 3);
+    CONVERT_CHECKED(Smi, value_obj, args[3]);
+    int unchecked_value = value_obj->value();
     // Only attribute bits should be set.
     RUNTIME_ASSERT(
         (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4725,10 +4662,10 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(JSReceiver, object, 0);
-  CONVERT_ARG_CHECKED(String, key, 1);
-  CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
-  return object->DeleteProperty(key, (strict_mode == kStrictMode)
+  CONVERT_CHECKED(JSReceiver, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
+  CONVERT_SMI_ARG_CHECKED(strict, 2);
+  return object->DeleteProperty(key, (strict == kStrictMode)
                                       ? JSReceiver::STRICT_DELETION
                                       : JSReceiver::NORMAL_DELETION);
 }
@@ -4755,7 +4692,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, key, 1);
+  CONVERT_CHECKED(String, key, args[1]);
 
   uint32_t index;
   const bool key_is_array_index = key->AsArrayIndex(&index);
@@ -4793,24 +4730,29 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
-  CONVERT_ARG_CHECKED(String, key, 1);
 
-  bool result = receiver->HasProperty(key);
-  if (isolate->has_pending_exception()) return Failure::Exception();
-  return isolate->heap()->ToBoolean(result);
+  // Only JS receivers can have properties.
+  if (args[0]->IsJSReceiver()) {
+    JSReceiver* receiver = JSReceiver::cast(args[0]);
+    CONVERT_CHECKED(String, key, args[1]);
+    if (receiver->HasProperty(key)) return isolate->heap()->true_value();
+  }
+  return isolate->heap()->false_value();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
-  CONVERT_SMI_ARG_CHECKED(index, 1);
 
-  bool result = receiver->HasElement(index);
-  if (isolate->has_pending_exception()) return Failure::Exception();
-  return isolate->heap()->ToBoolean(result);
+  // Only JS objects can have elements.
+  if (args[0]->IsJSObject()) {
+    JSObject* object = JSObject::cast(args[0]);
+    CONVERT_CHECKED(Smi, index_obj, args[1]);
+    uint32_t index = index_obj->value();
+    if (object->HasElement(index)) return isolate->heap()->true_value();
+  }
+  return isolate->heap()->false_value();
 }
 
 
@@ -4818,42 +4760,12 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_CHECKED(String, key, 1);
+  CONVERT_CHECKED(JSObject, object, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
 
   uint32_t index;
   if (key->AsArrayIndex(&index)) {
-    JSObject::LocalElementType type = object->HasLocalElement(index);
-    switch (type) {
-      case JSObject::UNDEFINED_ELEMENT:
-      case JSObject::STRING_CHARACTER_ELEMENT:
-        return isolate->heap()->false_value();
-      case JSObject::INTERCEPTED_ELEMENT:
-      case JSObject::FAST_ELEMENT:
-        return isolate->heap()->true_value();
-      case JSObject::DICTIONARY_ELEMENT: {
-        if (object->IsJSGlobalProxy()) {
-          Object* proto = object->GetPrototype();
-          if (proto->IsNull()) {
-            return isolate->heap()->false_value();
-          }
-          ASSERT(proto->IsJSGlobalObject());
-          object = JSObject::cast(proto);
-        }
-        FixedArray* elements = FixedArray::cast(object->elements());
-        SeededNumberDictionary* dictionary = NULL;
-        if (elements->map() ==
-            isolate->heap()->non_strict_arguments_elements_map()) {
-          dictionary = SeededNumberDictionary::cast(elements->get(1));
-        } else {
-          dictionary = SeededNumberDictionary::cast(elements);
-        }
-        int entry = dictionary->FindEntry(index);
-        ASSERT(entry != SeededNumberDictionary::kNotFound);
-        PropertyDetails details = dictionary->DetailsAt(entry);
-        return isolate->heap()->ToBoolean(!details.IsDontEnum());
-      }
-    }
+    return isolate->heap()->ToBoolean(object->HasElement(index));
   }
 
   PropertyAttributes att = object->GetLocalPropertyAttribute(key);
@@ -4864,11 +4776,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
-  bool threw = false;
-  Handle<JSArray> result = GetKeysFor(object, &threw);
-  if (threw) return Failure::Exception();
-  return *result;
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  return *GetKeysFor(object);
 }
 
 
@@ -4880,16 +4789,14 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) {
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
 
   if (raw_object->IsSimpleEnum()) return raw_object->map();
 
   HandleScope scope(isolate);
-  Handle<JSReceiver> object(raw_object);
-  bool threw = false;
-  Handle<FixedArray> content =
-      GetKeysInFixedArrayFor(object, INCLUDE_PROTOS, &threw);
-  if (threw) return Failure::Exception();
+  Handle<JSObject> object(raw_object);
+  Handle<FixedArray> content = GetKeysInFixedArrayFor(object,
+                                                      INCLUDE_PROTOS);
 
   // Test again, since cache may have been built by preceding call.
   if (object->IsSimpleEnum()) return object->map();
@@ -4921,7 +4828,7 @@
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
@@ -4954,7 +4861,7 @@
       return *isolate->factory()->NewJSArray(0);
     }
     int n;
-    n = jsproto->NumberOfLocalProperties();
+    n = jsproto->NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE));
     local_property_count[i] = n;
     total_property_count += n;
     if (i < length - 1) {
@@ -5008,7 +4915,7 @@
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   int n = obj->NumberOfLocalElements(static_cast<PropertyAttributes>(NONE));
   Handle<FixedArray> names = isolate->factory()->NewFixedArray(n);
@@ -5025,7 +4932,7 @@
   if (!args[0]->IsJSObject()) {
     return Smi::FromInt(0);
   }
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   int result = 0;
   if (obj->HasNamedInterceptor()) result |= 2;
@@ -5040,7 +4947,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   if (obj->HasNamedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForNamedInterceptor(obj, obj);
@@ -5055,7 +4962,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
 
   if (obj->HasIndexedInterceptor()) {
     v8::Handle<v8::Array> result = GetKeysForIndexedInterceptor(obj, obj);
@@ -5067,7 +4974,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
   ASSERT_EQ(args.length(), 1);
-  CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
+  CONVERT_CHECKED(JSObject, raw_object, args[0]);
   HandleScope scope(isolate);
   Handle<JSObject> object(raw_object);
 
@@ -5086,11 +4993,8 @@
     object = Handle<JSObject>::cast(proto);
   }
 
-  bool threw = false;
-  Handle<FixedArray> contents =
-      GetKeysInFixedArrayFor(object, LOCAL_ONLY, &threw);
-  if (threw) return Failure::Exception();
-
+  Handle<FixedArray> contents = GetKeysInFixedArrayFor(object,
+                                                       LOCAL_ONLY);
   // Some fast paths through GetKeysInFixedArrayFor reuse a cached
   // property array and since the result is mutable we have to create
   // a fresh clone on each invocation.
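
The comment above names the classic reason for a defensive copy: the key-enumeration fast path can return a shared, cached array, and LocalKeys hands its result to script, which may mutate it. In sketch form (names hypothetical):

#include <memory>
#include <string>
#include <vector>

using KeyList = std::vector<std::string>;

// Hypothetical cache that hands out a shared, long-lived array.
static std::shared_ptr<const KeyList> GetCachedKeys() {
  static const std::shared_ptr<const KeyList> cache =
      std::make_shared<KeyList>(KeyList{"length", "name"});
  return cache;  // shared across calls: must never be exposed mutable
}

// Callers are allowed to mutate the result, so clone on every invocation.
KeyList LocalKeysSketch() {
  return KeyList(*GetCachedKeys());  // fresh copy, cache stays pristine
}
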
@@ -5154,7 +5058,7 @@
   if (key->Equals(isolate->heap()->callee_symbol())) {
     Object* function = frame->function();
     if (function->IsJSFunction() &&
-        !JSFunction::cast(function)->shared()->is_classic_mode()) {
+        JSFunction::cast(function)->shared()->strict_mode()) {
       return isolate->Throw(*isolate->factory()->NewTypeError(
           "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
     }
@@ -5167,20 +5071,31 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
+  HandleScope scope(isolate);
+
   ASSERT(args.length() == 1);
-  Object* object = args[0];
-  return (object->IsJSObject() && !object->IsGlobalObject())
-      ? JSObject::cast(object)->TransformToFastProperties(0)
-      : object;
+  Handle<Object> object = args.at<Object>(0);
+  if (object->IsJSObject()) {
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+    if (!js_object->HasFastProperties() && !js_object->IsGlobalObject()) {
+      MaybeObject* ok = js_object->TransformToFastProperties(0);
+      if (ok->IsRetryAfterGC()) return ok;
+    }
+  }
+  return *object;
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ToSlowProperties) {
+  HandleScope scope(isolate);
+
   ASSERT(args.length() == 1);
-  Object* obj = args[0];
-  return (obj->IsJSObject() && !obj->IsJSGlobalProxy())
-      ? JSObject::cast(obj)->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0)
-      : obj;
+  Handle<Object> object = args.at<Object>(0);
+  if (object->IsJSObject() && !object->IsJSGlobalProxy()) {
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+    NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
+  }
+  return *object;
 }
 
 
@@ -5259,7 +5174,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_CHECKED(String, subject, args[0]);
   subject->TryFlatten();
 
   // Fast case: short integer or some sorts of junk values.
@@ -5315,7 +5230,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSArray, codes, 0);
+  CONVERT_CHECKED(JSArray, codes, args[0]);
   int length = Smi::cast(codes->length())->value();
 
   // Check if the string can be ASCII.
@@ -5395,7 +5310,7 @@
   const char hex_chars[] = "0123456789ABCDEF";
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_CHECKED(String, source, args[0]);
 
   source->TryFlatten();
 
@@ -5513,7 +5428,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(String, source, 0);
+  CONVERT_CHECKED(String, source, args[0]);
 
   source->TryFlatten();
 
@@ -5664,7 +5579,7 @@
   StringType* new_string = StringType::cast(new_object);
 
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqString::kHeaderSize);
+      new_string->address() + SeqAsciiString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
@@ -5752,15 +5667,16 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqString::kHeaderSize);
+      new_string->address() + SeqAsciiString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
                                                   write_cursor,
                                                   characters);
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqString::kHeaderSize));
+          new_string->address() + SeqAsciiString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
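
The quoting helpers above write through a raw cursor into a string allocated at its worst-case size and then return the unused tail via ShrinkStringAtAllocationBoundary, which is only legal because the string is the newest object in new space. The same reserve, write, shrink shape in ordinary C++ (a simplified quoter that only escapes quote and backslash):

#include <cstddef>
#include <string>

std::string QuoteSketch(const std::string& s) {
  std::string out;
  out.resize(2 + 2 * s.size());  // worst case: every char escapes, plus quotes
  size_t cursor = 0;             // plays the role of write_cursor
  out[cursor++] = '"';
  for (char c : s) {
    if (c == '"' || c == '\\') out[cursor++] = '\\';
    out[cursor++] = c;
  }
  out[cursor++] = '"';
  out.resize(cursor);            // give the unused tail back
  return out;
}
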
@@ -5770,7 +5686,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
   NoHandleAllocation ha;
-  CONVERT_ARG_CHECKED(String, str, 0);
+  CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5794,7 +5710,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
   NoHandleAllocation ha;
-  CONVERT_ARG_CHECKED(String, str, 0);
+  CONVERT_CHECKED(String, str, args[0]);
   if (!str->IsFlat()) {
     MaybeObject* try_flatten = str->TryFlatten();
     Object* flat;
@@ -5838,8 +5754,9 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqString::kHeaderSize);
+      new_string->address() + SeqAsciiString::kHeaderSize);
   *(write_cursor++) = '[';
   for (int i = 0; i < length; i++) {
     if (i != 0) *(write_cursor++) = ',';
@@ -5860,7 +5777,7 @@
 
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqString::kHeaderSize));
+          new_string->address() + SeqAsciiString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -5871,7 +5788,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSArray, array, 0);
+  CONVERT_CHECKED(JSArray, array, args[0]);
 
   if (!array->HasFastElements()) return isolate->heap()->undefined_value();
   FixedArray* elements = FixedArray::cast(array->elements());
@@ -5913,7 +5830,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
   NoHandleAllocation ha;
 
-  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_CHECKED(String, s, args[0]);
   CONVERT_SMI_ARG_CHECKED(radix, 1);
 
   s->TryFlatten();
@@ -5926,7 +5843,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
   NoHandleAllocation ha;
-  CONVERT_ARG_CHECKED(String, str, 0);
+  CONVERT_CHECKED(String, str, args[0]);
 
   // ECMA-262 section 15.1.2.3, empty string is NaN
   double value = StringToDouble(isolate->unicode_cache(),
@@ -5951,8 +5868,8 @@
   //
   // Allocate the resulting string.
   //
-  // NOTE: This assumes that the upper/lower case of an ASCII
-  // character is also ASCII.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ascii
+  // character is also ascii.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   Object* o;
@@ -6052,9 +5969,9 @@
 // This function is only useful when it can be inlined and the
 // boundaries are statically known.
 // Requires: all bytes in the input word and the boundaries must be
-// ASCII (less than 0x7F).
+// ascii (less than 0x7F).
 static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
-  // Every byte in an ASCII string is less than or equal to 0x7F.
+  // Every byte in an ascii string is less than or equal to 0x7F.
   ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
   // Use strict inequalities since in edge cases the function could be
   // further simplified.
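
The technique behind AsciiRangeMask is a SWAR (SIMD within a register) range test: when every byte of the word and both bounds are below 0x80, per-byte additions cannot carry into a neighbouring byte, and the 0x80 bit of each byte records whether that byte crossed a bound. A self-contained sketch using inclusive bounds (the real helper uses strict ones, per the comment above):

#include <cassert>
#include <cstdint>
#include <cstring>

static const uint64_t kOne = UINT64_C(0x0101010101010101);  // 1 in every byte

// Returns a word with 0x80 set in every byte of w that lies in [m, n].
// Requires every byte of w, and both bounds, to be below 0x80.
static inline uint64_t AsciiRangeMaskSketch(uint64_t w, char m, char n) {
  uint64_t ge_m = w + kOne * (0x80 - m);  // 0x80 bit set iff byte >= m
  uint64_t gt_n = w + kOne * (0x7F - n);  // 0x80 bit set iff byte >  n
  return ge_m & ~gt_n & (kOne * 0x80);
}

int main() {
  const char chars[8] = {'a', 'z', 'A', '{', '`', '0', 'm', 'q'};
  uint64_t w;
  std::memcpy(&w, chars, 8);
  uint64_t mask = AsciiRangeMaskSketch(w, 'a', 'z');
  unsigned char bytes[8];
  std::memcpy(bytes, &mask, 8);  // byte i answers for chars[i]
  assert(bytes[0] == 0x80 && bytes[1] == 0x80);  // 'a' and 'z' are in range
  assert(bytes[2] == 0 && bytes[3] == 0);        // 'A' and '{' are not
  assert(bytes[6] == 0x80 && bytes[7] == 0x80);  // 'm' and 'q' are
  return 0;
}

One such test classifies eight bytes at once, which is why the case-conversion fast path below insists on sequential ASCII strings before using it.
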
@@ -6175,17 +6092,17 @@
     Isolate* isolate,
     unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
   NoHandleAllocation ha;
-  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_CHECKED(String, s, args[0]);
   s = s->TryFlattenGetString();
 
   const int length = s->length();
   // Assume that the string is not empty; we need this assumption later
   if (length == 0) return s;
 
-  // Simpler handling of ASCII strings.
+  // Simpler handling of ascii strings.
   //
-  // NOTE: This assumes that the upper/lower case of an ASCII
-  // character is also ASCII.  This is currently the case, but it
+  // NOTE: This assumes that the upper/lower case of an ascii
+  // character is also ascii.  This is currently the case, but it
   // might break in the future if we implement more context and locale
   // dependent upper/lower conversions.
   if (s->IsSeqAsciiString()) {
@@ -6229,7 +6146,7 @@
 
 
 static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
-  return unibrow::WhiteSpace::Is(c) || c == 0x200b || c == 0xfeff;
+  return unibrow::WhiteSpace::Is(c) || c == 0x200b;
 }
 
 
@@ -6237,9 +6154,9 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, s, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
+  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
+  CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
 
   s->TryFlatten();
   int length = s->length();
@@ -6264,8 +6181,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
   ASSERT(args.length() == 3);
   HandleScope handle_scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
+  CONVERT_ARG_CHECKED(String, subject, 0);
+  CONVERT_ARG_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
 
   int subject_length = subject->length();
@@ -6312,8 +6229,6 @@
   int part_count = indices.length();
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
-  MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
-  if (maybe_result->IsFailure()) return maybe_result;
   result->set_length(Smi::FromInt(part_count));
 
   ASSERT(result->HasFastElements());
@@ -6348,7 +6263,7 @@
 }
 
 
-// Copies ASCII characters to the given fixed array looking up
+// Copies ascii characters to the given fixed array looking up
 // one-char strings in the cache. Gives up on the first char that is
 // not in the cache and fills the remainder with smi zeros. Returns
 // the length of the successfully copied prefix.
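
The contract described above is deliberately partial: copy out of the one-character string cache while it keeps hitting, zero-fill the rest, and return the prefix length so the caller can finish the tail on a slower path. A sketch of that copy-a-prefix-and-report contract (cache and types hypothetical):

#include <string>
#include <vector>

// Hypothetical one-character string cache; "" means "not cached yet".
static std::string one_char_cache[128];

// Copies cached one-char strings for chars[0..length), gives up on the
// first miss, leaves the remainder empty ("smi zero" in the original),
// and returns the length of the successfully copied prefix.
static int CopyCachedAsciiCharsToArray(const char* chars, int length,
                                       std::vector<std::string>* elements) {
  elements->assign(length, std::string());
  int i;
  for (i = 0; i < length; ++i) {
    const std::string& value =
        one_char_cache[static_cast<unsigned char>(chars[i])];
    if (value.empty()) break;  // cache miss: stop, slow path takes over
    (*elements)[i] = value;
  }
  return i;
}
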
@@ -6360,11 +6275,11 @@
   FixedArray* ascii_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
-  WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (i = 0; i < length; ++i) {
     Object* value = ascii_cache->get(chars[i]);
     if (value == undefined) break;
-    elements->set(i, value, mode);
+    ASSERT(!heap->InNewSpace(value));
+    elements->set(i, value, SKIP_WRITE_BARRIER);
   }
   if (i < length) {
     ASSERT(Smi::FromInt(0) == 0);
@@ -6386,7 +6301,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+  CONVERT_ARG_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
 
   s = FlattenGetString(s);
@@ -6437,7 +6352,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(String, value, 0);
+  CONVERT_CHECKED(String, value, args[0]);
   return value->ToObject();
 }
 
@@ -6628,8 +6543,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, str1, 0);
-  CONVERT_ARG_CHECKED(String, str2, 1);
+  CONVERT_CHECKED(String, str1, args[0]);
+  CONVERT_CHECKED(String, str2, args[1]);
   isolate->counters()->string_add_runtime()->Increment();
   return isolate->heap()->AllocateConsString(str1, str2);
 }
@@ -6677,20 +6592,17 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSArray, array, 0);
+  CONVERT_CHECKED(JSArray, array, args[0]);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_ARG_CHECKED(String, special, 2);
+  CONVERT_CHECKED(String, special, args[2]);
 
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
 
-  MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
-  if (maybe_result->IsFailure()) return maybe_result;
-
   int special_length = special->length();
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6752,7 +6664,6 @@
         ascii = false;
       }
     } else {
-      ASSERT(!elt->IsTheHole());
       return isolate->Throw(isolate->heap()->illegal_argument_symbol());
     }
     if (increment > String::kMaxLength - position) {
@@ -6794,13 +6705,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSArray, array, 0);
+  CONVERT_CHECKED(JSArray, array, args[0]);
   if (!args[1]->IsSmi()) {
     isolate->context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
   int array_length = args.smi_at(1);
-  CONVERT_ARG_CHECKED(String, separator, 2);
+  CONVERT_CHECKED(String, separator, args[2]);
 
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6918,11 +6829,10 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
-  RUNTIME_ASSERT(elements_array->HasFastElements() ||
-                 elements_array->HasFastSmiOnlyElements());
+  CONVERT_CHECKED(JSArray, elements_array, args[0]);
+  RUNTIME_ASSERT(elements_array->HasFastElements());
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
-  CONVERT_ARG_CHECKED(String, separator, 2);
+  CONVERT_CHECKED(String, separator, args[2]);
   // elements_array is fast-mode JSarray of alternating positions
   // (increasing order) and strings.
   // array_length is length of original array (used to add separators);
@@ -6944,8 +6854,7 @@
   FixedArray* elements = FixedArray::cast(elements_array->elements());
   for (int i = 0; i < elements_length; i += 2) {
     RUNTIME_ASSERT(elements->get(i)->IsNumber());
-    RUNTIME_ASSERT(elements->get(i + 1)->IsString());
-    String* string = String::cast(elements->get(i + 1));
+    CONVERT_CHECKED(String, string, elements->get(i + 1));
     int length = string->length();
     if (is_ascii && !string->IsAsciiRepresentation()) {
       is_ascii = false;
@@ -7103,8 +7012,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(String, x, 0);
-  CONVERT_ARG_CHECKED(String, y, 1);
+  CONVERT_CHECKED(String, x, args[0]);
+  CONVERT_CHECKED(String, y, args[1]);
 
   bool not_equal = !x->Equals(y);
   // This is slightly convoluted because the value that signifies
@@ -7135,8 +7044,12 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  CONVERT_SMI_ARG_CHECKED(x_value, 0);
-  CONVERT_SMI_ARG_CHECKED(y_value, 1);
+
+  // Extract the integer values from the Smis.
+  CONVERT_CHECKED(Smi, x, args[0]);
+  CONVERT_CHECKED(Smi, y, args[1]);
+  int x_value = x->value();
+  int y_value = y->value();
 
   // If the integers are equal so are the string representations.
   if (x_value == y_value) return Smi::FromInt(EQUAL);
@@ -7276,8 +7189,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_CHECKED(String, x, 0);
-  CONVERT_ARG_CHECKED(String, y, 1);
+  CONVERT_CHECKED(String, x, args[0]);
+  CONVERT_CHECKED(String, y, args[1]);
 
   isolate->counters()->string_compare_runtime()->Increment();
 
@@ -7412,8 +7325,7 @@
   return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
 }
 
-// Slow version of Math.pow.  We check for fast paths for special cases.
-// Used if SSE2/VFP3 is not available.
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
@@ -7429,38 +7341,22 @@
   }
 
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
-  int y_int = static_cast<int>(y);
-  double result;
-  if (y == y_int) {
-    result = power_double_int(x, y_int);  // Returns 1 if exponent is 0.
-  } else  if (y == 0.5) {
-    result = (isinf(x)) ? V8_INFINITY
-                        : fast_sqrt(x + 0.0);  // Convert -0 to +0.
-  } else if (y == -0.5) {
-    result = (isinf(x)) ? 0
-                        : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
-  } else {
-    result = power_double_double(x, y);
-  }
-  if (isnan(result)) return isolate->heap()->nan_value();
-  return isolate->heap()->AllocateHeapNumber(result);
+  return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
 }
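
The fast paths being deleted above special-case y == ±0.5 instead of calling sqrt directly because IEEE sqrt preserves the sign of zero, while ECMA-262 requires Math.pow(-0, 0.5) to be +0; the `x + 0.0` rewrite canonicalizes -0 to +0 first, and the isinf guard covers pow(-Infinity, 0.5), which must be +Infinity even though sqrt(-Infinity) is NaN. A quick check of both corners in plain C++ (standard library semantics, not V8 code):

#include <cassert>
#include <cmath>

int main() {
  double minus_zero = -0.0;
  assert(std::signbit(std::sqrt(minus_zero)));         // sqrt(-0) == -0
  assert(!std::signbit(std::sqrt(minus_zero + 0.0)));  // (-0 + 0) == +0
  assert(std::isnan(std::sqrt(-INFINITY)));            // sqrt alone is wrong
  assert(std::isinf(std::pow(-INFINITY, 0.5)));        // pow gets +inf right
  return 0;
}
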
 
-// Fast version of Math.pow if we know that y is not an integer and y is not
-// -0.5 or 0.5.  Used as slow case from full codegen.
+// Fast version of Math.pow if we know that y is not an integer and
+// y is not -0.5 or 0.5. Used as slow case from codegen.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  isolate->counters()->math_pow()->Increment();
-
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
   if (y == 0) {
     return Smi::FromInt(1);
+  } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
+    return isolate->heap()->nan_value();
   } else {
-    double result = power_double_double(x, y);
-    if (isnan(result)) return isolate->heap()->nan_value();
-    return isolate->heap()->AllocateHeapNumber(result);
+    return isolate->heap()->AllocateHeapNumber(pow(x, y));
   }
 }
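
The extra `isnan(y) || ((x == 1 || x == -1) && isinf(y))` filter restored above exists because C99 pow and ECMA-262 disagree on these inputs: on IEC 60559 systems pow(1, y) is 1 for any y, NaN included, and pow(-1, ±Infinity) is 1, while ECMA-262 15.8.2.13 specifies NaN for a NaN exponent and for |x| == 1 with an infinite exponent. A check of the divergence:

#include <cassert>
#include <cmath>

int main() {
  // C99 Annex F answers (on IEC 60559 systems): all exactly 1.0.
  assert(std::pow(1.0, NAN) == 1.0);
  assert(std::pow(-1.0, INFINITY) == 1.0);
  assert(std::pow(1.0, -INFINITY) == 1.0);
  // ECMA-262 wants NaN for every one of them, so the runtime filters
  // these inputs before delegating to the C library:
  double x = -1.0, y = INFINITY;
  assert(std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y)));
  return 0;
}
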
 
@@ -7490,7 +7386,7 @@
 
   // We compare with kSmiValueSize - 2 because (2^30 - 0.1) has exponent 29 and
   // should be rounded to 2^30, which is not smi (for 31-bit smis, similar
-  // argument holds for 32-bit smis).
+  // argument holds for 32-bit smis).
   if (!sign && exponent < kSmiValueSize - 2) {
     return Smi::FromInt(static_cast<int>(value + 0.5));
   }
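
To make the bound above concrete: 2^30 - 0.1 is 1.999... * 2^29, so its unbiased exponent really is 29, yet rounding it lands on 2^30, one past the 31-bit smi maximum of 2^30 - 1. The fast path therefore requires exponent < kSmiValueSize - 2, and 29 < 29 correctly fails. A worked check (assuming 31-bit smis):

#include <cassert>
#include <cmath>

int main() {
  const int kSmiValueSize = 31;       // 31-bit smis: max value is 2^30 - 1
  double value = 1073741824.0 - 0.1;  // 2^30 - 0.1
  assert(std::ilogb(value) == 29);    // exponent 29, as the comment says
  assert(!(std::ilogb(value) < kSmiValueSize - 2));  // 29 < 29 is false
  assert(value + 0.5 >= 1073741824.0);  // rounding would overflow the smi
  return 0;
}
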
@@ -7524,7 +7420,7 @@
   isolate->counters()->math_sqrt()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->heap()->AllocateHeapNumber(fast_sqrt(x));
+  return isolate->heap()->AllocateHeapNumber(sqrt(x));
 }
 
 
@@ -7538,51 +7434,370 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
+static int MakeDay(int year, int month, int day) {
+  static const int day_from_month[] = {0, 31, 59, 90, 120, 151,
+                                       181, 212, 243, 273, 304, 334};
+  static const int day_from_month_leap[] = {0, 31, 60, 91, 121, 152,
+                                            182, 213, 244, 274, 305, 335};
 
-  CONVERT_SMI_ARG_CHECKED(year, 0);
-  CONVERT_SMI_ARG_CHECKED(month, 1);
+  year += month / 12;
+  month %= 12;
+  if (month < 0) {
+    year--;
+    month += 12;
+  }
 
-  return Smi::FromInt(isolate->date_cache()->DaysFromYearMonth(year, month));
+  ASSERT(month >= 0);
+  ASSERT(month < 12);
+
+  // year_delta is an arbitrary number such that:
+  // a) year_delta = -1 (mod 400)
+  // b) year + year_delta > 0 for years in the range defined by
+  //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+  //    Jan 1 1970. This is required so that we don't run into integer
+  //    division of negative numbers.
+  // c) there shouldn't be an overflow for 32-bit integers in the following
+  //    operations.
+  static const int year_delta = 399999;
+  static const int base_day = 365 * (1970 + year_delta) +
+                              (1970 + year_delta) / 4 -
+                              (1970 + year_delta) / 100 +
+                              (1970 + year_delta) / 400;
+
+  int year1 = year + year_delta;
+  int day_from_year = 365 * year1 +
+                      year1 / 4 -
+                      year1 / 100 +
+                      year1 / 400 -
+                      base_day;
+
+  if (year % 4 || (year % 100 == 0 && year % 400 != 0)) {
+    return day_from_year + day_from_month[month] + day - 1;
+  }
+
+  return day_from_year + day_from_month_leap[month] + day - 1;
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) {
-  HandleScope scope(isolate);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
+  NoHandleAllocation ha;
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
-  CONVERT_DOUBLE_ARG_CHECKED(time, 1);
-  CONVERT_SMI_ARG_CHECKED(is_utc, 2);
+  CONVERT_SMI_ARG_CHECKED(year, 0);
+  CONVERT_SMI_ARG_CHECKED(month, 1);
+  CONVERT_SMI_ARG_CHECKED(date, 2);
 
-  DateCache* date_cache = isolate->date_cache();
+  return Smi::FromInt(MakeDay(year, month, date));
+}
 
-  Object* value = NULL;
-  bool is_value_nan = false;
-  if (isnan(time)) {
-    value = isolate->heap()->nan_value();
-    is_value_nan = true;
-  } else if (!is_utc &&
-             (time < -DateCache::kMaxTimeBeforeUTCInMs ||
-              time > DateCache::kMaxTimeBeforeUTCInMs)) {
-    value = isolate->heap()->nan_value();
-    is_value_nan = true;
+
+static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
+static const int kDaysIn4Years = 4 * 365 + 1;
+static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
+static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
+static const int kDays1970to2000 = 30 * 365 + 7;
+static const int kDaysOffset = 1000 * kDaysIn400Years + 5 * kDaysIn400Years -
+                               kDays1970to2000;
+static const int kYearsOffset = 400000;
+
+static const char kDayInYear[] = {
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30,
+      1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+      22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+
+static const char kMonthInYear[] = {
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+
+      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+      0, 0, 0, 0, 0, 0,
+      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+      1, 1, 1,
+      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+      2, 2, 2, 2, 2, 2,
+      3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+      3, 3, 3, 3, 3,
+      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+      4, 4, 4, 4, 4, 4,
+      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 5, 5, 5, 5,
+      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+      6, 6, 6, 6, 6, 6,
+      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+      7, 7, 7, 7, 7, 7,
+      8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+      8, 8, 8, 8, 8,
+      9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
+      9, 9, 9, 9, 9, 9,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11};
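
The two 1461-entry tables above map a day index within a 4-year block, laid out as (common, common, leap, common) to match kDays4Years, directly to a day-of-month and a month. A sketch of how they could be generated rather than hand-maintained (standalone, not V8 code):

    #include <cstdio>

    int main() {
      static const int kDaysInMonth[2][12] = {
          {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},   // common year
          {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}};  // leap year
      static const int kIsLeap[4] = {0, 0, 1, 0};  // e.g. 1970, 1971, 1972, 1973
      for (int y = 0; y < 4; y++) {
        for (int m = 0; m < 12; m++) {
          for (int d = 1; d <= kDaysInMonth[kIsLeap[y]][m]; d++) {
            // One entry of kDayInYear and kMonthInYear per calendar day.
            printf("%d/%d ", d, m);
          }
        }
      }
      printf("\n");
      return 0;
    }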
+
+
+// This function works for dates from 1970 to 2099.
+static inline void DateYMDFromTimeAfter1970(int date,
+                                            int& year, int& month, int& day) {
+#ifdef DEBUG
+  int save_date = date;  // Need this for ASSERT at the end.
+#endif
+
+  year = 1970 + (4 * date + 2) / kDaysIn4Years;
+  date %= kDaysIn4Years;
+
+  month = kMonthInYear[date];
+  day = kDayInYear[date];
+
+  ASSERT(MakeDay(year, month, day) == save_date);
+}
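
The year line above is integer rounding of date / 365.25: written as (4 * date + 2) / 1461, it lands on the correct side of every year boundary in the (common, common, leap, common) cycle. Spot checks, with days counted from Jan 1 1970 as day 0 (standalone sketch):

    #include <cassert>

    int main() {
      static const int kDaysIn4Years = 4 * 365 + 1;
      assert(1970 + (4 * 0 + 2) / kDaysIn4Years == 1970);      // Jan 1 1970
      assert(1970 + (4 * 364 + 2) / kDaysIn4Years == 1970);    // Dec 31 1970
      assert(1970 + (4 * 365 + 2) / kDaysIn4Years == 1971);    // Jan 1 1971
      assert(1970 + (4 * 1095 + 2) / kDaysIn4Years == 1972);   // Dec 31 1972 (leap)
      assert(1970 + (4 * 1096 + 2) / kDaysIn4Years == 1973);   // Jan 1 1973
      assert(1970 + (4 * 10957 + 2) / kDaysIn4Years == 2000);  // Jan 1 2000
      return 0;
    }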
+
+
+static inline void DateYMDFromTimeSlow(int date,
+                                       int& year, int& month, int& day) {
+#ifdef DEBUG
+  int save_date = date;  // Need this for ASSERT at the end.
+#endif
+
+  date += kDaysOffset;
+  year = 400 * (date / kDaysIn400Years) - kYearsOffset;
+  date %= kDaysIn400Years;
+
+  ASSERT(MakeDay(year, 0, 1) + date == save_date);
+
+  date--;
+  int yd1 = date / kDaysIn100Years;
+  date %= kDaysIn100Years;
+  year += 100 * yd1;
+
+  date++;
+  int yd2 = date / kDaysIn4Years;
+  date %= kDaysIn4Years;
+  year += 4 * yd2;
+
+  date--;
+  int yd3 = date / 365;
+  date %= 365;
+  year += yd3;
+
+  bool is_leap = (!yd1 || yd2) && !yd3;
+
+  ASSERT(date >= -1);
+  ASSERT(is_leap || (date >= 0));
+  ASSERT((date < 365) || (is_leap && (date < 366)));
+  ASSERT(is_leap == ((year % 4 == 0) && (year % 100 || (year % 400 == 0))));
+  ASSERT(is_leap || ((MakeDay(year, 0, 1) + date) == save_date));
+  ASSERT(!is_leap || ((MakeDay(year, 0, 1) + date + 1) == save_date));
+
+  if (is_leap) {
+    day = kDayInYear[2*365 + 1 + date];
+    month = kMonthInYear[2*365 + 1 + date];
   } else {
-    time = is_utc ? time : date_cache->ToUTC(static_cast<int64_t>(time));
-    if (time < -DateCache::kMaxTimeInMs ||
-        time > DateCache::kMaxTimeInMs) {
-      value = isolate->heap()->nan_value();
-      is_value_nan = true;
-    } else  {
-      MaybeObject* maybe_result =
-          isolate->heap()->AllocateHeapNumber(DoubleToInteger(time));
-      if (!maybe_result->ToObject(&value)) return maybe_result;
-    }
+    day = kDayInYear[date];
+    month = kMonthInYear[date];
   }
-  date->SetValue(value, is_value_nan);
-  return *date;
+
+  ASSERT(MakeDay(year, month, day) == save_date);
+}
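
The date--/date++ pattern above compensates for where the leap day sits in each cycle: a 400-year cycle ends on a leap year, a 100-year cycle does not. Replaying the division cascade for day 0 (the epoch) as a standalone sketch, with the constants from above inlined:

    #include <cassert>

    int main() {
      int date = 0 + 146816528;                   // kDaysOffset
      int year = 400 * (date / 146097) - 400000;  // -> 1600
      date %= 146097;                             // -> 135140
      date--;
      int yd1 = date / 36524;  date %= 36524;  year += 100 * yd1;  // -> 1900
      date++;
      int yd2 = date / 1461;   date %= 1461;   year += 4 * yd2;    // -> 1968
      date--;
      int yd3 = date / 365;    date %= 365;    year += yd3;        // -> 1970
      bool is_leap = (!yd1 || yd2) && !yd3;
      assert(year == 1970 && date == 0 && !is_leap);  // Jan 1 1970, a common year
      return 0;
    }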
+
+
+static inline void DateYMDFromTime(int date,
+                                   int& year, int& month, int& day) {
+  if (date >= 0 && date < 32 * kDaysIn4Years) {
+    DateYMDFromTimeAfter1970(date, year, month, day);
+  } else {
+    DateYMDFromTimeSlow(date, year, month, day);
+  }
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateYMDFromTime) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+
+  CONVERT_DOUBLE_ARG_CHECKED(t, 0);
+  CONVERT_CHECKED(JSArray, res_array, args[1]);
+
+  int year, month, day;
+  DateYMDFromTime(static_cast<int>(floor(t / 86400000)), year, month, day);
+
+  RUNTIME_ASSERT(res_array->elements()->map() ==
+                 isolate->heap()->fixed_array_map());
+  FixedArray* elms = FixedArray::cast(res_array->elements());
+  RUNTIME_ASSERT(elms->length() == 3);
+
+  elms->set(0, Smi::FromInt(year));
+  elms->set(1, Smi::FromInt(month));
+  elms->set(2, Smi::FromInt(day));
+
+  return isolate->heap()->undefined_value();
 }
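
Note the calling convention here: instead of allocating a result object, the runtime function fills a three-element array preallocated by the JavaScript caller (t is in milliseconds, so floor(t / 86400000) is the day number). The RUNTIME_ASSERTs guard against a caller whose array is not backed by a plain FixedArray of length 3.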
 
 
@@ -7631,14 +7846,14 @@
         --index;
       }
 
-      Handle<ScopeInfo> scope_info(callee->shared()->scope_info());
+      ScopeInfo<> scope_info(callee->shared()->scope_info());
       while (index >= 0) {
         // Detect duplicate names to the right in the parameter list.
-        Handle<String> name(scope_info->ParameterName(index));
-        int context_local_count = scope_info->ContextLocalCount();
+        Handle<String> name = scope_info.parameter_name(index);
+        int context_slot_count = scope_info.number_of_context_slots();
         bool duplicate = false;
         for (int j = index + 1; j < parameter_count; ++j) {
-          if (scope_info->ParameterName(j) == *name) {
+          if (scope_info.parameter_name(j).is_identical_to(name)) {
             duplicate = true;
             break;
           }
@@ -7653,16 +7868,17 @@
           // The context index goes in the parameter map with a hole in the
           // arguments array.
           int context_index = -1;
-          for (int j = 0; j < context_local_count; ++j) {
-            if (scope_info->ContextLocalName(j) == *name) {
+          for (int j = Context::MIN_CONTEXT_SLOTS;
+               j < context_slot_count;
+               ++j) {
+            if (scope_info.context_slot_name(j).is_identical_to(name)) {
               context_index = j;
               break;
             }
           }
           ASSERT(context_index >= 0);
           arguments->set_the_hole(index);
-          parameter_map->set(index + 2, Smi::FromInt(
-              Context::MIN_CONTEXT_SLOTS + context_index));
+          parameter_map->set(index + 2, Smi::FromInt(context_index));
         }
 
         --index;
@@ -7705,7 +7921,7 @@
 
     AssertNoAllocation no_gc;
     FixedArray* array = reinterpret_cast<FixedArray*>(obj);
-    array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
+    array->set_map(isolate->heap()->fixed_array_map());
     array->set_length(length);
 
     WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -7721,9 +7937,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
-  CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1);
+  CONVERT_BOOLEAN_CHECKED(pretenure, args[2]);
 
   // The caller ensures that we pretenure closures that are assigned
   // directly to properties.
@@ -7736,170 +7952,76 @@
 }
 
 
-// Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
-static SmartArrayPointer<Handle<Object> > GetCallerArguments(
-    int prefix_argc,
-    int* total_argc) {
+static SmartArrayPointer<Object**> GetNonBoundArguments(int bound_argc,
+                                                        int* total_argc) {
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it;
   JavaScriptFrame* frame = it.frame();
   List<JSFunction*> functions(2);
   frame->GetFunctions(&functions);
   if (functions.length() > 1) {
-    int inlined_jsframe_index = functions.length() - 1;
-    JSFunction* inlined_function = functions[inlined_jsframe_index];
-    Vector<SlotRef> args_slots =
-        SlotRef::ComputeSlotMappingForArguments(
-            frame,
-            inlined_jsframe_index,
-            inlined_function->shared()->formal_parameter_count());
+    int inlined_frame_index = functions.length() - 1;
+    JSFunction* inlined_function = functions[inlined_frame_index];
+    int args_count = inlined_function->shared()->formal_parameter_count();
+    ScopedVector<SlotRef> args_slots(args_count);
+    SlotRef::ComputeSlotMappingForArguments(frame,
+                                            inlined_frame_index,
+                                            &args_slots);
 
-    int args_count = args_slots.length();
-
-    *total_argc = prefix_argc + args_count;
-    SmartArrayPointer<Handle<Object> > param_data(
-        NewArray<Handle<Object> >(*total_argc));
+    *total_argc = bound_argc + args_count;
+    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = args_slots[i].GetValue();
-      param_data[prefix_argc + i] = val;
+      param_data[bound_argc + i] = val.location();
     }
-
-    args_slots.Dispose();
-
     return param_data;
   } else {
     it.AdvanceToArgumentsFrame();
     frame = it.frame();
     int args_count = frame->ComputeParametersCount();
 
-    *total_argc = prefix_argc + args_count;
-    SmartArrayPointer<Handle<Object> > param_data(
-        NewArray<Handle<Object> >(*total_argc));
+    *total_argc = bound_argc + args_count;
+    SmartArrayPointer<Object**> param_data(NewArray<Object**>(*total_argc));
     for (int i = 0; i < args_count; i++) {
       Handle<Object> val = Handle<Object>(frame->GetParameter(i));
-      param_data[prefix_argc + i] = val;
+      param_data[bound_argc + i] = val.location();
     }
     return param_data;
   }
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
-  RUNTIME_ASSERT(args[3]->IsNumber());
-  Handle<Object> bindee = args.at<Object>(1);
-
-  // TODO(lrn): Create bound function in C++ code from premade shared info.
-  bound_function->shared()->set_bound(true);
-  // Get all arguments of calling function (Function.prototype.bind).
-  int argc = 0;
-  SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
-  // Don't count the this-arg.
-  if (argc > 0) {
-    ASSERT(*arguments[0] == args[2]);
-    argc--;
-  } else {
-    ASSERT(args[2]->IsUndefined());
-  }
-  // Initialize array of bindings (function, this, and any existing arguments
-  // if the function was already bound).
-  Handle<FixedArray> new_bindings;
-  int i;
-  if (bindee->IsJSFunction() && JSFunction::cast(*bindee)->shared()->bound()) {
-    Handle<FixedArray> old_bindings(
-        JSFunction::cast(*bindee)->function_bindings());
-    new_bindings =
-        isolate->factory()->NewFixedArray(old_bindings->length() + argc);
-    bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
-    i = 0;
-    for (int n = old_bindings->length(); i < n; i++) {
-      new_bindings->set(i, old_bindings->get(i));
-    }
-  } else {
-    int array_size = JSFunction::kBoundArgumentsStartIndex + argc;
-    new_bindings = isolate->factory()->NewFixedArray(array_size);
-    new_bindings->set(JSFunction::kBoundFunctionIndex, *bindee);
-    new_bindings->set(JSFunction::kBoundThisIndex, args[2]);
-    i = 2;
-  }
-  // Copy arguments, skipping the first which is "this_arg".
-  for (int j = 0; j < argc; j++, i++) {
-    new_bindings->set(i, *arguments[j + 1]);
-  }
-  new_bindings->set_map_no_write_barrier(
-      isolate->heap()->fixed_cow_array_map());
-  bound_function->set_function_bindings(*new_bindings);
-
-  // Update length.
-  Handle<String> length_symbol = isolate->factory()->length_symbol();
-  Handle<Object> new_length(args.at<Object>(3));
-  PropertyAttributes attr =
-      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
-  ForceSetProperty(bound_function, length_symbol, new_length, attr);
-  return *bound_function;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BoundFunctionGetBindings) {
-  HandleScope handles(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
-  if (callable->IsJSFunction()) {
-    Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
-    if (function->shared()->bound()) {
-      Handle<FixedArray> bindings(function->function_bindings());
-      ASSERT(bindings->map() == isolate->heap()->fixed_cow_array_map());
-      return *isolate->factory()->NewJSArrayWithElements(bindings);
-    }
-  }
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  ASSERT(args.length() == 2);
   // First argument is a function to use as a constructor.
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  RUNTIME_ASSERT(function->shared()->bound());
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
-  // The argument is a bound function. Extract its bound arguments
-  // and callable.
-  Handle<FixedArray> bound_args =
-      Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
-  int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
-  Handle<Object> bound_function(
-      JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
-  ASSERT(!bound_function->IsJSFunction() ||
-         !Handle<JSFunction>::cast(bound_function)->shared()->bound());
+  // Second argument is either null or an array of bound arguments.
+  Handle<FixedArray> bound_args;
+  int bound_argc = 0;
+  if (!args[1]->IsNull()) {
+    CONVERT_ARG_CHECKED(JSArray, params, 1);
+    RUNTIME_ASSERT(params->HasFastElements());
+    bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
+    bound_argc = Smi::cast(params->length())->value();
+  }
 
   int total_argc = 0;
-  SmartArrayPointer<Handle<Object> > param_data =
-      GetCallerArguments(bound_argc, &total_argc);
+  SmartArrayPointer<Object**> param_data =
+      GetNonBoundArguments(bound_argc, &total_argc);
   for (int i = 0; i < bound_argc; i++) {
-    param_data[i] = Handle<Object>(bound_args->get(
-        JSFunction::kBoundArgumentsStartIndex + i));
+    Handle<Object> val = Handle<Object>(bound_args->get(i));
+    param_data[i] = val.location();
   }
 
-  if (!bound_function->IsJSFunction()) {
-    bool exception_thrown;
-    bound_function = Execution::TryGetConstructorDelegate(bound_function,
-                                                          &exception_thrown);
-    if (exception_thrown) return Failure::Exception();
-  }
-  ASSERT(bound_function->IsJSFunction());
-
   bool exception = false;
   Handle<Object> result =
-      Execution::New(Handle<JSFunction>::cast(bound_function),
-                     total_argc, *param_data, &exception);
+      Execution::New(function, total_argc, *param_data, &exception);
   if (exception) {
-    return Failure::Exception();
+      return Failure::Exception();
   }
+
   ASSERT(!result.is_null());
   return *result;
 }
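
Argument layout for construct-from-bound in this older scheme: bound arguments come first, then the actuals from the 'new' site. So for bound = f.bind(null, a, b) and new bound(c, d) (illustrative names, not from this patch), GetNonBoundArguments leaves the first two slots of param_data free and the loop above fills them, yielding [a, b, c, d] for Execution::New.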
@@ -7912,9 +8034,12 @@
     prototype = Handle<Object>(function->instance_prototype(), isolate);
   }
   if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
-    ConstructStubCompiler compiler(isolate);
-    Handle<Code> code = compiler.CompileConstructStub(function);
-    function->shared()->set_construct_stub(*code);
+    ConstructStubCompiler compiler;
+    MaybeObject* code = compiler.CompileConstructStub(*function);
+    if (!code->IsFailure()) {
+      function->shared()->set_construct_stub(
+          Code::cast(code->ToObjectUnchecked()));
+    }
   }
 }
 
@@ -7973,11 +8098,9 @@
   // available. We cannot use EnsureCompiled because that forces a
   // compilation through the shared function info which makes it
   // impossible for us to optimize.
-  if (!function->is_compiled()) {
-    JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
-  }
-
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
+  if (!function->is_compiled()) CompileLazy(function, CLEAR_EXCEPTION);
+
   if (!function->has_initial_map() &&
       shared->IsInobjectSlackTrackingInProgress()) {
     // The tracking is already in progress for another function. We can only
@@ -8005,7 +8128,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   function->shared()->CompleteInobjectSlackTracking();
   TrySettingInlineConstructStub(isolate, function);
 
@@ -8028,7 +8151,7 @@
 
   // Compile the target function.
   ASSERT(!function->is_compiled());
-  if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
+  if (!CompileLazy(function, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -8043,8 +8166,6 @@
   ASSERT(args.length() == 1);
   Handle<JSFunction> function = args.at<JSFunction>(0);
 
-  function->shared()->set_profiler_ticks(0);
-
   // If the function is not compiled ignore the lazy
   // recompilation. This can happen if the debugger is activated and
   // the function is returned to the not compiled state.
@@ -8067,9 +8188,7 @@
     function->ReplaceCode(function->shared()->code());
     return function->code();
   }
-  if (JSFunction::CompileOptimized(function,
-                                   AstNode::kNoNumber,
-                                   CLEAR_EXCEPTION)) {
+  if (CompileOptimized(function, AstNode::kNoNumber, CLEAR_EXCEPTION)) {
     return function->code();
   }
   if (FLAG_trace_opt) {
@@ -8082,33 +8201,25 @@
 }
 
 
-class ActivationsFinder : public ThreadVisitor {
- public:
-  explicit ActivationsFinder(JSFunction* function)
-      : function_(function), has_activations_(false) {}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  RUNTIME_ASSERT(args[0]->IsSmi());
+  Deoptimizer::BailoutType type =
+      static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
+  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+  ASSERT(isolate->heap()->IsAllocationAllowed());
+  int frames = deoptimizer->output_count();
 
-  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    if (has_activations_) return;
+  deoptimizer->MaterializeHeapNumbers();
+  delete deoptimizer;
 
-    for (JavaScriptFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-      JavaScriptFrame* frame = it.frame();
-      if (frame->is_optimized() && frame->function() == function_) {
-        has_activations_ = true;
-        return;
-      }
-    }
-  }
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = NULL;
+  for (int i = 0; i < frames - 1; i++) it.Advance();
+  frame = it.frame();
 
-  bool has_activations() { return has_activations_; }
-
- private:
-  JSFunction* function_;
-  bool has_activations_;
-};
-
-
-static void MaterializeArgumentsObjectInFrame(Isolate* isolate,
-                                              JavaScriptFrame* frame) {
+  RUNTIME_ASSERT(frame->function()->IsJSFunction());
   Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
   Handle<Object> arguments;
   for (int i = frame->ComputeExpressionsCount() - 1; i >= 0; --i) {
@@ -8125,32 +8236,6 @@
       frame->SetExpression(i, *arguments);
     }
   }
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  RUNTIME_ASSERT(args[0]->IsSmi());
-  Deoptimizer::BailoutType type =
-      static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
-  Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
-  ASSERT(isolate->heap()->IsAllocationAllowed());
-  int jsframes = deoptimizer->jsframe_count();
-
-  deoptimizer->MaterializeHeapNumbers();
-  delete deoptimizer;
-
-  JavaScriptFrameIterator it(isolate);
-  for (int i = 0; i < jsframes - 1; i++) {
-    MaterializeArgumentsObjectInFrame(isolate, it.frame());
-    it.Advance();
-  }
-
-  JavaScriptFrame* frame = it.frame();
-  RUNTIME_ASSERT(frame->function()->IsJSFunction());
-  Handle<JSFunction> function(JSFunction::cast(frame->function()), isolate);
-  MaterializeArgumentsObjectInFrame(isolate, frame);
 
   if (type == Deoptimizer::EAGER) {
     RUNTIME_ASSERT(function->IsOptimized());
@@ -8162,24 +8247,17 @@
     return isolate->heap()->undefined_value();
   }
 
-  // Find other optimized activations of the function.
-  bool has_other_activations = false;
+  // Count the number of optimized activations of the function.
+  int activations = 0;
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->is_optimized() && frame->function() == *function) {
-      has_other_activations = true;
-      break;
+      activations++;
     }
     it.Advance();
   }
 
-  if (!has_other_activations) {
-    ActivationsFinder activations_finder(*function);
-    isolate->thread_manager()->IterateArchivedThreads(&activations_finder);
-    has_other_activations = activations_finder.has_activations();
-  }
-
-  if (!has_other_activations) {
+  if (activations == 0) {
     if (FLAG_trace_deopt) {
       PrintF("[removing optimized code for: ");
       function->PrintName();
@@ -8203,7 +8281,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
 
   Deoptimizer::DeoptimizeFunction(*function);
@@ -8223,22 +8301,10 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
   function->MarkForLazyRecompilation();
-
-  Code* unoptimized = function->shared()->code();
-  if (args.length() == 2 &&
-      unoptimized->kind() == Code::FUNCTION) {
-    CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-    CHECK(type->IsEqualTo(CStrVector("osr")));
-    isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
-    unoptimized->set_allow_osr_at_loop_nesting_level(
-        Code::kMaxLoopNestingMarker);
-  }
-
   return isolate->heap()->undefined_value();
 }
 
@@ -8246,15 +8312,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  // The least significant bit (after untagging) indicates whether the
-  // function is currently optimized, regardless of reason.
   if (!V8::UseCrankshaft()) {
     return Smi::FromInt(4);  // 4 == "never".
   }
   if (FLAG_always_opt) {
     return Smi::FromInt(3);  // 3 == "always".
   }
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
@@ -8263,7 +8327,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
 
@@ -8271,7 +8335,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
 
   // We're not prepared to handle a function with arguments object.
   ASSERT(!function->shared()->uses_arguments());
@@ -8331,7 +8395,7 @@
     // Try to compile the optimized code.  A true return value from
     // CompileOptimized means that compilation succeeded, not necessarily
     // that optimization succeeded.
-    if (JSFunction::CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
+    if (CompileOptimized(function, ast_id, CLEAR_EXCEPTION) &&
         function->IsOptimized()) {
       DeoptimizationInputData* data = DeoptimizationInputData::cast(
           function->code()->deoptimization_data());
@@ -8357,19 +8421,8 @@
     function->PrintName();
     PrintF("]\n");
   }
-  Handle<Code> check_code;
-#if defined(V8_TARGET_ARCH_IA32) || \
-    defined(V8_TARGET_ARCH_ARM) || \
-    defined(V8_TARGET_ARCH_MIPS)
-  if (FLAG_count_based_interrupts) {
-    InterruptStub interrupt_stub;
-    check_code = interrupt_stub.GetCode();
-  } else  // NOLINT
-#endif
-  {  // NOLINT
-    StackCheckStub check_stub;
-    check_code = check_stub.GetCode();
-  }
+  StackCheckStub check_stub;
+  Handle<Code> check_code = check_stub.GetCode();
   Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
   Deoptimizer::RevertStackCheckCode(*unoptimized,
                                     *check_code,
@@ -8399,50 +8452,17 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() >= 2);
-  int argc = args.length() - 2;
-  CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
-  Object* receiver = args[0];
-
-  // If there are too many arguments, allocate argv via malloc.
-  const int argv_small_size = 10;
-  Handle<Object> argv_small_buffer[argv_small_size];
-  SmartArrayPointer<Handle<Object> > argv_large_buffer;
-  Handle<Object>* argv = argv_small_buffer;
-  if (argc > argv_small_size) {
-    argv = new Handle<Object>[argc];
-    if (argv == NULL) return isolate->StackOverflow();
-    argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
-  }
-
-  for (int i = 0; i < argc; ++i) {
-     MaybeObject* maybe = args[1 + i];
-     Object* object;
-     if (!maybe->To<Object>(&object)) return maybe;
-     argv[i] = Handle<Object>(object);
-  }
-
-  bool threw;
-  Handle<JSReceiver> hfun(fun);
-  Handle<Object> hreceiver(receiver);
-  Handle<Object> result =
-      Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
-
-  if (threw) return Failure::Exception();
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Apply) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
-  Handle<Object> receiver = args.at<Object>(1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
-  CONVERT_SMI_ARG_CHECKED(offset, 3);
-  CONVERT_SMI_ARG_CHECKED(argc, 4);
+  CONVERT_CHECKED(JSReceiver, fun, args[0]);
+  Object* receiver = args[1];
+  CONVERT_CHECKED(JSObject, arguments, args[2]);
+  CONVERT_CHECKED(Smi, shift, args[3]);
+  CONVERT_CHECKED(Smi, arity, args[4]);
+
+  int offset = shift->value();
+  int argc = arity->value();
   ASSERT(offset >= 0);
   ASSERT(argc >= 0);
 
@@ -8458,12 +8478,17 @@
   }
 
   for (int i = 0; i < argc; ++i) {
-    argv[i] = Object::GetElement(arguments, offset + i);
+     MaybeObject* maybe = arguments->GetElement(offset + i);
+     Object* object;
+     if (!maybe->To<Object>(&object)) return maybe;
+     argv[i] = Handle<Object>(object);
   }
 
-  bool threw;
-  Handle<Object> result =
-      Execution::Call(fun, receiver, argc, argv, &threw, true);
+  bool threw = false;
+  Handle<JSReceiver> hfun(fun);
+  Handle<Object> hreceiver(receiver);
+  Handle<Object> result = Execution::Call(
+      hfun, hreceiver, argc, reinterpret_cast<Object***>(argv), &threw, true);
 
   if (threw) return Failure::Exception();
   return *result;
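
The argv handling above is the classic small-size optimization: a fixed stack buffer covers calls with at most ten arguments, and larger calls fall back to a heap allocation owned by a smart pointer. The same pattern in isolation (a sketch, not V8 code):

    #include <memory>

    void WithArgv(int argc) {
      const int kArgvSmallSize = 10;
      int small_buffer[kArgvSmallSize];
      std::unique_ptr<int[]> large_buffer;  // owns the heap fallback, if any
      int* argv = small_buffer;
      if (argc > kArgvSmallSize) {
        large_buffer.reset(new int[argc]);
        argv = large_buffer.get();
      }
      for (int i = 0; i < argc; ++i) argv[i] = i;  // fill in the arguments
      // ... use (argc, argv); the heap buffer is released on return ...
    }

    int main() {
      WithArgv(3);   // stays on the stack
      WithArgv(32);  // takes the heap path
      return 0;
    }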
@@ -8490,8 +8515,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, function, 0);
-  int length = function->shared()->scope_info()->ContextLength();
+  CONVERT_CHECKED(JSFunction, function, args[0]);
+  int length = function->shared()->scope_info()->NumberOfContextSlots();
   Object* result;
   { MaybeObject* maybe_result =
         isolate->heap()->AllocateFunctionContext(length, function);
@@ -8577,7 +8602,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
-  ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
+  SerializedScopeInfo* scope_info = SerializedScopeInfo::cast(args[0]);
   JSFunction* function;
   if (args[1]->IsSmi()) {
     // A smi sentinel indicates a context nested inside global code rather
@@ -8602,8 +8627,8 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
 
   int index;
   PropertyAttributes attributes;
@@ -8626,10 +8651,18 @@
   }
 
   // The slot was found in a JSObject, either a context extension object,
-  // the global object, or the subject of a with.  Try to delete it
-  // (respecting DONT_DELETE).
+  // the global object, or an arguments object.  Try to delete it
+  // (respecting DONT_DELETE).  For consistency with V8's usual behavior,
+  // which allows deleting all parameters in functions that mention
+  // 'arguments', we do this even for the case of slots found on an
+  // arguments object.  The slot was found on an arguments object if the
+  // index is non-negative.
   Handle<JSObject> object = Handle<JSObject>::cast(holder);
-  return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+  if (index >= 0) {
+    return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
+  } else {
+    return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
+  }
 }
 
 
@@ -8714,53 +8747,52 @@
                                           &attributes,
                                           &binding_flags);
 
-  // If the index is non-negative, the slot has been found in a context.
+  // If the index is non-negative, the slot has been found in a local
+  // variable or a parameter. Read it from the context object or the
+  // arguments object.
   if (index >= 0) {
-    ASSERT(holder->IsContext());
-    // If the "property" we were looking for is a local variable, the
-    // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
+    // If the "property" we were looking for is a local variable or an
+    // argument in a context, the receiver is the global object; see
+    // ECMA-262, 3rd., 10.1.6 and 10.2.3.
     //
-    // Use the hole as the receiver to signal that the receiver is implicit
-    // and that the global receiver should be used (as distinguished from an
-    // explicit receiver that happens to be a global object).
+    // Use the hole as the receiver to signal that the receiver is
+    // implicit and that the global receiver should be used.
     Handle<Object> receiver = isolate->factory()->the_hole_value();
-    Object* value = Context::cast(*holder)->get(index);
+    MaybeObject* value = (holder->IsContext())
+        ? Context::cast(*holder)->get(index)
+        : JSObject::cast(*holder)->GetElement(index);
     // Check for uninitialized bindings.
-    switch (binding_flags) {
-      case MUTABLE_CHECK_INITIALIZED:
-      case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
-        if (value->IsTheHole()) {
-          Handle<Object> reference_error =
-              isolate->factory()->NewReferenceError("not_defined",
-                                                    HandleVector(&name, 1));
-          return MakePair(isolate->Throw(*reference_error), NULL);
-        }
-        // FALLTHROUGH
-      case MUTABLE_IS_INITIALIZED:
-      case IMMUTABLE_IS_INITIALIZED:
-      case IMMUTABLE_IS_INITIALIZED_HARMONY:
-        ASSERT(!value->IsTheHole());
-        return MakePair(value, *receiver);
-      case IMMUTABLE_CHECK_INITIALIZED:
-        return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
-      case MISSING_BINDING:
-        UNREACHABLE();
-        return MakePair(NULL, NULL);
+    if (holder->IsContext() &&
+        binding_flags == MUTABLE_CHECK_INITIALIZED &&
+        value->IsTheHole()) {
+      Handle<Object> reference_error =
+          isolate->factory()->NewReferenceError("not_defined",
+                                                HandleVector(&name, 1));
+      return MakePair(isolate->Throw(*reference_error), NULL);
+    } else {
+      return MakePair(Unhole(isolate->heap(), value, attributes), *receiver);
     }
   }
 
-  // Otherwise, if the slot was found the holder is a context extension
-  // object, subject of a with, or a global object.  We read the named
-  // property from it.
-  if (!holder.is_null()) {
-    Handle<JSObject> object = Handle<JSObject>::cast(holder);
-    ASSERT(object->HasProperty(*name));
-    // GetProperty below can cause GC.
-    Handle<Object> receiver_handle(object->IsGlobalObject()
-        ? GlobalObject::cast(*object)->global_receiver()
-        : ComputeReceiverForNonGlobal(isolate, *object));
+  // If the holder is found, we read the property from it.
+  if (!holder.is_null() && holder->IsJSObject()) {
+    ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
+    JSObject* object = JSObject::cast(*holder);
+    Object* receiver;
+    if (object->IsGlobalObject()) {
+      receiver = GlobalObject::cast(object)->global_receiver();
+    } else if (context->is_exception_holder(*holder)) {
+      // Use the hole as the receiver to signal that the receiver is
+      // implicit and that the global receiver should be used.
+      receiver = isolate->heap()->the_hole_value();
+    } else {
+      receiver = ComputeReceiverForNonGlobal(isolate, object);
+    }
 
-    // No need to unhole the value here.  This is taken care of by the
+    // GetProperty below can cause GC.
+    Handle<Object> receiver_handle(receiver);
+
+    // No need to unhole the value here. This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
     return MakePair(value, *receiver_handle);
@@ -8795,11 +8827,12 @@
   ASSERT(args.length() == 4);
 
   Handle<Object> value(args[0], isolate);
-  CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
-  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
-  StrictModeFlag strict_mode = (language_mode == CLASSIC_MODE)
-      ? kNonStrictMode : kStrictMode;
+  CONVERT_ARG_CHECKED(Context, context, 1);
+  CONVERT_ARG_CHECKED(String, name, 2);
+  CONVERT_SMI_ARG_CHECKED(strict_unchecked, 3);
+  RUNTIME_ASSERT(strict_unchecked == kStrictMode ||
+                 strict_unchecked == kNonStrictMode);
+  StrictModeFlag strict_mode = static_cast<StrictModeFlag>(strict_unchecked);
 
   int index;
   PropertyAttributes attributes;
@@ -8812,37 +8845,45 @@
                                           &binding_flags);
 
   if (index >= 0) {
-    // The property was found in a context slot.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
-        context->get(index)->IsTheHole()) {
-      Handle<Object> error =
-          isolate->factory()->NewReferenceError("not_defined",
-                                                HandleVector(&name, 1));
-      return isolate->Throw(*error);
-    }
-    // Ignore if read_only variable.
-    if ((attributes & READ_ONLY) == 0) {
-      // Context is a fixed array and set cannot fail.
-      context->set(index, *value);
-    } else if (strict_mode == kStrictMode) {
-      // Setting read only property in strict mode.
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("strict_cannot_assign",
-                                           HandleVector(&name, 1));
-      return isolate->Throw(*error);
+    if (holder->IsContext()) {
+      Handle<Context> context = Handle<Context>::cast(holder);
+      if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+          context->get(index)->IsTheHole()) {
+        Handle<Object> error =
+            isolate->factory()->NewReferenceError("not_defined",
+                                                  HandleVector(&name, 1));
+        return isolate->Throw(*error);
+      }
+      // Ignore if read_only variable.
+      if ((attributes & READ_ONLY) == 0) {
+        // Context is a fixed array and set cannot fail.
+        context->set(index, *value);
+      } else if (strict_mode == kStrictMode) {
+        // Setting read only property in strict mode.
+        Handle<Object> error =
+            isolate->factory()->NewTypeError("strict_cannot_assign",
+                                             HandleVector(&name, 1));
+        return isolate->Throw(*error);
+      }
+    } else {
+      ASSERT((attributes & READ_ONLY) == 0);
+      Handle<Object> result =
+          SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
+      if (result.is_null()) {
+        ASSERT(isolate->has_pending_exception());
+        return Failure::Exception();
+      }
     }
     return *value;
   }
 
-  // Slow case: The property is not in a context slot.  It is either in a
-  // context extension object, a property of the subject of a with, or a
-  // property of the global object.
-  Handle<JSObject> object;
+  // Slow case: The property is not in a FixedArray context.
+  // It is either in an JSObject extension context or it was not found.
+  Handle<JSObject> context_ext;
 
   if (!holder.is_null()) {
-    // The property exists on the holder.
-    object = Handle<JSObject>::cast(holder);
+    // The property exists in the extension context.
+    context_ext = Handle<JSObject>::cast(holder);
   } else {
     // The property was not found.
     ASSERT(attributes == ABSENT);
@@ -8850,21 +8891,22 @@
     if (strict_mode == kStrictMode) {
       // Throw in strict mode (assignment to undefined variable).
       Handle<Object> error =
-          isolate->factory()->NewReferenceError(
-              "not_defined", HandleVector(&name, 1));
+        isolate->factory()->NewReferenceError(
+            "not_defined", HandleVector(&name, 1));
       return isolate->Throw(*error);
     }
-    // In non-strict mode, the property is added to the global object.
+    // In non-strict mode, the property is stored in the global context.
     attributes = NONE;
-    object = Handle<JSObject>(isolate->context()->global());
+    context_ext = Handle<JSObject>(isolate->context()->global());
   }
 
-  // Set the property if it's not read only or doesn't yet exist.
+  // Set the property, but ignore if read_only variable on the context
+  // extension object itself.
   if ((attributes & READ_ONLY) == 0 ||
-      (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
+      (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
     RETURN_IF_EMPTY_HANDLE(
         isolate,
-        JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
+        SetProperty(context_ext, name, value, NONE, strict_mode));
   } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
     // Setting read only property in strict mode.
     Handle<Object> error =
@@ -8919,13 +8961,43 @@
     return isolate->StackOverflow();
   }
 
-  return Execution::HandleStackGuardInterrupt(isolate);
+  return Execution::HandleStackGuardInterrupt();
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
-  ASSERT(args.length() == 0);
-  return Execution::HandleStackGuardInterrupt(isolate);
+// NOTE: These PrintXXX functions are defined for all builds (not just
+// DEBUG builds) because we may want to be able to trace function
+// calls in all modes.
+static void PrintString(String* str) {
+  // not uncommon to have empty strings
+  if (str->length() > 0) {
+    SmartArrayPointer<char> s =
+        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+    PrintF("%s", *s);
+  }
+}
+
+
+static void PrintObject(Object* obj) {
+  if (obj->IsSmi()) {
+    PrintF("%d", Smi::cast(obj)->value());
+  } else if (obj->IsString() || obj->IsSymbol()) {
+    PrintString(String::cast(obj));
+  } else if (obj->IsNumber()) {
+    PrintF("%g", obj->Number());
+  } else if (obj->IsFailure()) {
+    PrintF("<failure>");
+  } else if (obj->IsUndefined()) {
+    PrintF("<undefined>");
+  } else if (obj->IsNull()) {
+    PrintF("<null>");
+  } else if (obj->IsTrue()) {
+    PrintF("<true>");
+  } else if (obj->IsFalse()) {
+    PrintF("<false>");
+  } else {
+    PrintF("%p", reinterpret_cast<void*>(obj));
+  }
 }
 
 
@@ -8947,12 +9019,33 @@
   }
 
   if (result == NULL) {
-    JavaScriptFrame::PrintTop(stdout, true, false);
-    PrintF(" {\n");
+    // constructor calls
+    JavaScriptFrameIterator it;
+    JavaScriptFrame* frame = it.frame();
+    if (frame->IsConstructor()) PrintF("new ");
+    // function name
+    Object* fun = frame->function();
+    if (fun->IsJSFunction()) {
+      PrintObject(JSFunction::cast(fun)->shared()->name());
+    } else {
+      PrintObject(fun);
+    }
+    // function arguments
+    // (we are intentionally only printing the actually
+    // supplied parameters, not all parameters required)
+    PrintF("(this=");
+    PrintObject(frame->receiver());
+    const int length = frame->ComputeParametersCount();
+    for (int i = 0; i < length; i++) {
+      PrintF(", ");
+      PrintObject(frame->GetParameter(i));
+    }
+    PrintF(") {\n");
+
   } else {
     // function result
     PrintF("} -> ");
-    result->ShortPrint();
+    PrintObject(result);
     PrintF("\n");
   }
 }
@@ -9029,14 +9122,10 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+  CONVERT_ARG_CHECKED(String, str, 0);
   FlattenString(str);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
-
-  MaybeObject* maybe_result_array =
-      output->EnsureCanContainHeapObjectElements();
-  if (maybe_result_array->IsFailure()) return maybe_result_array;
+  CONVERT_ARG_CHECKED(JSArray, output, 1);
   RUNTIME_ASSERT(output->HasFastElements());
 
   AssertNoAllocation no_allocation;
@@ -9069,20 +9158,25 @@
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  int64_t time = isolate->date_cache()->EquivalentTime(static_cast<int64_t>(x));
-  const char* zone = OS::LocalTimezone(static_cast<double>(time));
+  const char* zone = OS::LocalTimezone(x);
   return isolate->heap()->AllocateStringFromUtf8(CStrVector(zone));
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimeOffset) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 0);
+
+  return isolate->heap()->NumberFromDouble(OS::LocalTimeOffset());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DateDaylightSavingsOffset) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  int64_t time = isolate->date_cache()->ToUTC(static_cast<int64_t>(x));
-
-  return isolate->heap()->NumberFromDouble(static_cast<double>(time));
+  return isolate->heap()->NumberFromDouble(OS::DaylightSavingsOffset(x));
 }
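
The net effect of this hunk: the reverted code routed time zone, local offset, and daylight-savings queries through the isolate's DateCache; the restored 3.6 code asks the OS directly on every call.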
 
 
@@ -9097,10 +9191,10 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+  CONVERT_ARG_CHECKED(String, source, 0);
 
   source = Handle<String>(source->TryFlattenGetString());
-  // Optimized fast case where we only have ASCII characters.
+  // Optimized fast case where we only have ascii characters.
   Handle<Object> result;
   if (source->IsSeqAsciiString()) {
     result = JsonParser<true>::Parse(source);
@@ -9118,40 +9212,43 @@
 
 bool CodeGenerationFromStringsAllowed(Isolate* isolate,
                                       Handle<Context> context) {
-  ASSERT(context->allow_code_gen_from_strings()->IsFalse());
-  // Check with callback if set.
-  AllowCodeGenerationFromStringsCallback callback =
-      isolate->allow_code_gen_callback();
-  if (callback == NULL) {
-    // No callback set and code generation disallowed.
-    return false;
-  } else {
-    // Callback set. Let it decide if code generation is allowed.
-    VMState state(isolate, EXTERNAL);
-    return callback(v8::Utils::ToLocal(context));
+  if (context->allow_code_gen_from_strings()->IsFalse()) {
+    // Check with callback if set.
+    AllowCodeGenerationFromStringsCallback callback =
+        isolate->allow_code_gen_callback();
+    if (callback == NULL) {
+      // No callback set and code generation disallowed.
+      return false;
+    } else {
+      // Callback set. Let it decide if code generation is allowed.
+      VMState state(isolate, EXTERNAL);
+      return callback(v8::Utils::ToLocal(context));
+    }
   }
+  return true;
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) {
   HandleScope scope(isolate);
   ASSERT_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+  CONVERT_ARG_CHECKED(String, source, 0);
 
   // Extract global context.
   Handle<Context> context(isolate->context()->global_context());
 
   // Check if global context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (context->allow_code_gen_from_strings()->IsFalse() &&
-      !CodeGenerationFromStringsAllowed(isolate, context)) {
+  if (!CodeGenerationFromStringsAllowed(isolate, context)) {
     return isolate->Throw(*isolate->factory()->NewError(
         "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
   }
 
   // Compile source string in the global context.
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
-      source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition);
+  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source,
+                                                            context,
+                                                            true,
+                                                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> fun =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
@@ -9164,15 +9261,13 @@
 static ObjectPair CompileGlobalEval(Isolate* isolate,
                                     Handle<String> source,
                                     Handle<Object> receiver,
-                                    LanguageMode language_mode,
-                                    int scope_position) {
+                                    StrictModeFlag strict_mode) {
   Handle<Context> context = Handle<Context>(isolate->context());
   Handle<Context> global_context = Handle<Context>(context->global_context());
 
   // Check if global context allows code generation from
   // strings. Throw an exception if it doesn't.
-  if (global_context->allow_code_gen_from_strings()->IsFalse() &&
-      !CodeGenerationFromStringsAllowed(isolate, global_context)) {
+  if (!CodeGenerationFromStringsAllowed(isolate, global_context)) {
     isolate->Throw(*isolate->factory()->NewError(
         "code_gen_from_strings", HandleVector<Object>(NULL, 0)));
     return MakePair(Failure::Exception(), NULL);
@@ -9184,8 +9279,7 @@
       source,
       Handle<Context>(isolate->context()),
       context->IsGlobalContext(),
-      language_mode,
-      scope_position);
+      strict_mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -9195,28 +9289,91 @@
 
 
 RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) {
-  ASSERT(args.length() == 5);
+  ASSERT(args.length() == 4);
 
   HandleScope scope(isolate);
   Handle<Object> callee = args.at<Object>(0);
+  Handle<Object> receiver;  // Will be overwritten.
 
-  // If "eval" didn't refer to the original GlobalEval, it's not a
-  // direct call to eval.
-  // (And even if it is, but the first argument isn't a string, just let
-  // execution default to an indirect call to eval, which will also return
-  // the first argument without doing anything).
+  // Compute the calling context.
+  Handle<Context> context = Handle<Context>(isolate->context(), isolate);
+#ifdef DEBUG
+  // Make sure Isolate::context() agrees with the old code that traversed
+  // the stack frames to compute the context.
+  StackFrameLocator locator;
+  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+  ASSERT(Context::cast(frame->context()) == *context);
+#endif
+
+  // Find where the 'eval' symbol is bound. It is unaliased only if
+  // it is bound in the global context.
+  int index = -1;
+  PropertyAttributes attributes = ABSENT;
+  BindingFlags binding_flags;
+  while (true) {
+    receiver = context->Lookup(isolate->factory()->eval_symbol(),
+                               FOLLOW_PROTOTYPE_CHAIN,
+                               &index,
+                               &attributes,
+                               &binding_flags);
+    // Stop search when eval is found or when the global context is
+    // reached.
+    if (attributes != ABSENT || context->IsGlobalContext()) break;
+    context = Handle<Context>(context->previous(), isolate);
+  }
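+  // Illustration (hypothetical): inside
+  //   with ({ eval: f }) eval("x");
+  // the lookup above finds 'eval' on the with-object, i.e. not in the
+  // global context, so the non-global branch below returns the pair
+  // (callee, receiver) and f is invoked as an ordinary function.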
+
+  // If eval could not be resolved, it has been deleted and we need to
+  // throw a reference error.
+  if (attributes == ABSENT) {
+    Handle<Object> name = isolate->factory()->eval_symbol();
+    Handle<Object> reference_error =
+        isolate->factory()->NewReferenceError("not_defined",
+                                              HandleVector(&name, 1));
+    return MakePair(isolate->Throw(*reference_error), NULL);
+  }
+
+  if (!context->IsGlobalContext()) {
+    // 'eval' is not bound in the global context. Just call the function
+    // with the given arguments. This is not necessarily the global eval.
+    if (receiver->IsContext() || receiver->IsJSContextExtensionObject()) {
+      receiver = isolate->factory()->the_hole_value();
+    }
+    return MakePair(*callee, *receiver);
+  }
+
+  // 'eval' is bound in the global context, but it may have been overwritten.
+  // Compare it to the builtin 'GlobalEval' function to make sure.
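+  // Illustration (hypothetical): after
+  //   eval = function(s) { return s; };
+  //   eval("x");
+  // the callee no longer equals GlobalEval, so the check below hands
+  // back (callee, the_hole) and the call proceeds as a plain call.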
   if (*callee != isolate->global_context()->global_eval_fun() ||
       !args[1]->IsString()) {
     return MakePair(*callee, isolate->heap()->the_hole_value());
   }
 
-  CONVERT_LANGUAGE_MODE_ARG(language_mode, 3);
-  ASSERT(args[4]->IsSmi());
+  ASSERT(args[3]->IsSmi());
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
                            args.at<Object>(2),
-                           language_mode,
-                           args.smi_at(4));
+                           static_cast<StrictModeFlag>(args.smi_at(3)));
+}
+
+
+RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEvalNoLookup) {
+  ASSERT(args.length() == 4);
+
+  HandleScope scope(isolate);
+  Handle<Object> callee = args.at<Object>(0);
+
+  // 'eval' is bound in the global context, but it may have been overwritten.
+  // Compare it to the builtin 'GlobalEval' function to make sure.
+  if (*callee != isolate->global_context()->global_eval_fun() ||
+      !args[1]->IsString()) {
+    return MakePair(*callee, isolate->heap()->the_hole_value());
+  }
+
+  ASSERT(args[3]->IsSmi());
+  return CompileGlobalEval(isolate,
+                           args.at<String>(1),
+                           args.at<Object>(2),
+                           static_cast<StrictModeFlag>(args.smi_at(3)));
 }
 
 
@@ -9227,11 +9384,11 @@
   // as specified in ECMA262, 15.3.5.2.
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
 
-  Handle<Map> map = func->shared()->is_classic_mode()
-      ? isolate->function_instance_map()
-      : isolate->strict_mode_function_instance_map();
+  Handle<Map> map = func->shared()->strict_mode()
+                        ? isolate->strict_mode_function_instance_map()
+                        : isolate->function_instance_map();
 
   ASSERT(func->map()->instance_type() == map->instance_type());
   ASSERT(func->map()->instance_size() == map->instance_size());
@@ -9245,7 +9402,7 @@
   // Use as fallback for allocation in generated code when NewSpace
   // is full.
   ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0);
+  CONVERT_ARG_CHECKED(Smi, size_smi, 0);
   int size = size_smi->value();
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
   RUNTIME_ASSERT(size > 0);
@@ -9267,9 +9424,9 @@
 // false otherwise.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSArray, array, 0);
-  CONVERT_ARG_CHECKED(JSObject, element, 1);
-  RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
+  CONVERT_CHECKED(JSArray, array, args[0]);
+  CONVERT_CHECKED(JSObject, element, args[1]);
+  RUNTIME_ASSERT(array->HasFastElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
   for (int i = 0; i < length; i++) {
@@ -9353,11 +9510,9 @@
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
     Handle<Map> map;
     if (fast_elements_) {
-      map = isolate_->factory()->GetElementsTransitionMap(array,
-                                                          FAST_ELEMENTS);
+      map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
     } else {
-      map = isolate_->factory()->GetElementsTransitionMap(array,
-                                                          DICTIONARY_ELEMENTS);
+      map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
     }
     array->set_map(*map);
     array->set_length(*length);
@@ -9413,7 +9568,6 @@
   uint32_t length = static_cast<uint32_t>(array->length()->Number());
   int element_count = 0;
   switch (array->GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
@@ -9425,10 +9579,6 @@
       }
       break;
     }
-    case FAST_DOUBLE_ELEMENTS:
-      // TODO(1810): Decide if it's worthwhile to implement this.
-      UNREACHABLE();
-      break;
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dictionary(
           SeededNumberDictionary::cast(array->elements()));
@@ -9441,16 +9591,7 @@
       }
       break;
     }
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-    case EXTERNAL_BYTE_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-    case EXTERNAL_SHORT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-    case EXTERNAL_FLOAT_ELEMENTS:
-    case EXTERNAL_DOUBLE_ELEMENTS:
-    case EXTERNAL_PIXEL_ELEMENTS:
+    default:
       // External arrays are always dense.
       return length;
   }
@@ -9516,7 +9657,6 @@
                                   List<uint32_t>* indices) {
   ElementsKind kind = object->GetElementsKind();
   switch (kind) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(object->elements()));
       uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9528,11 +9668,6 @@
       }
       break;
     }
-    case FAST_DOUBLE_ELEMENTS: {
-      // TODO(1810): Decide if it's worthwhile to implement this.
-      UNREACHABLE();
-      break;
-    }
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dict(
           SeededNumberDictionary::cast(object->elements()));
@@ -9642,7 +9777,6 @@
                             ArrayConcatVisitor* visitor) {
   uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
   switch (receiver->GetElementsKind()) {
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
@@ -9657,18 +9791,13 @@
         } else if (receiver->HasElement(j)) {
           // Call GetElement on receiver, not its prototype, or getters won't
           // have the correct receiver.
-          element_value = Object::GetElement(receiver, j);
-          RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+          element_value = GetElement(receiver, j);
+          if (element_value.is_null()) return false;
           visitor->visit(j, element_value);
         }
       }
       break;
     }
-    case FAST_DOUBLE_ELEMENTS: {
-      // TODO(1810): Decide if it's worthwhile to implement this.
-      UNREACHABLE();
-      break;
-    }
     case DICTIONARY_ELEMENTS: {
       Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
       List<uint32_t> indices(dict->Capacity() / 2);
@@ -9681,8 +9810,8 @@
       while (j < n) {
         HandleScope loop_scope;
         uint32_t index = indices[j];
-        Handle<Object> element = Object::GetElement(receiver, index);
-        RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
+        Handle<Object> element = GetElement(receiver, index);
+        if (element.is_null()) return false;
         visitor->visit(index, element);
         // Skip to next different index (i.e., omit duplicates).
         do {
@@ -9759,7 +9888,7 @@
   ASSERT(args.length() == 1);
   HandleScope handle_scope(isolate);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
+  CONVERT_ARG_CHECKED(JSArray, arguments, 0);
   int argument_count = static_cast<int>(arguments->length()->Number());
   RUNTIME_ASSERT(arguments->HasFastElements());
   Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
@@ -9779,13 +9908,6 @@
       uint32_t element_estimate;
       if (obj->IsJSArray()) {
         Handle<JSArray> array(Handle<JSArray>::cast(obj));
-        // TODO(1810): Find out if it's worthwhile to properly support
-        // arbitrary ElementsKinds. For now, pessimistically transition to
-        // FAST_ELEMENTS.
-        if (array->HasFastDoubleElements()) {
-          array = Handle<JSArray>::cast(
-              JSObject::TransitionElementsKind(array, FAST_ELEMENTS));
-        }
         length_estimate =
             static_cast<uint32_t>(array->length()->Number());
         element_estimate =
@@ -9854,7 +9976,7 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(String, string, 0);
+  CONVERT_CHECKED(String, string, args[0]);
   StringInputBuffer buffer(string);
   while (buffer.has_more()) {
     uint16_t character = buffer.GetNext();
@@ -9870,7 +9992,7 @@
 // Returns the number of non-undefined elements collected.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_CHECKED(JSObject, object, args[0]);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   return object->PrepareElementsForSort(limit);
 }
@@ -9879,21 +10001,19 @@
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSArray, from, 0);
-  CONVERT_ARG_CHECKED(JSArray, to, 1);
+  CONVERT_CHECKED(JSArray, from, args[0]);
+  CONVERT_CHECKED(JSArray, to, args[1]);
   FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
-  ElementsKind elements_kind;
   if (new_elements->map() == isolate->heap()->fixed_array_map() ||
       new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
-    elements_kind = FAST_ELEMENTS;
+    maybe_new_map = to->map()->GetFastElementsMap();
   } else if (new_elements->map() ==
              isolate->heap()->fixed_double_array_map()) {
-    elements_kind = FAST_DOUBLE_ELEMENTS;
+    maybe_new_map = to->map()->GetFastDoubleElementsMap();
   } else {
-    elements_kind = DICTIONARY_ELEMENTS;
+    maybe_new_map = to->map()->GetSlowElementsMap();
   }
-  maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind);
   Object* new_map;
   if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
   to->set_map(Map::cast(new_map));
@@ -9911,7 +10031,7 @@
 // How many elements does this object/array have?
 RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) {
   ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  CONVERT_CHECKED(JSObject, object, args[0]);
   HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
     int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
@@ -9929,7 +10049,7 @@
 
   ASSERT_EQ(3, args.length());
 
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
   Handle<Object> key1 = args.at<Object>(1);
   Handle<Object> key2 = args.at<Object>(2);
 
@@ -9940,15 +10060,15 @@
   }
 
   Handle<JSObject> jsobject = Handle<JSObject>::cast(object);
-  Handle<Object> tmp1 = Object::GetElement(jsobject, index1);
+  Handle<Object> tmp1 = GetElement(jsobject, index1);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp1);
-  Handle<Object> tmp2 = Object::GetElement(jsobject, index2);
+  Handle<Object> tmp2 = GetElement(jsobject, index2);
   RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
 
-  RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode));
-  RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode));
+  RETURN_IF_EMPTY_HANDLE(isolate,
+                         SetElement(jsobject, index1, tmp2, kStrictMode));
+  RETURN_IF_EMPTY_HANDLE(isolate,
+                         SetElement(jsobject, index2, tmp1, kStrictMode));
 
   return isolate->heap()->undefined_value();
 }
@@ -9962,16 +10082,12 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
+  CONVERT_ARG_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
   if (array->elements()->IsDictionary()) {
     // Create an array and get all the keys into it, then remove all the
     // keys that are not integers in the range 0 to length-1.
-    bool threw = false;
-    Handle<FixedArray> keys =
-        GetKeysInFixedArrayFor(array, INCLUDE_PROTOS, &threw);
-    if (threw) return Failure::Exception();
-
+    Handle<FixedArray> keys = GetKeysInFixedArrayFor(array, INCLUDE_PROTOS);
     int keys_length = keys->length();
     for (int i = 0; i < keys_length; i++) {
       Object* key = keys->get(i);
@@ -9983,9 +10099,7 @@
     }
     return *isolate->factory()->NewJSArrayWithElements(keys);
   } else {
-    ASSERT(array->HasFastElements() ||
-           array->HasFastSmiOnlyElements() ||
-           array->HasFastDoubleElements());
+    ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
     Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
     // -1 means start of array.
     single_interval->set(0, Smi::FromInt(-1));
@@ -10001,13 +10115,37 @@
 }
 
 
+// DefineAccessor takes an optional final argument which is the
+// property attributes (e.g., DONT_ENUM, DONT_DELETE).  IMPORTANT: due
+// to the way accessors are implemented, it is set for both the getter
+// and setter on the first call to DefineAccessor and ignored on
+// subsequent calls.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineAccessor) {
+  RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
+  // Compute attributes.
+  PropertyAttributes attributes = NONE;
+  if (args.length() == 5) {
+    CONVERT_CHECKED(Smi, attrs, args[4]);
+    int value = attrs->value();
+    // Only attribute bits should be set.
+    ASSERT((value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+    attributes = static_cast<PropertyAttributes>(value);
+  }
+
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag, args[2]);
+  CONVERT_CHECKED(JSFunction, fun, args[3]);
+  return obj->DefineAccessor(name, flag->value() == 0, fun, attributes);
+}
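+// Illustrative call from the JS builtins (a sketch; GETTER and DONT_ENUM
+// stand in for the usual 0 and 2 constants and are assumptions here):
+//   %DefineAccessor(ToObject(obj), "x", GETTER, fun, DONT_ENUM);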
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) {
   ASSERT(args.length() == 3);
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_CHECKED(String, name, 1);
-  CONVERT_SMI_ARG_CHECKED(flag, 2);
-  AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER;
-  return obj->LookupAccessor(name, component);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+  CONVERT_CHECKED(String, name, args[1]);
+  CONVERT_CHECKED(Smi, flag, args[2]);
+  return obj->LookupAccessor(name, flag->value() == 0);
 }
 
 
@@ -10025,8 +10163,8 @@
 }
 
 
-static StackFrame::Id UnwrapFrameId(int wrapped) {
-  return static_cast<StackFrame::Id>(wrapped << 2);
+static StackFrame::Id UnwrapFrameId(Smi* wrapped) {
+  return static_cast<StackFrame::Id>(wrapped->value() << 2);
 }
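+// Frame ids are 4-byte-aligned addresses, so their two low bits are
+// always zero; WrapFrameId shifts them away to fit the id into a Smi and
+// the shift above restores them. Round-trip sketch:
+//   Smi* w = WrapFrameId(id);               // id >> 2, boxed as a Smi
+//   StackFrame::Id id2 = UnwrapFrameId(w);  // w->value() << 2 == id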
 
 
@@ -10080,8 +10218,8 @@
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
       if (structure->IsForeign() || structure->IsAccessorInfo()) {
-        MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
-            receiver, structure, name);
+        MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
+            receiver, structure, name, result->holder());
         if (!maybe_value->ToObject(&value)) {
           if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
@@ -10103,11 +10241,10 @@
     case CONSTANT_TRANSITION:
     case NULL_DESCRIPTOR:
       return heap->undefined_value();
-    case HANDLER:
+    default:
       UNREACHABLE();
-      return heap->undefined_value();
   }
-  UNREACHABLE();  // keep the compiler happy
+  UNREACHABLE();
   return heap->undefined_value();
 }
 
@@ -10129,8 +10266,8 @@
 
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
 
   // Make sure to set the current context to the context before the debugger was
   // entered (if the debugger is entered). The reason for switching context here
@@ -10173,7 +10310,7 @@
   // Try local lookup on each of the objects.
   Handle<JSObject> jsproto = obj;
   for (int i = 0; i < length; i++) {
-    LookupResult result(isolate);
+    LookupResult result;
     jsproto->LocalLookup(*name, &result);
     if (result.IsProperty()) {
       // LookupResult is not GC safe as it holds raw object pointers.
@@ -10200,16 +10337,15 @@
       // If the callback object is a fixed array then it contains JavaScript
       // getter and/or setter.
       bool hasJavaScriptAccessors = result_type == CALLBACKS &&
-                                    result_callback_obj->IsAccessorPair();
+                                    result_callback_obj->IsFixedArray();
       Handle<FixedArray> details =
           isolate->factory()->NewFixedArray(hasJavaScriptAccessors ? 5 : 2);
       details->set(0, *value);
       details->set(1, property_details);
       if (hasJavaScriptAccessors) {
-        AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
         details->set(2, isolate->heap()->ToBoolean(caught_exception));
-        details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
-        details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
+        details->set(3, FixedArray::cast(*result_callback_obj)->get(0));
+        details->set(4, FixedArray::cast(*result_callback_obj)->get(1));
       }
 
       return *isolate->factory()->NewJSArrayWithElements(details);
@@ -10228,10 +10364,10 @@
 
   ASSERT(args.length() == 2);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(String, name, 1);
 
-  LookupResult result(isolate);
+  LookupResult result;
   obj->Lookup(*name, &result);
   if (result.IsProperty()) {
     return DebugLookupResultValue(isolate->heap(), *obj, *name, &result, NULL);
@@ -10244,8 +10380,9 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  return Smi::FromInt(static_cast<int>(details.type()));
+  CONVERT_CHECKED(Smi, details, args[0]);
+  PropertyType type = PropertyDetails(details).type();
+  return Smi::FromInt(static_cast<int>(type));
 }
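+// PropertyDetails packs these fields into a Smi, so the debugger can
+// round-trip them through this and the two entries below; sketch:
+//   PropertyDetails d(details);           // decode the Smi payload
+//   d.type(); d.attributes(); d.index();  // unpacked bit fields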
 
 
@@ -10253,8 +10390,9 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  return Smi::FromInt(static_cast<int>(details.attributes()));
+  CONVERT_CHECKED(Smi, details, args[0]);
+  PropertyAttributes attributes = PropertyDetails(details).attributes();
+  return Smi::FromInt(static_cast<int>(attributes));
 }
 
 
@@ -10262,8 +10400,9 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) {
   ASSERT(args.length() == 1);
-  CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  return Smi::FromInt(details.index());
+  CONVERT_CHECKED(Smi, details, args[0]);
+  int index = PropertyDetails(details).index();
+  return Smi::FromInt(index);
 }
 
 
@@ -10273,9 +10412,9 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasNamedInterceptor());
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_ARG_CHECKED(String, name, 1);
 
   PropertyAttributes attributes;
   return obj->GetPropertyWithInterceptor(*obj, *name, &attributes);
@@ -10288,7 +10427,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasIndexedInterceptor());
   CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
 
@@ -10339,16 +10478,15 @@
 class FrameInspector {
  public:
   FrameInspector(JavaScriptFrame* frame,
-                 int inlined_jsframe_index,
+                 int inlined_frame_index,
                  Isolate* isolate)
       : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
     // Calculate the deoptimized frame.
     if (frame->is_optimized()) {
       deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
-          frame, inlined_jsframe_index, isolate);
+          frame, inlined_frame_index, isolate);
     }
     has_adapted_arguments_ = frame_->has_adapted_arguments();
-    is_bottommost_ = inlined_jsframe_index == 0;
     is_optimized_ = frame_->is_optimized();
   }
 
@@ -10381,16 +10519,6 @@
         ? deoptimized_frame_->GetExpression(index)
         : frame_->GetExpression(index);
   }
-  int GetSourcePosition() {
-    return is_optimized_
-        ? deoptimized_frame_->GetSourcePosition()
-        : frame_->LookupCode()->SourcePosition(frame_->pc());
-  }
-  bool IsConstructor() {
-    return is_optimized_ && !is_bottommost_
-        ? deoptimized_frame_->HasConstructStub()
-        : frame_->IsConstructor();
-  }
 
   // To inspect all the provided arguments the frame might need to be
   // replaced with the arguments frame.
@@ -10406,7 +10534,6 @@
   DeoptimizedFrameInfo* deoptimized_frame_;
   Isolate* isolate_;
   bool is_optimized_;
-  bool is_bottommost_;
   bool has_adapted_arguments_;
 
   DISALLOW_COPY_AND_ASSIGN(FrameInspector);
@@ -10424,18 +10551,6 @@
 static const int kFrameDetailsFlagsIndex = 8;
 static const int kFrameDetailsFirstDynamicIndex = 9;
 
-
-static SaveContext* FindSavedContextForFrame(Isolate* isolate,
-                                             JavaScriptFrame* frame) {
-  SaveContext* save = isolate->save_context();
-  while (save != NULL && !save->IsBelowFrame(frame)) {
-    save = save->prev();
-  }
-  ASSERT(save != NULL);
-  return save;
-}
-
-
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -10473,6 +10588,8 @@
     return heap->undefined_value();
   }
 
+  int inlined_frame_index = 0;  // Inlined frame index in optimized frame.
+
   int count = 0;
   JavaScriptFrameIterator it(isolate, id);
   for (; !it.done(); it.Advance()) {
@@ -10481,33 +10598,38 @@
   }
   if (it.done()) return heap->undefined_value();
 
-  bool is_optimized = it.frame()->is_optimized();
-
-  int inlined_jsframe_index = 0;  // Inlined frame index in optimized frame.
-  if (is_optimized) {
-    inlined_jsframe_index =
+  if (it.frame()->is_optimized()) {
+    inlined_frame_index =
         it.frame()->GetInlineCount() - (index - count) - 1;
   }
-  FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
+  FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = FindSavedContextForFrame(isolate, it.frame());
+  SaveContext* save = isolate->save_context();
+  while (save != NULL && !save->below(it.frame())) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
 
   // Get the frame id.
   Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);
 
-  // Find source position in unoptimized code.
-  int position = frame_inspector.GetSourcePosition();
+  // Find source position.
+  int position =
+      it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
 
-  // Check for constructor frame.
-  bool constructor = frame_inspector.IsConstructor();
+  // Check for constructor frame. Inlined frames cannot be construct calls.
+  bool inlined_frame =
+      it.frame()->is_optimized() && inlined_frame_index != 0;
+  bool constructor = !inlined_frame && it.frame()->IsConstructor();
 
   // Get scope info and read from it for local variable information.
-  Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+  Handle<JSFunction> function(JSFunction::cast(it.frame()->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<ScopeInfo> scope_info(shared->scope_info());
-  ASSERT(*scope_info != ScopeInfo::Empty());
+  Handle<SerializedScopeInfo> scope_info(shared->scope_info());
+  ASSERT(*scope_info != SerializedScopeInfo::Empty());
+  ScopeInfo<> info(*scope_info);
 
   // Get the locals names and values into a temporary array.
   //
@@ -10515,33 +10637,31 @@
   // (e.g. .result)?  For users of the debugger, they will probably be
   // confusing.
   Handle<FixedArray> locals =
-      isolate->factory()->NewFixedArray(scope_info->LocalCount() * 2);
+      isolate->factory()->NewFixedArray(info.NumberOfLocals() * 2);
 
   // Fill in the values of the locals.
   int i = 0;
-  for (; i < scope_info->StackLocalCount(); ++i) {
+  for (; i < info.number_of_stack_slots(); ++i) {
     // Use the value from the stack.
-    locals->set(i * 2, scope_info->LocalName(i));
+    locals->set(i * 2, *info.LocalName(i));
     locals->set(i * 2 + 1, frame_inspector.GetExpression(i));
   }
-  if (i < scope_info->LocalCount()) {
+  if (i < info.NumberOfLocals()) {
     // Get the context containing declarations.
     Handle<Context> context(
         Context::cast(it.frame()->context())->declaration_context());
-    for (; i < scope_info->LocalCount(); ++i) {
-      Handle<String> name(scope_info->LocalName(i));
-      VariableMode mode;
-      InitializationFlag init_flag;
+    for (; i < info.NumberOfLocals(); ++i) {
+      Handle<String> name = info.LocalName(i);
       locals->set(i * 2, *name);
-      locals->set(i * 2 + 1, context->get(
-          scope_info->ContextSlotIndex(*name, &mode, &init_flag)));
+      locals->set(i * 2 + 1,
+                  context->get(scope_info->ContextSlotIndex(*name, NULL)));
     }
   }
 
   // Check whether this frame is positioned at return. If not top
   // frame or if the frame is optimized it cannot be at a return.
   bool at_return = false;
-  if (!is_optimized && index == 0) {
+  if (!it.frame()->is_optimized() && index == 0) {
     at_return = isolate->debug()->IsBreakAtReturn(it.frame());
   }
 
@@ -10581,21 +10701,26 @@
   // the provided parameters whereas the function frame always has the number
   // of arguments matching the function's parameters. The rest of the
   // information (except for what is collected above) is the same.
-  if ((inlined_jsframe_index == 0) && it.frame()->has_adapted_arguments()) {
+  if (it.frame()->has_adapted_arguments()) {
     it.AdvanceToArgumentsFrame();
     frame_inspector.SetArgumentsFrame(it.frame());
   }
 
   // Find the number of arguments to fill. At least fill the number of
   // parameters for the function and fill more if more parameters are provided.
-  int argument_count = scope_info->ParameterCount();
+  int argument_count = info.number_of_parameters();
   if (argument_count < frame_inspector.GetParametersCount()) {
     argument_count = frame_inspector.GetParametersCount();
   }
+#ifdef DEBUG
+  if (it.frame()->is_optimized()) {
+    ASSERT_EQ(argument_count, frame_inspector.GetParametersCount());
+  }
+#endif
 
   // Calculate the size of the result.
   int details_size = kFrameDetailsFirstDynamicIndex +
-                     2 * (argument_count + scope_info->LocalCount()) +
+                     2 * (argument_count + info.NumberOfLocals()) +
                      (at_return ? 1 : 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(details_size);
 
@@ -10610,7 +10735,7 @@
 
   // Add the locals count
   details->set(kFrameDetailsLocalCountIndex,
-               Smi::FromInt(scope_info->LocalCount()));
+               Smi::FromInt(info.NumberOfLocals()));
 
   // Add the source position.
   if (position != RelocInfo::kNoPosition) {
@@ -10633,9 +10758,9 @@
   if (*save->context() == *isolate->debug()->debug_context()) {
     flags |= 1 << 0;
   }
-  if (is_optimized) {
+  if (it.frame()->is_optimized()) {
     flags |= 1 << 1;
-    flags |= inlined_jsframe_index << 2;
+    flags |= inlined_frame_index << 2;
   }
   details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
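+  // Flags layout (sketch): bit 0 marks a frame whose context is the
+  // debugger context, bit 1 an optimized frame, and bits 2 and up hold
+  // the inlined frame index; e.g. flags == 6 (binary 110) reads as
+  // "optimized frame, inlined index 1".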
 
@@ -10645,14 +10770,14 @@
   // Add arguments name and value.
   for (int i = 0; i < argument_count; i++) {
     // Name of the argument.
-    if (i < scope_info->ParameterCount()) {
-      details->set(details_index++, scope_info->ParameterName(i));
+    if (i < info.number_of_parameters()) {
+      details->set(details_index++, *info.parameter_name(i));
     } else {
       details->set(details_index++, heap->undefined_value());
     }
 
     // Parameter value.
-    if (i < frame_inspector.GetParametersCount()) {
+    if (i < it.frame()->ComputeParametersCount()) {
       // Get the value from the stack.
       details->set(details_index++, frame_inspector.GetParameter(i));
     } else {
@@ -10661,7 +10786,7 @@
   }
 
   // Add locals name and value from the temporary copy from the function frame.
-  for (int i = 0; i < scope_info->LocalCount() * 2; i++) {
+  for (int i = 0; i < info.NumberOfLocals() * 2; i++) {
     details->set(details_index++, locals->get(i));
   }
 
@@ -10674,9 +10799,7 @@
   // THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
   // THE FRAME ITERATOR TO WRAP THE RECEIVER.
   Handle<Object> receiver(it.frame()->receiver(), isolate);
-  if (!receiver->IsJSObject() &&
-      shared->is_classic_mode() &&
-      !shared->native()) {
+  if (!receiver->IsJSObject() && !shared->strict_mode() && !shared->native()) {
     // If the receiver is not a JSObject and the function is not a
     // builtin or strict-mode we have hit an optimization where a
     // value object is not converted into a wrapped JS object. To
@@ -10699,20 +10822,21 @@
 // Copy all the context locals into an object used to materialize a scope.
 static bool CopyContextLocalsToScopeObject(
     Isolate* isolate,
-    Handle<ScopeInfo> scope_info,
+    Handle<SerializedScopeInfo> serialized_scope_info,
+    ScopeInfo<>& scope_info,
     Handle<Context> context,
     Handle<JSObject> scope_object) {
   // Fill all context locals to the context extension.
-  for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
-    VariableMode mode;
-    InitializationFlag init_flag;
-    int context_index = scope_info->ContextSlotIndex(
-        scope_info->ContextLocalName(i), &mode, &init_flag);
+  for (int i = Context::MIN_CONTEXT_SLOTS;
+       i < scope_info.number_of_context_slots();
+       i++) {
+    int context_index = serialized_scope_info->ContextSlotIndex(
+        *scope_info.context_slot_name(i), NULL);
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(scope_object,
-                    Handle<String>(scope_info->ContextLocalName(i)),
+                    scope_info.context_slot_name(i),
                     Handle<Object>(context->get(context_index), isolate),
                     NONE,
                     kNonStrictMode),
@@ -10725,13 +10849,15 @@
 
 // Create a plain JSObject which materializes the local scope for the specified
 // frame.
-static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
+static Handle<JSObject> MaterializeLocalScope(
     Isolate* isolate,
     JavaScriptFrame* frame,
-    FrameInspector* frame_inspector) {
-  Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
+    int inlined_frame_index) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<ScopeInfo> scope_info(shared->scope_info());
+  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+  ScopeInfo<> scope_info(*serialized_scope_info);
+  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -10739,39 +10865,36 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // First fill all parameters.
-  for (int i = 0; i < scope_info->ParameterCount(); ++i) {
-    Handle<Object> value(
-        i < frame_inspector->GetParametersCount() ?
-        frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
-
+  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    Handle<String>(scope_info->ParameterName(i)),
-                    value,
+                    scope_info.parameter_name(i),
+                    Handle<Object>(frame_inspector.GetParameter(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
   // Second fill all stack locals.
-  for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+  for (int i = 0; i < scope_info.number_of_stack_slots(); ++i) {
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         SetProperty(local_scope,
-                    Handle<String>(scope_info->StackLocalName(i)),
-                    Handle<Object>(frame_inspector->GetExpression(i)),
+                    scope_info.stack_slot_name(i),
+                    Handle<Object>(frame_inspector.GetExpression(i)),
                     NONE,
                     kNonStrictMode),
         Handle<JSObject>());
   }
 
-  if (scope_info->HasContext()) {
+  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
     // Third fill all context locals.
     Handle<Context> frame_context(Context::cast(frame->context()));
     Handle<Context> function_context(frame_context->declaration_context());
-    if (!CopyContextLocalsToScopeObject(
-            isolate, scope_info, function_context, local_scope)) {
+    if (!CopyContextLocalsToScopeObject(isolate,
+                                        serialized_scope_info, scope_info,
+                                        function_context, local_scope)) {
       return Handle<JSObject>();
     }
 
@@ -10781,11 +10904,7 @@
       if (function_context->has_extension() &&
           !function_context->IsGlobalContext()) {
         Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-        bool threw = false;
-        Handle<FixedArray> keys =
-            GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
-        if (threw) return Handle<JSObject>();
-
+        Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
         for (int i = 0; i < keys->length(); i++) {
           // Names of variables introduced by eval are strings.
           ASSERT(keys->get(i)->IsString());
@@ -10807,17 +10926,6 @@
 }
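+// Illustration (hypothetical): stopped at the debugger statement in
+//   function f(a) { var b = 2; debugger; }
+// the materialized scope is a fresh JSObject roughly equivalent to
+//   { a: <actual argument>, b: 2 }
+// so the debugger can read locals as ordinary named properties.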
 
 
-static Handle<JSObject> MaterializeLocalScope(
-    Isolate* isolate,
-    JavaScriptFrame* frame,
-    int inlined_jsframe_index) {
-  FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
-  return MaterializeLocalScopeWithFrameInspector(isolate,
-                                                 frame,
-                                                 &frame_inspector);
-}
-
-
 // Create a plain JSObject which materializes the closure content for the
 // context.
 static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@@ -10825,16 +10933,18 @@
   ASSERT(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  Handle<ScopeInfo> scope_info(shared->scope_info());
+  Handle<SerializedScopeInfo> serialized_scope_info(shared->scope_info());
+  ScopeInfo<> scope_info(*serialized_scope_info);
 
-  // Allocate and initialize a JSObject with all the content of this function
+  // Allocate and initialize a JSObject with all the content of this function
   // closure.
   Handle<JSObject> closure_scope =
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals to the context extension.
-  if (!CopyContextLocalsToScopeObject(
-          isolate, scope_info, context, closure_scope)) {
+  if (!CopyContextLocalsToScopeObject(isolate,
+                                      serialized_scope_info, scope_info,
+                                      context, closure_scope)) {
     return Handle<JSObject>();
   }
 
@@ -10842,11 +10952,7 @@
   // be variables introduced by eval.
   if (context->has_extension()) {
     Handle<JSObject> ext(JSObject::cast(context->extension()));
-    bool threw = false;
-    Handle<FixedArray> keys =
-        GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS, &threw);
-    if (threw) return Handle<JSObject>();
-
+    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext, INCLUDE_PROTOS);
     for (int i = 0; i < keys->length(); i++) {
       // Names of variables introduced by eval are strings.
       ASSERT(keys->get(i)->IsString());
@@ -10889,7 +10995,9 @@
     Isolate* isolate,
     Handle<Context> context) {
   ASSERT(context->IsBlockContext());
-  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+  Handle<SerializedScopeInfo> serialized_scope_info(
+      SerializedScopeInfo::cast(context->extension()));
+  ScopeInfo<> scope_info(*serialized_scope_info);
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
   // heap locals and extension properties of the debugged function.
@@ -10897,42 +11005,21 @@
       isolate->factory()->NewJSObject(isolate->object_function());
 
   // Fill all context locals.
-  if (!CopyContextLocalsToScopeObject(
-          isolate, scope_info, context, block_scope)) {
-    return Handle<JSObject>();
+  if (scope_info.number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+    if (!CopyContextLocalsToScopeObject(isolate,
+                                        serialized_scope_info, scope_info,
+                                        context, block_scope)) {
+      return Handle<JSObject>();
+    }
   }
 
   return block_scope;
 }
 
 
-// Create a plain JSObject which materializes the module scope for the specified
-// module context.
-static Handle<JSObject> MaterializeModuleScope(
-    Isolate* isolate,
-    Handle<Context> context) {
-  ASSERT(context->IsModuleContext());
-  Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
-
-  // Allocate and initialize a JSObject with all the members of the debugged
-  // module.
-  Handle<JSObject> module_scope =
-      isolate->factory()->NewJSObject(isolate->object_function());
-
-  // Fill all context locals.
-  if (!CopyContextLocalsToScopeObject(
-          isolate, scope_info, context, module_scope)) {
-    return Handle<JSObject>();
-  }
-
-  return module_scope;
-}
-
-
-// Iterate over the actual scopes visible from a stack frame. The iteration
-// proceeds from the innermost visible nested scope outwards. All scopes are
+// Iterate over the actual scopes visible from a stack frame. All scopes are
 // backed by an actual context except the local scope, which is inserted
-// "artificially" in the context chain.
+// "artifically" in the context chain.
 class ScopeIterator {
  public:
   enum ScopeType {
@@ -10941,95 +11028,38 @@
     ScopeTypeWith,
     ScopeTypeClosure,
     ScopeTypeCatch,
-    ScopeTypeBlock,
-    ScopeTypeModule
+    ScopeTypeBlock
   };
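+  // Illustration (hypothetical): stopped at the debugger statement in
+  //   function outer() { var x; function inner() { debugger; } inner(); }
+  // the iterator yields ScopeTypeLocal (inner), ScopeTypeClosure (outer)
+  // and finally ScopeTypeGlobal, innermost to outermost.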
 
   ScopeIterator(Isolate* isolate,
                 JavaScriptFrame* frame,
-                int inlined_jsframe_index)
+                int inlined_frame_index)
     : isolate_(isolate),
       frame_(frame),
-      inlined_jsframe_index_(inlined_jsframe_index),
+      inlined_frame_index_(inlined_frame_index),
       function_(JSFunction::cast(frame->function())),
       context_(Context::cast(frame->context())),
-      nested_scope_chain_(4) {
+      local_done_(false),
+      at_local_(false) {
 
-    // Catch the case when the debugger stops in an internal function.
-    Handle<SharedFunctionInfo> shared_info(function_->shared());
-    Handle<ScopeInfo> scope_info(shared_info->scope_info());
-    if (shared_info->script() == isolate->heap()->undefined_value()) {
-      while (context_->closure() == *function_) {
-        context_ = Handle<Context>(context_->previous(), isolate_);
-      }
-      return;
-    }
-
-    // Get the debug info (create it if it does not exist).
-    if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
-      // Return if ensuring debug info failed.
-      return;
-    }
-    Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
-
-    // Find the break point where execution has stopped.
-    BreakLocationIterator break_location_iterator(debug_info,
-                                                  ALL_BREAK_LOCATIONS);
-    break_location_iterator.FindBreakLocationFromAddress(frame->pc());
-    if (break_location_iterator.IsExit()) {
-      // We are within the return sequence. At the momemt it is not possible to
-      // get a source position which is consistent with the current scope chain.
-      // Thus all nested with, catch and block contexts are skipped and we only
-      // provide the function scope.
-      if (scope_info->HasContext()) {
-        context_ = Handle<Context>(context_->declaration_context(), isolate_);
-      } else {
-        while (context_->closure() == *function_) {
-          context_ = Handle<Context>(context_->previous(), isolate_);
-        }
-      }
-      if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
-    } else {
-      // Reparse the code and analyze the scopes.
-      ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
-      Handle<Script> script(Script::cast(shared_info->script()));
-      Scope* scope = NULL;
-
-      // Check whether we are in global, eval or function code.
-      Handle<ScopeInfo> scope_info(shared_info->scope_info());
-      if (scope_info->Type() != FUNCTION_SCOPE) {
-        // Global or eval code.
-        CompilationInfo info(script);
-        if (scope_info->Type() == GLOBAL_SCOPE) {
-          info.MarkAsGlobal();
-        } else {
-          ASSERT(scope_info->Type() == EVAL_SCOPE);
-          info.MarkAsEval();
-          info.SetCallingContext(Handle<Context>(function_->context()));
-        }
-        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
-          scope = info.function()->scope();
-        }
-      } else {
-        // Function code
-        CompilationInfo info(shared_info);
-        if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
-          scope = info.function()->scope();
-        }
-      }
-
-      // Retrieve the scope chain for the current position.
-      if (scope != NULL) {
-        int source_position = shared_info->code()->SourcePosition(frame_->pc());
-        scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
-      } else {
-        // A failed reparse indicates that the preparser has diverged from the
-        // parser or that the preparse data given to the initial parse has been
-        // faulty. We fail in debug mode but in release mode we only provide the
-        // information we get from the context chain but nothing about
-        // completely stack allocated scopes or stack allocated locals.
-        UNREACHABLE();
-      }
+    // Check whether the first scope is actually a local scope.
+    // If there is a stack slot for .result then this local scope has been
+    // created for evaluating top level code and it is not a real local scope.
+    // Checking for the existence of .result seems fragile, but the scope info
+    // saved with the code object does not otherwise have that information.
+    int index = function_->shared()->scope_info()->
+        StackSlotIndex(isolate_->heap()->result_symbol());
+    if (index >= 0) {
+      local_done_ = true;
+    } else if (context_->IsGlobalContext() ||
+               context_->IsFunctionContext()) {
+      at_local_ = true;
+    } else if (context_->closure() != *function_) {
+      // The context_ is a block or with or catch block from the outer function.
+      ASSERT(context_->IsWithContext() ||
+             context_->IsCatchContext() ||
+             context_->IsBlockContext());
+      at_local_ = true;
     }
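+    // Illustration (hypothetical): top-level code such as
+    //   var x = 1; x + 1;
+    // compiles with a hidden ".result" stack slot holding the completion
+    // value, so the StackSlotIndex check above detects this synthetic
+    // scope rather than reporting a real local scope.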
   }
 
@@ -11038,52 +11068,40 @@
 
   // Move to the next scope.
   void Next() {
-    ScopeType scope_type = Type();
-    if (scope_type == ScopeTypeGlobal) {
-      // The global scope is always the last in the chain.
-      ASSERT(context_->IsGlobalContext());
+    // If at a local scope, mark the local scope as passed.
+    if (at_local_) {
+      at_local_ = false;
+      local_done_ = true;
+
+      // If the current context is not associated with the local scope, the
+      // current context is the next real scope, so don't move to the next
+      // context in this case.
+      if (context_->closure() != *function_) {
+        return;
+      }
+    }
+
+    // The global scope is always the last in the chain.
+    if (context_->IsGlobalContext()) {
       context_ = Handle<Context>();
       return;
     }
-    if (nested_scope_chain_.is_empty()) {
-      context_ = Handle<Context>(context_->previous(), isolate_);
-    } else {
-      if (nested_scope_chain_.last()->HasContext()) {
-        ASSERT(context_->previous() != NULL);
-        context_ = Handle<Context>(context_->previous(), isolate_);
-      }
-      nested_scope_chain_.RemoveLast();
+
+    // Move to the next context.
+    context_ = Handle<Context>(context_->previous(), isolate_);
+
+    // If passing the local scope, indicate that the current scope is now the
+    // local scope.
+    if (!local_done_ &&
+        (context_->IsGlobalContext() || context_->IsFunctionContext())) {
+      at_local_ = true;
     }
   }
 
   // Return the type of the current scope.
   ScopeType Type() {
-    if (!nested_scope_chain_.is_empty()) {
-      Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
-      switch (scope_info->Type()) {
-        case FUNCTION_SCOPE:
-          ASSERT(context_->IsFunctionContext() ||
-                 !scope_info->HasContext());
-          return ScopeTypeLocal;
-        case MODULE_SCOPE:
-          ASSERT(context_->IsModuleContext());
-          return ScopeTypeModule;
-        case GLOBAL_SCOPE:
-          ASSERT(context_->IsGlobalContext());
-          return ScopeTypeGlobal;
-        case WITH_SCOPE:
-          ASSERT(context_->IsWithContext());
-          return ScopeTypeWith;
-        case CATCH_SCOPE:
-          ASSERT(context_->IsCatchContext());
-          return ScopeTypeCatch;
-        case BLOCK_SCOPE:
-          ASSERT(!scope_info->HasContext() ||
-                 context_->IsBlockContext());
-          return ScopeTypeBlock;
-        case EVAL_SCOPE:
-          UNREACHABLE();
-      }
+    if (at_local_) {
+      return ScopeTypeLocal;
     }
     if (context_->IsGlobalContext()) {
       ASSERT(context_->global()->IsGlobalObject());
@@ -11098,9 +11116,6 @@
     if (context_->IsBlockContext()) {
       return ScopeTypeBlock;
     }
-    if (context_->IsModuleContext()) {
-      return ScopeTypeModule;
-    }
     ASSERT(context_->IsWithContext());
     return ScopeTypeWith;
   }
@@ -11112,8 +11127,7 @@
         return Handle<JSObject>(CurrentContext()->global());
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
-        ASSERT(nested_scope_chain_.length() == 1);
-        return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
+        return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
       case ScopeIterator::ScopeTypeWith:
         // Return the with object.
         return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
@@ -11124,35 +11138,18 @@
         return MaterializeClosure(isolate_, CurrentContext());
       case ScopeIterator::ScopeTypeBlock:
         return MaterializeBlockScope(isolate_, CurrentContext());
-      case ScopeIterator::ScopeTypeModule:
-        return MaterializeModuleScope(isolate_, CurrentContext());
     }
     UNREACHABLE();
     return Handle<JSObject>();
   }
 
-  Handle<ScopeInfo> CurrentScopeInfo() {
-    if (!nested_scope_chain_.is_empty()) {
-      return nested_scope_chain_.last();
-    } else if (context_->IsBlockContext()) {
-      return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
-    } else if (context_->IsFunctionContext()) {
-      return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
-    }
-    return Handle<ScopeInfo>::null();
-  }
-
   // Return the context for this scope. For the local context there might not
   // be an actual context.
   Handle<Context> CurrentContext() {
-    if (Type() == ScopeTypeGlobal ||
-        nested_scope_chain_.is_empty()) {
-      return context_;
-    } else if (nested_scope_chain_.last()->HasContext()) {
-      return context_;
-    } else {
+    if (at_local_ && context_->closure() != *function_) {
       return Handle<Context>();
     }
+    return context_;
   }
 
 #ifdef DEBUG
@@ -11166,7 +11163,8 @@
 
       case ScopeIterator::ScopeTypeLocal: {
         PrintF("Local:\n");
-        function_->shared()->scope_info()->Print();
+        ScopeInfo<> scope_info(function_->shared()->scope_info());
+        scope_info.Print();
         if (!CurrentContext().is_null()) {
           CurrentContext()->Print();
           if (CurrentContext()->has_extension()) {
@@ -11211,10 +11209,11 @@
  private:
   Isolate* isolate_;
   JavaScriptFrame* frame_;
-  int inlined_jsframe_index_;
+  int inlined_frame_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
-  List<Handle<ScopeInfo> > nested_scope_chain_;
+  bool local_done_;
+  bool at_local_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
@@ -11230,7 +11229,7 @@
       RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
@@ -11272,8 +11271,8 @@
       RUNTIME_ARGUMENTS(isolate, args));
     if (!maybe_check->ToObject(&check)) return maybe_check;
   }
-  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-  CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
 
   // Get the frame where the debugging is performed.
@@ -11283,7 +11282,7 @@
 
   // Find the requested scope.
   int n = 0;
-  ScopeIterator it(isolate, frame, inlined_jsframe_index);
+  ScopeIterator it(isolate, frame, inlined_frame_index);
   for (; !it.Done() && n < index; it.Next()) {
     n++;
   }
@@ -11412,7 +11411,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[0]);
   isolate->debug()->set_disable_break(disable_break);
   return  isolate->heap()->undefined_value();
 }
@@ -11422,7 +11421,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   // Find the number of break points
   Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
@@ -11440,7 +11439,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   Handle<SharedFunctionInfo> shared(fun->shared());
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
@@ -11470,53 +11469,48 @@
   int target_start_position = RelocInfo::kNoPosition;
   Handle<SharedFunctionInfo> target;
   while (!done) {
-    { // Extra scope for iterator and no-allocation.
-      isolate->heap()->EnsureHeapIsIterable();
-      AssertNoAllocation no_alloc_during_heap_iteration;
-      HeapIterator iterator;
-      for (HeapObject* obj = iterator.next();
-           obj != NULL; obj = iterator.next()) {
-        if (obj->IsSharedFunctionInfo()) {
-          Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
-          if (shared->script() == *script) {
-            // If the SharedFunctionInfo found has the requested script data and
-            // contains the source position it is a candidate.
-            int start_position = shared->function_token_position();
-            if (start_position == RelocInfo::kNoPosition) {
-              start_position = shared->start_position();
-            }
-            if (start_position <= position &&
-                position <= shared->end_position()) {
-              // If there is no candidate or this function is within the current
-              // candidate this is the new candidate.
-              if (target.is_null()) {
-                target_start_position = start_position;
-                target = shared;
-              } else {
-                if (target_start_position == start_position &&
-                    shared->end_position() == target->end_position()) {
-                    // If a top-level function contain only one function
-                    // declartion the source for the top-level and the
-                    // function is the same. In that case prefer the non
-                    // top-level function.
-                  if (!shared->is_toplevel()) {
-                    target_start_position = start_position;
-                    target = shared;
-                  }
-                } else if (target_start_position <= start_position &&
-                           shared->end_position() <= target->end_position()) {
-                  // This containment check includes equality as a function
-                  // inside a top-level function can share either start or end
-                  // position with the top-level function.
+    HeapIterator iterator;
+    for (HeapObject* obj = iterator.next();
+         obj != NULL; obj = iterator.next()) {
+      if (obj->IsSharedFunctionInfo()) {
+        Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+        if (shared->script() == *script) {
+          // If the SharedFunctionInfo found has the requested script data and
+          // contains the source position it is a candidate.
+          int start_position = shared->function_token_position();
+          if (start_position == RelocInfo::kNoPosition) {
+            start_position = shared->start_position();
+          }
+          if (start_position <= position &&
+              position <= shared->end_position()) {
+            // If there is no candidate or this function is within the current
+            // candidate this is the new candidate.
+            if (target.is_null()) {
+              target_start_position = start_position;
+              target = shared;
+            } else {
+              if (target_start_position == start_position &&
+                  shared->end_position() == target->end_position()) {
+                  // If a top-level function contains only one function
+                  // declaration, the source for the top-level and the function
+                  // is the same. In that case prefer the non-top-level one.
+                if (!shared->is_toplevel()) {
                   target_start_position = start_position;
                   target = shared;
                 }
+              } else if (target_start_position <= start_position &&
+                         shared->end_position() <= target->end_position()) {
+                // This containment check includes equality as a function inside
+                // a top-level function can share either start or end position
+                // with the top-level function.
+                target_start_position = start_position;
+                target = shared;
               }
             }
           }
         }
-      }  // End for loop.
-    }  // End No allocation scope.
+      }
+    }
 
     if (target.is_null()) {
       return isolate->heap()->undefined_value();
@@ -11529,9 +11523,9 @@
     if (!done) {
       // If the candidate is not compiled, compile it to reveal any inner
       // functions which might contain the requested source position.
-      SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
+      CompileLazyShared(target, KEEP_EXCEPTION);
     }
-  }  // End while loop.
+  }
 
   return *target;
 }
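
The loop above selects the innermost SharedFunctionInfo whose source range contains the requested position, preferring a non-top-level function on an exact tie. A minimal sketch of that selection rule, using a plain struct rather than V8's types (all names here are illustrative):

    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for a function's source extent; not a V8 type.
    struct FunctionRange {
      int start;
      int end;
      bool is_toplevel;
    };

    // Pick the innermost range containing |position|: on an exact tie prefer
    // the non-top-level function; otherwise prefer a candidate nested inside
    // the current target (containment may share endpoints).
    const FunctionRange* FindTarget(const std::vector<FunctionRange>& ranges,
                                    int position) {
      const FunctionRange* target = nullptr;
      for (const FunctionRange& r : ranges) {
        if (r.start > position || position > r.end) continue;
        if (target == nullptr) {
          target = &r;
        } else if (target->start == r.start && target->end == r.end) {
          if (!r.is_toplevel) target = &r;
        } else if (target->start <= r.start && r.end <= target->end) {
          target = &r;
        }
      }
      return target;
    }

    int main() {
      std::vector<FunctionRange> ranges = {
          {0, 100, true},   // top-level script
          {10, 60, false},  // outer function
          {20, 30, false},  // inner function
      };
      const FunctionRange* t = FindTarget(ranges, 25);
      std::printf("target: [%d, %d]\n", t->start, t->end);  // target: [20, 30]
      return 0;
    }
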
@@ -11546,7 +11540,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
+  CONVERT_ARG_CHECKED(JSValue, wrapper, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
   Handle<Object> break_point_object_arg = args.at<Object>(2);
@@ -11596,7 +11590,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsNumber());
-  CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
+  CONVERT_BOOLEAN_CHECKED(enable, args[1]);
 
   // If the number doesn't match an enum value, the ChangeBreakOnException
   // function will default to affecting caught exceptions.
@@ -11677,65 +11671,46 @@
 
 // Creates a copy of the with context chain. The copy of the context chain
 // is linked to the function context supplied.
-static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
-                                                   Handle<JSFunction> function,
-                                                   Handle<Context> base,
-                                                   JavaScriptFrame* frame,
-                                                   int inlined_jsframe_index) {
-  HandleScope scope(isolate);
-  List<Handle<ScopeInfo> > scope_chain;
-  List<Handle<Context> > context_chain;
-
-  ScopeIterator it(isolate, frame, inlined_jsframe_index);
-  for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
-         it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
-    ASSERT(!it.Done());
-    scope_chain.Add(it.CurrentScopeInfo());
-    context_chain.Add(it.CurrentContext());
-  }
-
+static Handle<Context> CopyWithContextChain(Isolate* isolate,
+                                            Handle<JSFunction> function,
+                                            Handle<Context> current,
+                                            Handle<Context> base) {
   // At the end of the chain. Return the base context to link to.
-  Handle<Context> context = base;
-
-  // Iteratively copy and/or materialize the nested contexts.
-  while (!scope_chain.is_empty()) {
-    Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
-    Handle<Context> current = context_chain.RemoveLast();
-    ASSERT(!(scope_info->HasContext() & current.is_null()));
-
-    if (scope_info->Type() == CATCH_SCOPE) {
-      Handle<String> name(String::cast(current->extension()));
-      Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
-      context =
-          isolate->factory()->NewCatchContext(function,
-                                              context,
-                                              name,
-                                              thrown_object);
-    } else if (scope_info->Type() == BLOCK_SCOPE) {
-      // Materialize the contents of the block scope into a JSObject.
-      Handle<JSObject> block_scope_object =
-          MaterializeBlockScope(isolate, current);
-      if (block_scope_object.is_null()) {
-        return Handle<Context>::null();
-      }
-      // Allocate a new function context for the debug evaluation and set the
-      // extension object.
-      Handle<Context> new_context =
-          isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
-                                                 function);
-      new_context->set_extension(*block_scope_object);
-      new_context->set_previous(*context);
-      context = new_context;
-    } else {
-      ASSERT(scope_info->Type() == WITH_SCOPE);
-      ASSERT(current->IsWithContext());
-      Handle<JSObject> extension(JSObject::cast(current->extension()));
-      context =
-          isolate->factory()->NewWithContext(function, context, extension);
-    }
+  if (current->IsFunctionContext() || current->IsGlobalContext()) {
+    return base;
   }
 
-  return scope.CloseAndEscape(context);
+  // Recursively copy the with and catch contexts.
+  HandleScope scope(isolate);
+  Handle<Context> previous(current->previous());
+  Handle<Context> new_previous =
+      CopyWithContextChain(isolate, function, previous, base);
+  Handle<Context> new_current;
+  if (current->IsCatchContext()) {
+    Handle<String> name(String::cast(current->extension()));
+    Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+    new_current =
+        isolate->factory()->NewCatchContext(function,
+                                            new_previous,
+                                            name,
+                                            thrown_object);
+  } else if (current->IsBlockContext()) {
+    Handle<SerializedScopeInfo> scope_info(
+        SerializedScopeInfo::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewBlockContext(function, new_previous, scope_info);
+    // Copy context slots.
+    int num_context_slots = scope_info->NumberOfContextSlots();
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
+      new_current->set(i, current->get(i));
+    }
+  } else {
+    ASSERT(current->IsWithContext());
+    Handle<JSObject> extension(JSObject::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewWithContext(function, new_previous, extension);
+  }
+  return scope.CloseAndEscape(new_current);
 }
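
Both versions rebuild a suffix of the context chain on top of a new base; the reverted version does it with direct recursion over catch/block/with contexts. A minimal sketch of that recursion shape with a toy chain (not V8 types):

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    // Toy stand-in for a context chain; each node links to its previous
    // (enclosing) context.
    struct Ctx {
      std::string kind;  // "with", "catch", or "function"
      const Ctx* previous;
    };

    // Recursively copy the chain from |current| down to the first function
    // context, re-linking the copies onto |base| -- the same recursion shape
    // as CopyWithContextChain above. |arena| owns the copies.
    const Ctx* CopyChain(const Ctx* current, const Ctx* base,
                         std::vector<std::unique_ptr<Ctx> >* arena) {
      if (current->kind == "function") return base;  // end of copied portion
      const Ctx* new_previous = CopyChain(current->previous, base, arena);
      arena->push_back(
          std::unique_ptr<Ctx>(new Ctx{current->kind, new_previous}));
      return arena->back().get();
    }

    int main() {
      Ctx fn = {"function", nullptr};
      Ctx with = {"with", &fn};
      Ctx katch = {"catch", &with};
      Ctx base = {"eval", nullptr};

      std::vector<std::unique_ptr<Ctx> > arena;
      const Ctx* copy = CopyChain(&katch, &base, &arena);
      for (const Ctx* c = copy; c != nullptr; c = c->previous) {
        std::printf("%s -> ", c->kind.c_str());  // catch -> with -> eval ->
      }
      std::printf("null\n");
      return 0;
    }
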
 
 
@@ -11743,32 +11718,33 @@
 // Runtime_DebugEvaluate.
 static Handle<Object> GetArgumentsObject(Isolate* isolate,
                                          JavaScriptFrame* frame,
-                                         FrameInspector* frame_inspector,
-                                         Handle<ScopeInfo> scope_info,
+                                         int inlined_frame_index,
+                                         Handle<JSFunction> function,
+                                         Handle<SerializedScopeInfo> scope_info,
+                                         const ScopeInfo<>* sinfo,
                                          Handle<Context> function_context) {
   // Try to find the value of 'arguments' to pass as parameter. If it is not
   // found (that is, the debugged function does not reference 'arguments' and
   // does not support eval) then create an 'arguments' object.
   int index;
-  if (scope_info->StackLocalCount() > 0) {
+  if (sinfo->number_of_stack_slots() > 0) {
     index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
     if (index != -1) {
       return Handle<Object>(frame->GetExpression(index), isolate);
     }
   }
 
-  if (scope_info->HasHeapAllocatedLocals()) {
-    VariableMode mode;
-    InitializationFlag init_flag;
-    index = scope_info->ContextSlotIndex(
-        isolate->heap()->arguments_symbol(), &mode, &init_flag);
+  if (sinfo->number_of_context_slots() > Context::MIN_CONTEXT_SLOTS) {
+    index = scope_info->ContextSlotIndex(isolate->heap()->arguments_symbol(),
+                                         NULL);
     if (index != -1) {
       return Handle<Object>(function_context->get(index), isolate);
     }
   }
 
-  Handle<JSFunction> function(JSFunction::cast(frame_inspector->GetFunction()));
-  int length = frame_inspector->GetParametersCount();
+  FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
+
+  int length = frame_inspector.GetParametersCount();
   Handle<JSObject> arguments =
       isolate->factory()->NewArgumentsObject(function, length);
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
@@ -11776,7 +11752,7 @@
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
   for (int i = 0; i < length; i++) {
-    array->set(i, frame_inspector->GetParameter(i), mode);
+    array->set(i, frame_inspector.GetParameter(i), mode);
   }
   arguments->set_elements(*array);
   return arguments;
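
GetArgumentsObject looks up 'arguments' in a fixed order: stack slot first, then context slot, then it synthesizes a fresh object from the frame's parameters. A sketch of that lookup order with plain maps standing in for the scope info (illustrative names only):

    #include <map>
    #include <string>

    // Returns the slot index of 'arguments', or -1 if the function never
    // references it and a fresh arguments object must be materialized.
    int FindArgumentsSlot(const std::map<std::string, int>& stack_slots,
                          const std::map<std::string, int>& context_slots) {
      // 1. Prefer a stack-allocated 'arguments' variable.
      auto it = stack_slots.find("arguments");
      if (it != stack_slots.end()) return it->second;
      // 2. Fall back to a heap (context) allocated 'arguments'.
      it = context_slots.find("arguments");
      if (it != context_slots.end()) return it->second;
      // 3. Not found: the caller builds one from the frame's parameters,
      //    as GetArgumentsObject does above.
      return -1;
    }
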
@@ -11811,10 +11787,10 @@
       return maybe_check_result;
     }
   }
-  CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
-  CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
-  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
+  CONVERT_ARG_CHECKED(String, source, 3);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[4]);
   Handle<Object> additional_context(args[5]);
 
   // Handle the processing of break.
@@ -11824,14 +11800,17 @@
   StackFrame::Id id = UnwrapFrameId(wrapped_id);
   JavaScriptFrameIterator it(isolate, id);
   JavaScriptFrame* frame = it.frame();
-  FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
-  Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
-  Handle<ScopeInfo> scope_info(function->shared()->scope_info());
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<SerializedScopeInfo> scope_info(function->shared()->scope_info());
+  ScopeInfo<> sinfo(*scope_info);
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
-  SaveContext* save = FindSavedContextForFrame(isolate, frame);
-
+  SaveContext* save = isolate->save_context();
+  while (save != NULL && !save->below(frame)) {
+    save = save->prev();
+  }
+  ASSERT(save != NULL);
   SaveContext savex(isolate);
   isolate->set_context(*(save->context()));
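
The reverted code inlines the saved-context search: walk the singly linked SaveContext list until reaching the first entry saved below the selected frame. Sketched with stand-in types (not the V8 classes):

    // Toy saved-context list entry.
    struct SaveCtx {
      int frame_fp;  // frame position recorded when the context was saved
      SaveCtx* prev_;
      bool below(int fp_of_interest) const {
        return frame_fp < fp_of_interest;  // saved before that frame
      }
      SaveCtx* prev() const { return prev_; }
    };

    SaveCtx* FindSavedContextForFrame(SaveCtx* save, int frame_fp) {
      while (save != nullptr && !save->below(frame_fp)) {
        save = save->prev();
      }
      return save;  // the real code ASSERTs this is non-null
    }
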
 
@@ -11846,14 +11825,14 @@
                                       isolate->factory()->undefined_value());
   go_between->set_context(function->context());
 #ifdef DEBUG
-  Handle<ScopeInfo> go_between_scope_info(go_between->shared()->scope_info());
-  ASSERT(go_between_scope_info->ParameterCount() == 0);
-  ASSERT(go_between_scope_info->ContextLocalCount() == 0);
+  ScopeInfo<> go_between_sinfo(go_between->shared()->scope_info());
+  ASSERT(go_between_sinfo.number_of_parameters() == 0);
+  ASSERT(go_between_sinfo.number_of_context_slots() == 0);
 #endif
 
   // Materialize the content of the local scope into a JSObject.
-  Handle<JSObject> local_scope = MaterializeLocalScopeWithFrameInspector(
-      isolate, frame, &frame_inspector);
+  Handle<JSObject> local_scope = MaterializeLocalScope(
+      isolate, frame, inlined_frame_index);
   RETURN_IF_EMPTY_HANDLE(isolate, local_scope);
 
   // Allocate a new context for the debug evaluation and set the extension
@@ -11866,14 +11845,10 @@
   Handle<Context> frame_context(Context::cast(frame->context()));
   Handle<Context> function_context;
   // Get the function's context if it has one.
-  if (scope_info->HasContext()) {
+  if (scope_info->HasHeapAllocatedLocals()) {
     function_context = Handle<Context>(frame_context->declaration_context());
   }
-  context = CopyNestedScopeContextChain(isolate,
-                                        go_between,
-                                        context,
-                                        frame,
-                                        inlined_jsframe_index);
+  context = CopyWithContextChain(isolate, go_between, frame_context, context);
 
   if (additional_context->IsJSObject()) {
     Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
@@ -11897,8 +11872,7 @@
       Compiler::CompileEval(function_source,
                             context,
                             context->IsGlobalContext(),
-                            CLASSIC_MODE,
-                            RelocInfo::kNoPosition);
+                            kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
@@ -11912,19 +11886,17 @@
   if (has_pending_exception) return Failure::Exception();
 
   Handle<Object> arguments = GetArgumentsObject(isolate,
-                                                frame,
-                                                &frame_inspector,
-                                                scope_info,
-                                                function_context);
+                                                frame, inlined_frame_index,
+                                                function, scope_info,
+                                                &sinfo, function_context);
 
   // Invoke the evaluation function and return the result.
-  Handle<Object> argv[] = { arguments, source };
+  const int argc = 2;
+  Object** argv[argc] = { arguments.location(),
+                          Handle<Object>::cast(source).location() };
   Handle<Object> result =
-      Execution::Call(Handle<JSFunction>::cast(evaluation_function),
-                      receiver,
-                      ARRAY_SIZE(argv),
-                      argv,
-                      &has_pending_exception);
+      Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
+                      argc, argv, &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
 
   // Skip the global proxy as it has no properties and always delegates to the
@@ -11950,8 +11922,8 @@
       return maybe_check_result;
     }
   }
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
+  CONVERT_ARG_CHECKED(String, source, 1);
+  CONVERT_BOOLEAN_CHECKED(disable_break, args[2]);
   Handle<Object> additional_context(args[3]);
 
   // Handle the processing of break.
@@ -11974,12 +11946,15 @@
   bool is_global = true;
 
   if (additional_context->IsJSObject()) {
-    // Create a new with context with the additional context information between
-    // the context of the debugged function and the eval code to be executed.
-    context = isolate->factory()->NewWithContext(
-        Handle<JSFunction>(context->closure()),
-        context,
-        Handle<JSObject>::cast(additional_context));
+    // Create a function context first, then put 'with' context on top of it.
+    Handle<JSFunction> go_between = isolate->factory()->NewFunction(
+        isolate->factory()->empty_string(),
+        isolate->factory()->undefined_value());
+    go_between->set_context(*context);
+    context =
+        isolate->factory()->NewFunctionContext(
+            Context::MIN_CONTEXT_SLOTS, go_between);
+    context->set_extension(JSObject::cast(*additional_context));
     is_global = false;
   }
 
@@ -11987,11 +11962,7 @@
   // Currently, the eval code will be executed in non-strict mode,
   // even in the strict code context.
   Handle<SharedFunctionInfo> shared =
-      Compiler::CompileEval(source,
-                            context,
-                            is_global,
-                            CLASSIC_MODE,
-                            RelocInfo::kNoPosition);
+      Compiler::CompileEval(source, context, is_global, kNonStrictMode);
   if (shared.is_null()) return Failure::Exception();
   Handle<JSFunction> compiled_function =
       Handle<JSFunction>(
@@ -12004,8 +11975,6 @@
   Handle<Object> result =
     Execution::Call(compiled_function, receiver, 0, NULL,
                     &has_pending_exception);
-  // Clear the oneshot breakpoints so that the debugger does not step further.
-  isolate->debug()->ClearStepping();
   if (has_pending_exception) return Failure::Exception();
   return *result;
 }
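
The argv hunks above trade Handle<Object> arrays for arrays of Object**. The two describe the same data: a handle of this era is a thin wrapper around a pointer to a GC-updated slot, which location() exposes. A toy model, not V8's actual Handle:

    #include <cstdio>

    struct Object { int value; };

    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T** location() const { return slot_; }
      T* operator*() const { return *slot_; }
     private:
      T** slot_;  // GC may rewrite *slot_; the handle itself stays valid
    };

    int main() {
      Object obj = {42};
      Object* slot = &obj;  // a handle-scope slot
      Handle<Object> h(&slot);

      Handle<Object> new_style[] = {h};       // 3.7-style argv
      Object** old_style[] = {h.location()};  // 3.6-style argv

      std::printf("%d %d\n", (*new_style[0])->value, (*old_style[0])->value);
      return 0;
    }
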
@@ -12025,7 +11994,7 @@
     // because using
     //   instances->set(i, *GetScriptWrapper(script))
     // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
-    // already have dereferenced the instances handle.
+    // already have dereferenced the instances handle.
     Handle<JSValue> wrapper = GetScriptWrapper(script);
     instances->set(i, *wrapper);
   }
@@ -12033,14 +12002,13 @@
   // Return result as a JS array.
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->array_function());
-  isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
+  Handle<JSArray>::cast(result)->SetContent(*instances);
   return *result;
 }
 
 
 // Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(HeapIterator* iterator,
-                             JSObject* target,
+static int DebugReferencedBy(JSObject* target,
                              Object* instance_filter, int max_references,
                              FixedArray* instances, int instances_size,
                              JSFunction* arguments_function) {
@@ -12050,8 +12018,9 @@
   // Iterate the heap.
   int count = 0;
   JSObject* last = NULL;
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator->next()) != NULL) &&
+  while (((heap_obj = iterator.next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12116,22 +12085,16 @@
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                     "%DebugReferencedBy");
-  // The heap iterator reserves the right to do a GC to make the heap iterable.
-  // Due to the GC above we know it won't need to do that, but it seems cleaner
-  // to get the heap iterator constructed before we start having unprotected
-  // Object* locals that are not protected by handles.
+  isolate->heap()->CollectAllGarbage(false);
 
   // Check parameters.
-  CONVERT_ARG_CHECKED(JSObject, target, 0);
+  CONVERT_CHECKED(JSObject, target, args[0]);
   Object* instance_filter = args[1];
   RUNTIME_ASSERT(instance_filter->IsUndefined() ||
                  instance_filter->IsJSObject());
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
   RUNTIME_ASSERT(max_references >= 0);
 
-
   // Get the constructor function for context extension and arguments array.
   JSObject* arguments_boilerplate =
       isolate->context()->global_context()->arguments_boilerplate();
@@ -12140,9 +12103,7 @@
 
   // Get the number of referencing objects.
   int count;
-  HeapIterator heap_iterator;
-  count = DebugReferencedBy(&heap_iterator,
-                            target, instance_filter, max_references,
+  count = DebugReferencedBy(target, instance_filter, max_references,
                             NULL, 0, arguments_function);
 
   // Allocate an array to hold the result.
@@ -12153,34 +12114,30 @@
   FixedArray* instances = FixedArray::cast(object);
 
   // Fill the referencing objects.
-  // AllocateFixedArray above does not make the heap non-iterable.
-  ASSERT(HEAP->IsHeapIterable());
-  HeapIterator heap_iterator2;
-  count = DebugReferencedBy(&heap_iterator2,
-                            target, instance_filter, max_references,
+  count = DebugReferencedBy(target, instance_filter, max_references,
                             instances, count, arguments_function);
 
   // Return result as JS array.
   Object* result;
-  MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+  { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
       isolate->context()->global_context()->array_function());
-  if (!maybe_result->ToObject(&result)) return maybe_result;
-  return JSArray::cast(result)->SetContent(instances);
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  JSArray::cast(result)->SetContent(instances);
+  return result;
 }
 
 
 // Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(HeapIterator* iterator,
-                              JSFunction* constructor,
-                              int max_references,
-                              FixedArray* instances,
-                              int instances_size) {
+static int DebugConstructedBy(JSFunction* constructor, int max_references,
+                              FixedArray* instances, int instances_size) {
   AssertNoAllocation no_alloc;
 
   // Iterate the heap.
   int count = 0;
+  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator->next()) != NULL) &&
+  while (((heap_obj = iterator.next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12208,22 +12165,16 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
-                                     "%DebugConstructedBy");
+  isolate->heap()->CollectAllGarbage(false);
 
   // Check parameters.
-  CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
+  CONVERT_CHECKED(JSFunction, constructor, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
   RUNTIME_ASSERT(max_references >= 0);
 
   // Get the number of referencing objects.
   int count;
-  HeapIterator heap_iterator;
-  count = DebugConstructedBy(&heap_iterator,
-                             constructor,
-                             max_references,
-                             NULL,
-                             0);
+  count = DebugConstructedBy(constructor, max_references, NULL, 0);
 
   // Allocate an array to hold the result.
   Object* object;
@@ -12232,14 +12183,8 @@
   }
   FixedArray* instances = FixedArray::cast(object);
 
-  ASSERT(HEAP->IsHeapIterable());
   // Fill the referencing objects.
-  HeapIterator heap_iterator2;
-  count = DebugConstructedBy(&heap_iterator2,
-                             constructor,
-                             max_references,
-                             instances,
-                             count);
+  count = DebugConstructedBy(constructor, max_references, instances, count);
 
   // Return result as JS array.
   Object* result;
@@ -12247,7 +12192,8 @@
           isolate->context()->global_context()->array_function());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  return JSArray::cast(result)->SetContent(instances);
+  JSArray::cast(result)->SetContent(instances);
+  return result;
 }
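
DebugReferencedBy and DebugConstructedBy share a two-pass shape: a first scan with a null output array only counts matches, an exact-size array is allocated, and a second scan fills it (the code must also ensure no allocation perturbs the heap between the passes). A minimal sketch over a plain vector standing in for the heap:

    #include <cstdio>
    #include <vector>

    // One scan: writes matches into |out| if non-null, always returns the
    // total match count.
    template <typename Pred>
    int ScanHeap(const std::vector<int>& heap, Pred matches,
                 int* out, int out_size) {
      int count = 0;
      for (int obj : heap) {
        if (!matches(obj)) continue;
        if (out != nullptr && count < out_size) out[count] = obj;
        ++count;
      }
      return count;
    }

    int main() {
      std::vector<int> heap = {1, 4, 7, 10, 13};
      auto is_even = [](int v) { return v % 2 == 0; };
      int n = ScanHeap(heap, is_even, nullptr, 0);  // pass 1: count only
      std::vector<int> result(n);
      ScanHeap(heap, is_even, result.data(), n);    // pass 2: fill
      std::printf("%d matches, first = %d\n", n, result[0]);  // 2 matches, 4
      return 0;
    }
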
 
 
@@ -12256,32 +12202,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSObject, obj, 0);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
 
   // Use the __proto__ accessor.
   return Accessors::ObjectPrototype.getter(obj, NULL);
 }
 
 
-// Patches script source (should be called upon BeforeCompile event).
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
-  Handle<String> source(String::cast(args[1]));
-
-  RUNTIME_ASSERT(script_wrapper->value()->IsScript());
-  Handle<Script> script(Script::cast(script_wrapper->value()));
-
-  int compilation_state = Smi::cast(script->compilation_state())->value();
-  RUNTIME_ASSERT(compilation_state == Script::COMPILATION_STATE_INITIAL);
-  script->set_source(*source);
-
-  return isolate->heap()->undefined_value();
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) {
   ASSERT(args.length() == 0);
   CPU::DebugBreak();
@@ -12294,9 +12221,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->code()->PrintLn();
@@ -12310,9 +12237,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+  CONVERT_ARG_CHECKED(JSFunction, func, 0);
   Handle<SharedFunctionInfo> shared(func->shared());
-  if (!SharedFunctionInfo::EnsureCompiled(shared, KEEP_EXCEPTION)) {
+  if (!EnsureCompiled(shared, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   shared->construct_stub()->PrintLn();
@@ -12325,20 +12252,19 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  CONVERT_CHECKED(JSFunction, f, args[0]);
   return f->shared()->inferred_name();
 }
 
 
-static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
-                                            Script* script,
+static int FindSharedFunctionInfosForScript(Script* script,
                                             FixedArray* buffer) {
   AssertNoAllocation no_allocations;
+
   int counter = 0;
   int buffer_size = buffer->length();
-  for (HeapObject* obj = iterator->next();
-       obj != NULL;
-       obj = iterator->next()) {
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     ASSERT(obj != NULL);
     if (!obj->IsSharedFunctionInfo()) {
       continue;
@@ -12362,8 +12288,7 @@
                  Runtime_LiveEditFindSharedFunctionInfosForScript) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSValue, script_value, 0);
-
+  CONVERT_CHECKED(JSValue, script_value, args[0]);
 
   Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
 
@@ -12371,23 +12296,10 @@
 
   Handle<FixedArray> array;
   array = isolate->factory()->NewFixedArray(kBufferSize);
-  int number;
-  {
-    isolate->heap()->EnsureHeapIsIterable();
-    AssertNoAllocation no_allocations;
-    HeapIterator heap_iterator;
-    Script* scr = *script;
-    FixedArray* arr = *array;
-    number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
-  }
+  int number = FindSharedFunctionInfosForScript(*script, *array);
   if (number > kBufferSize) {
     array = isolate->factory()->NewFixedArray(number);
-    isolate->heap()->EnsureHeapIsIterable();
-    AssertNoAllocation no_allocations;
-    HeapIterator heap_iterator;
-    Script* scr = *script;
-    FixedArray* arr = *array;
-    FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+    FindSharedFunctionInfosForScript(*script, *array);
   }
 
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
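
The reverted sizing logic is guess-then-retry: scan into a kBufferSize array, and if the scan reports more matches than fit, allocate the exact count and scan again. Sketched with a toy match predicate:

    #include <vector>

    // One scan: fills |buffer| as far as it fits, returns the total count.
    static int Collect(const std::vector<int>& heap,
                       std::vector<int>* buffer) {
      int counter = 0;
      for (int obj : heap) {
        if (obj % 3 != 0) continue;  // stand-in for "belongs to this script"
        if (counter < static_cast<int>(buffer->size())) {
          (*buffer)[counter] = obj;
        }
        counter++;
      }
      return counter;
    }

    std::vector<int> CollectAll(const std::vector<int>& heap) {
      const int kBufferSize = 4;  // initial guess, as in the code above
      std::vector<int> buffer(kBufferSize);
      int number = Collect(heap, &buffer);
      if (number > kBufferSize) {
        buffer.assign(number, 0);  // retry once with the exact size
        Collect(heap, &buffer);
      }
      buffer.resize(number);  // trim unused guess slots
      return buffer;
    }
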
@@ -12408,8 +12320,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSValue, script, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+  CONVERT_CHECKED(JSValue, script, args[0]);
+  CONVERT_ARG_CHECKED(String, source, 1);
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
   JSArray* result =  LiveEdit::GatherCompileInfo(script_handle, source);
@@ -12427,12 +12339,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) {
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
-  CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
+  CONVERT_CHECKED(JSValue, original_script_value, args[0]);
+  CONVERT_ARG_CHECKED(String, new_source, 1);
   Handle<Object> old_script_name(args[2], isolate);
 
-  RUNTIME_ASSERT(original_script_value->value()->IsScript());
-  Handle<Script> original_script(Script::cast(original_script_value->value()));
+  CONVERT_CHECKED(Script, original_script_pointer,
+                  original_script_value->value());
+  Handle<Script> original_script(original_script_pointer);
 
   Object* old_script = LiveEdit::ChangeScriptSource(original_script,
                                                     new_source,
@@ -12450,7 +12363,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) {
   ASSERT(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
+  CONVERT_ARG_CHECKED(JSArray, shared_info, 0);
   return LiveEdit::FunctionSourceUpdated(shared_info);
 }
 
@@ -12459,8 +12372,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
+  CONVERT_ARG_CHECKED(JSArray, new_compile_info, 0);
+  CONVERT_ARG_CHECKED(JSArray, shared_info, 1);
 
   return LiveEdit::ReplaceFunctionCode(new_compile_info, shared_info);
 }
@@ -12475,8 +12388,7 @@
   if (function_object->IsJSValue()) {
     Handle<JSValue> function_wrapper = Handle<JSValue>::cast(function_object);
     if (script_object->IsJSValue()) {
-      RUNTIME_ASSERT(JSValue::cast(*script_object)->value()->IsScript());
-      Script* script = Script::cast(JSValue::cast(*script_object)->value());
+      CONVERT_CHECKED(Script, script, JSValue::cast(*script_object)->value());
       script_object = Handle<Object>(script, isolate);
     }
 
@@ -12496,9 +12408,9 @@
   ASSERT(args.length() == 3);
   HandleScope scope(isolate);
 
-  CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSValue, subst_wrapper, 2);
+  CONVERT_ARG_CHECKED(JSValue, parent_wrapper, 0);
+  CONVERT_ARG_CHECKED(JSValue, orig_wrapper, 1);
+  CONVERT_ARG_CHECKED(JSValue, subst_wrapper, 2);
 
   LiveEdit::ReplaceRefToNestedFunction(parent_wrapper, orig_wrapper,
                                        subst_wrapper);
@@ -12515,8 +12427,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
+  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+  CONVERT_ARG_CHECKED(JSArray, position_change_array, 1);
 
   return LiveEdit::PatchFunctionPositions(shared_array, position_change_array);
 }
@@ -12529,8 +12441,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
+  CONVERT_ARG_CHECKED(JSArray, shared_array, 0);
+  CONVERT_BOOLEAN_CHECKED(do_drop, args[1]);
 
   return *LiveEdit::CheckAndDropActivations(shared_array, do_drop);
 }
@@ -12541,8 +12453,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
+  CONVERT_ARG_CHECKED(String, s1, 0);
+  CONVERT_ARG_CHECKED(String, s2, 1);
 
   return *LiveEdit::CompareStrings(s1, s2);
 }
@@ -12553,7 +12465,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
 
   Handle<Code> code(function->code(), isolate);
@@ -12590,8 +12502,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) {
   ASSERT(args.length() == 2);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_BOOLEAN_CHECKED(without_debugger, args[1]);
 
   Handle<Object> result;
   bool pending_exception;
@@ -12615,7 +12527,7 @@
 
 // Sets a v8 flag.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
-  CONVERT_ARG_CHECKED(String, arg, 0);
+  CONVERT_CHECKED(String, arg, args[0]);
   SmartArrayPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   FlagList::SetFlagsFromString(*flags, StrLength(*flags));
@@ -12626,7 +12538,7 @@
 // Performs a GC.
 // Presently, it only does a full GC.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
-  isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
+  isolate->heap()->CollectAllGarbage(true);
   return isolate->heap()->undefined_value();
 }
 
@@ -12685,7 +12597,7 @@
   CONVERT_SMI_ARG_CHECKED(id2, 1);
   CONVERT_SMI_ARG_CHECKED(start, 2);
   CONVERT_SMI_ARG_CHECKED(count, 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4);
+  CONVERT_ARG_CHECKED(JSObject, filter_obj, 4);
   EnterDebugger enter_debugger;
   return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
 #else
@@ -12712,7 +12624,7 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
 #ifdef LIVE_OBJECT_LIST
   HandleScope scope;
-  CONVERT_ARG_HANDLE_CHECKED(String, address, 0);
+  CONVERT_ARG_CHECKED(String, address, 0);
   Object* result = LiveObjectList::GetObjId(address);
   return result;
 #else
@@ -12730,7 +12642,7 @@
   RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
   RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
   RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi());
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5);
+  CONVERT_ARG_CHECKED(JSObject, filter_obj, 5);
 
   Handle<JSObject> instance_filter;
   if (args[1]->IsJSObject()) {
@@ -12831,7 +12743,7 @@
   HandleScope scope;
   CONVERT_SMI_ARG_CHECKED(id1, 0);
   CONVERT_SMI_ARG_CHECKED(id2, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2);
+  CONVERT_ARG_CHECKED(JSObject, filter_obj, 2);
 
   EnterDebugger enter_debugger;
   return LiveObjectList::Summarize(id1, id2, filter_obj);
@@ -12868,8 +12780,6 @@
   // Scan the heap for Script objects to find the script with the requested
   // script data.
   Handle<Script> script;
-  script_name->GetHeap()->EnsureHeapIsIterable();
-  AssertNoAllocation no_allocation_during_heap_iteration;
   HeapIterator iterator;
   HeapObject* obj = NULL;
   while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12899,7 +12809,7 @@
 
   ASSERT(args.length() == 1);
 
-  CONVERT_ARG_CHECKED(String, script_name, 0);
+  CONVERT_CHECKED(String, script_name, args[0]);
 
   // Find the requested script.
   Handle<Object> result =
@@ -12918,32 +12828,34 @@
                                   Object* caller,
                                   bool* seen_caller) {
   // Only display JS frames.
-  if (!raw_frame->is_java_script()) {
+  if (!raw_frame->is_java_script())
     return false;
-  }
   JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
   Object* raw_fun = frame->function();
   // Not sure when this can happen but skip it just in case.
-  if (!raw_fun->IsJSFunction()) {
+  if (!raw_fun->IsJSFunction())
     return false;
-  }
   if ((raw_fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
     return false;
   }
   // Skip all frames until we've seen the caller.
   if (!(*seen_caller)) return false;
-  // Also, skip non-visible built-in functions and any call with the builtins
-  // object as receiver, so as to not reveal either the builtins object or
-  // an internal function.
-  // The --builtins-in-stack-traces command line flag allows including
-  // internal call sites in the stack trace for debugging purposes.
-  if (!FLAG_builtins_in_stack_traces) {
-    JSFunction* fun = JSFunction::cast(raw_fun);
-    if (frame->receiver()->IsJSBuiltinsObject() ||
-        (fun->IsBuiltin() && !fun->shared()->native())) {
-      return false;
-    }
+  // Also, skip the most obvious builtin calls. We recognize builtins
+  // as (1) functions called with the builtins object as the receiver and
+  // as (2) functions from native scripts called with undefined as the
+  // receiver (direct calls to helper functions in the builtins
+  // code). Some builtin calls (such as Number.ADD, which is invoked
+  // using 'call') are very difficult to recognize, so we're leaving
+  // them in for now.
+  if (frame->receiver()->IsJSBuiltinsObject()) {
+    return false;
+  }
+  JSFunction* fun = JSFunction::cast(raw_fun);
+  Object* raw_script = fun->shared()->script();
+  if (frame->receiver()->IsUndefined() && raw_script->IsScript()) {
+    int script_type = Script::cast(raw_script)->type()->value();
+    return script_type != Script::TYPE_NATIVE;
   }
   return true;
 }
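
ShowFrameInStackTrace is a small stateful filter: frames are hidden until the caller has been seen, and recognizable builtin frames are hidden afterwards. A toy version of the same shape (the builtin test here is just a flag, not V8's heuristics):

    #include <cstdio>
    #include <vector>

    struct Frame {
      const char* name;
      bool is_caller;
      bool is_builtin;
    };

    // Hide frames until the caller has been seen (the caller itself stays
    // hidden), then hide recognizable builtin frames.
    bool ShowFrame(const Frame& f, bool* seen_caller) {
      if (f.is_caller && !*seen_caller) {
        *seen_caller = true;
        return false;
      }
      if (!*seen_caller) return false;
      return !f.is_builtin;
    }

    int main() {
      // Top of stack first, as a stack walk would visit them.
      std::vector<Frame> stack = {
          {"captureStackTrace", true, false},
          {"ArrayForEach", false, true},
          {"userCallback", false, false},
      };
      bool seen_caller = false;
      for (const Frame& f : stack) {
        if (ShowFrame(f, &seen_caller)) std::printf("%s\n", f.name);
      }
      return 0;  // prints only: userCallback
    }
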
@@ -12953,10 +12865,9 @@
 // element segments each containing a receiver, function, code and
 // native code offset.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
-  ASSERT_EQ(args.length(), 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
-  Handle<Object> caller = args.at<Object>(1);
-  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
+  ASSERT_EQ(args.length(), 2);
+  Handle<Object> caller = args.at<Object>(0);
+  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);
 
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
@@ -13006,8 +12917,6 @@
     iter.Advance();
   }
   Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
-  // Capture and attach a more detailed stack trace if necessary.
-  isolate->CaptureAndSetCurrentStackTraceFor(error_object);
   result->set_length(Smi::FromInt(cursor));
   return *result;
 }
@@ -13039,7 +12948,7 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
   // This is only called from codegen, so checks might be more lax.
-  CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
+  CONVERT_CHECKED(JSFunctionResultCache, cache, args[0]);
   Object* key = args[1];
 
   int finger_index = cache->finger_index();
@@ -13082,20 +12991,18 @@
     // TODO(antonm): consider passing a receiver when constructing a cache.
     Handle<Object> receiver(isolate->global_context()->global());
     // This handle is neither shared nor used later, so it's safe.
-    Handle<Object> argv[] = { key_handle };
-    bool pending_exception;
+    Object** argv[] = { key_handle.location() };
+    bool pending_exception = false;
     value = Execution::Call(factory,
                             receiver,
-                            ARRAY_SIZE(argv),
+                            1,
                             argv,
                             &pending_exception);
     if (pending_exception) return Failure::Exception();
   }
 
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    cache_handle->JSFunctionResultCacheVerify();
-  }
+  cache_handle->JSFunctionResultCacheVerify();
 #endif
 
   // Function invocation may have cleared the cache.  Reread all the data.
@@ -13124,9 +13031,7 @@
   cache_handle->set_finger_index(index);
 
 #ifdef DEBUG
-  if (FLAG_verify_heap) {
-    cache_handle->JSFunctionResultCacheVerify();
-  }
+  cache_handle->JSFunctionResultCacheVerify();
 #endif
 
   return *value;
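
The comment "Function invocation may have cleared the cache" captures the key invariant here: calling back into JS can mutate the cache, so any state read before the call must be re-read afterwards. A toy illustration (not the JSFunctionResultCache layout):

    #include <functional>
    #include <map>

    struct ToyCache {
      std::map<int, int> entries;
    };

    int GetFromCache(ToyCache* cache, int key,
                     const std::function<int(int)>& factory) {
      auto it = cache->entries.find(key);
      if (it == cache->entries.end()) {
        int value = factory(key);  // may reenter and mutate the cache
        // Re-lookup instead of reusing 'it': the callback may have inserted
        // or cleared entries in the meantime.
        it = cache->entries.find(key);
        if (it == cache->entries.end()) {
          it = cache->entries.insert(std::make_pair(key, value)).first;
        }
      }
      return it->second;
    }
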
@@ -13135,8 +13040,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1);
+  CONVERT_ARG_CHECKED(String, type, 0);
+  CONVERT_ARG_CHECKED(JSArray, arguments, 1);
   return *isolate->factory()->NewJSMessageObject(
       type,
       arguments,
@@ -13149,25 +13054,25 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
-  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+  CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->type();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
-  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+  CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->arguments();
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
-  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+  CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return Smi::FromInt(message->start_position());
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
-  CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+  CONVERT_CHECKED(JSMessageObject, message, args[0]);
   return message->script();
 }
 
@@ -13221,8 +13126,8 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, format, 0);
-  CONVERT_ARG_CHECKED(JSArray, elms, 1);
+  CONVERT_CHECKED(String, format, args[0]);
+  CONVERT_CHECKED(JSArray, elms, args[1]);
   String::FlatContent format_content = format->GetFlatContent();
   RUNTIME_ASSERT(format_content.IsAscii());
   Vector<const char> chars = format_content.ToAsciiVector();
@@ -13239,11 +13144,10 @@
 
 #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name)        \
   RUNTIME_FUNCTION(MaybeObject*, Runtime_Has##Name) {     \
-    CONVERT_ARG_CHECKED(JSObject, obj, 0);              \
+    CONVERT_CHECKED(JSObject, obj, args[0]);              \
     return isolate->heap()->ToBoolean(obj->Has##Name());  \
   }
 
-ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13260,14 +13164,6 @@
 
 #undef ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
 
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSObject, obj1, 0);
-  CONVERT_ARG_CHECKED(JSObject, obj2, 1);
-  return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
-}
-
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
@@ -13335,20 +13231,14 @@
   Isolate* isolate = Isolate::Current();
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
-    if (isolate->heap()->new_space()->AddFreshPage()) {
-      return;
-    }
-
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
-    isolate->heap()->CollectGarbage(failure->allocation_space(),
-                                    "Runtime::PerformGC");
+    isolate->heap()->CollectGarbage(failure->allocation_space());
   } else {
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     isolate->counters()->gc_last_resort_from_js()->Increment();
-    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
-                                       "Runtime::PerformGC");
+    isolate->heap()->CollectAllGarbage(false);
   }
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index fe9cfd9..1538b7d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -69,6 +69,9 @@
   \
   F(GetPrototype, 1, 1) \
   F(IsInPrototypeChain, 2, 1) \
+  F(SetHiddenPrototype, 2, 1) \
+  \
+  F(IsConstructCall, 0, 1) \
   \
   F(GetOwnProperty, 2, 1) \
   \
@@ -77,7 +80,6 @@
   \
   /* Utilities */ \
   F(CheckIsBootstrapping, 0, 1) \
-  F(Call, -1 /* >= 2 */, 1) \
   F(Apply, 5, 1) \
   F(GetFunctionDelegate, 1, 1) \
   F(GetConstructorDelegate, 1, 1) \
@@ -89,14 +91,13 @@
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
   F(RunningInSimulator, 0, 1) \
-  F(OptimizeFunctionOnNextCall, -1, 1) \
+  F(OptimizeFunctionOnNextCall, 1, 1) \
   F(GetOptimizationStatus, 1, 1) \
   F(GetOptimizationCount, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
   F(SetNewFunctionAttributes, 1, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(SetNativeFlag, 1, 1) \
-  F(StoreArrayLiteralElement, 5, 1) \
   \
   /* Array join support */ \
   F(PushIfAbsent, 2, 1) \
@@ -141,7 +142,7 @@
   F(StringAdd, 2, 1) \
   F(StringBuilderConcat, 3, 1) \
   F(StringBuilderJoin, 3, 1) \
-  F(SparseJoinWithSeparator, 3, 1) \
+  F(SparseJoinWithSeparator, 3, 1) \
   \
   /* Bit operations */ \
   F(NumberOr, 2, 1) \
@@ -195,7 +196,6 @@
   F(StringLocaleCompare, 2, 1) \
   F(SubString, 3, 1) \
   F(StringReplaceRegExpWithString, 4, 1) \
-  F(StringReplaceOneCharWithString, 3, 1) \
   F(StringMatch, 3, 1) \
   F(StringTrim, 3, 1) \
   F(StringToArray, 2, 1) \
@@ -211,14 +211,14 @@
   /* Reflection */ \
   F(FunctionSetInstanceClassName, 2, 1) \
   F(FunctionSetLength, 2, 1) \
+  F(BoundFunctionSetLength, 2, 1) \
   F(FunctionSetPrototype, 2, 1) \
   F(FunctionSetReadOnlyPrototype, 1, 1) \
   F(FunctionGetName, 1, 1) \
   F(FunctionSetName, 2, 1) \
   F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
   F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
-  F(FunctionBindArguments, 4, 1) \
-  F(BoundFunctionGetBindings, 1, 1) \
+  F(FunctionSetBound, 1, 1) \
   F(FunctionRemovePrototype, 1, 1) \
   F(FunctionGetSourceCode, 1, 1) \
   F(FunctionGetScript, 1, 1) \
@@ -227,7 +227,7 @@
   F(FunctionIsAPIFunction, 1, 1) \
   F(FunctionIsBuiltin, 1, 1) \
   F(GetScript, 1, 1) \
-  F(CollectStackTrace, 3, 1) \
+  F(CollectStackTrace, 2, 1) \
   F(GetV8Version, 0, 1) \
   \
   F(ClassOf, 1, 1) \
@@ -244,9 +244,10 @@
   F(DateCurrentTime, 0, 1) \
   F(DateParseString, 2, 1) \
   F(DateLocalTimezone, 1, 1) \
-  F(DateToUTC, 1, 1) \
-  F(DateMakeDay, 2, 1) \
-  F(DateSetValue, 3, 1) \
+  F(DateLocalTimeOffset, 0, 1) \
+  F(DateDaylightSavingsOffset, 1, 1) \
+  F(DateMakeDay, 3, 1) \
+  F(DateYMDFromTime, 2, 1) \
   \
   /* Numbers */ \
   \
@@ -256,7 +257,8 @@
   \
   /* Eval */ \
   F(GlobalReceiver, 1, 1) \
-  F(ResolvePossiblyDirectEval, 5, 2) \
+  F(ResolvePossiblyDirectEval, 4, 2) \
+  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
   \
   F(SetProperty, -1 /* 4 or 5 */, 1) \
   F(DefineOrRedefineDataProperty, 4, 1) \
@@ -271,10 +273,14 @@
   F(SwapElements, 3, 1) \
   \
   /* Getters and Setters */ \
+  F(DefineAccessor, -1 /* 4 or 5 */, 1) \
   F(LookupAccessor, 3, 1) \
   \
   /* Literals */ \
   F(MaterializeRegExpLiteral, 4, 1)\
+  F(CreateArrayLiteralBoilerplate, 3, 1) \
+  F(CloneLiteralBoilerplate, 1, 1) \
+  F(CloneShallowLiteralBoilerplate, 1, 1) \
   F(CreateObjectLiteral, 4, 1) \
   F(CreateObjectLiteralShallow, 4, 1) \
   F(CreateArrayLiteral, 3, 1) \
@@ -290,17 +296,6 @@
   F(GetConstructTrap, 1, 1) \
   F(Fix, 1, 1) \
   \
-  /* Harmony sets */ \
-  F(SetInitialize, 1, 1) \
-  F(SetAdd, 2, 1) \
-  F(SetHas, 2, 1) \
-  F(SetDelete, 2, 1) \
-  \
-  /* Harmony maps */ \
-  F(MapInitialize, 1, 1) \
-  F(MapGet, 2, 1) \
-  F(MapSet, 3, 1) \
-  \
   /* Harmony weakmaps */ \
   F(WeakMapInitialize, 1, 1) \
   F(WeakMapGet, 2, 1) \
@@ -309,13 +304,12 @@
   /* Statements */ \
   F(NewClosure, 3, 1) \
   F(NewObject, 1, 1) \
-  F(NewObjectFromBound, 1, 1) \
+  F(NewObjectFromBound, 2, 1) \
   F(FinalizeInstanceSize, 1, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
   F(StackGuard, 0, 1) \
-  F(Interrupt, 0, 1) \
   F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \
@@ -360,7 +354,6 @@
   F(IS_VAR, 1, 1) \
   \
   /* expose boolean functions from objects-inl.h */ \
-  F(HasFastSmiOnlyElements, 1, 1) \
   F(HasFastElements, 1, 1) \
   F(HasFastDoubleElements, 1, 1) \
   F(HasDictionaryElements, 1, 1) \
@@ -374,9 +367,6 @@
   F(HasExternalUnsignedIntElements, 1, 1) \
   F(HasExternalFloatElements, 1, 1) \
   F(HasExternalDoubleElements, 1, 1) \
-  F(TransitionElementsSmiToDouble, 1, 1) \
-  F(TransitionElementsDoubleToObject, 1, 1) \
-  F(HaveSameMap, 2, 1) \
   /* profiler */ \
   F(ProfilerResume, 0, 1) \
   F(ProfilerPause, 0, 1)
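
These F(name, argc, result_size) tables are X-macros: the same list is expanded under different definitions of F to generate ids, declarations, and per-function metadata that cannot drift apart. A toy version of the technique:

    #include <cstdio>

    #define TOY_FUNCTION_LIST(F) \
      F(Add, 2)                  \
      F(Negate, 1)

    // Expansion 1: an enum of function ids.
    #define DECLARE_ID(Name, argc) k##Name,
    enum FunctionId { TOY_FUNCTION_LIST(DECLARE_ID) kFunctionCount };
    #undef DECLARE_ID

    // Expansion 2: a parallel table of argument counts.
    #define DECLARE_ARGC(Name, argc) argc,
    static const int kArgc[] = { TOY_FUNCTION_LIST(DECLARE_ARGC) };
    #undef DECLARE_ARGC

    int main() {
      std::printf("Negate takes %d arg(s)\n", kArgc[kNegate]);  // 1
      return 0;
    }
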
@@ -418,7 +408,6 @@
   F(DebugReferencedBy, 3, 1) \
   F(DebugConstructedBy, 2, 1) \
   F(DebugGetPrototype, 1, 1) \
-  F(DebugSetScriptSource, 2, 1) \
   F(SystemBreak, 0, 1) \
   F(DebugDisassembleFunction, 1, 1) \
   F(DebugDisassembleConstructor, 1, 1) \
@@ -486,13 +475,11 @@
   F(IsNonNegativeSmi, 1, 1)                                                  \
   F(IsArray, 1, 1)                                                           \
   F(IsRegExp, 1, 1)                                                          \
-  F(IsConstructCall, 0, 1)                                                   \
   F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
   F(ArgumentsLength, 0, 1)                                                   \
   F(Arguments, 1, 1)                                                         \
   F(ValueOf, 1, 1)                                                           \
   F(SetValueOf, 2, 1)                                                        \
-  F(DateField, 2 /* date object, field index */, 1)                          \
   F(StringCharFromCode, 1, 1)                                                \
   F(StringCharAt, 2, 1)                                                      \
   F(ObjectEquals, 2, 1)                                                      \
@@ -505,7 +492,6 @@
   F(MathPow, 2, 1)                                                           \
   F(MathSin, 1, 1)                                                           \
   F(MathCos, 1, 1)                                                           \
-  F(MathTan, 1, 1)                                                           \
   F(MathSqrt, 1, 1)                                                          \
   F(MathLog, 1, 1)                                                           \
   F(IsRegExpEquivalent, 2, 1)                                                \
@@ -520,6 +506,7 @@
 // a corresponding runtime function, that is called for slow cases.
 // Entries have the form F(name, number of arguments, number of return values).
 #define INLINE_RUNTIME_FUNCTION_LIST(F) \
+  F(IsConstructCall, 0, 1)                                                   \
   F(ClassOf, 1, 1)                                                           \
   F(StringCharCodeAt, 2, 1)                                                  \
   F(Log, 3, 1)                                                               \
@@ -629,13 +616,6 @@
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
 
-  static Handle<String> StringReplaceOneCharWithString(Isolate* isolate,
-                                                       Handle<String> subject,
-                                                       Handle<String> search,
-                                                       Handle<String> replace,
-                                                       bool* found,
-                                                       int recursion_limit);
-
   // General-purpose helper functions for runtime system.
   static int StringMatch(Isolate* isolate,
                          Handle<String> sub,
@@ -644,14 +624,16 @@
 
   static bool IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch);
 
-  // TODO(1240886): Some of the following methods are *not* handle safe, but
-  // accept handle arguments. This seems fragile.
+  // TODO(1240886): The following three methods are *not* handle safe,
+  // but accept handle arguments. This seems fragile.
 
   // Support getting the characters in a string using [] notation as
   // in Firefox/SpiderMonkey, Safari and Opera.
   MUST_USE_RESULT static MaybeObject* GetElementOrCharAt(Isolate* isolate,
                                                          Handle<Object> object,
                                                          uint32_t index);
+  MUST_USE_RESULT static MaybeObject* GetElement(Handle<Object> object,
+                                                 uint32_t index);
 
   MUST_USE_RESULT static MaybeObject* SetObjectProperty(
       Isolate* isolate,
@@ -685,21 +667,17 @@
 
   // Helper functions used stubs.
   static void PerformGC(Object* result);
-
-  // Used in runtime.cc and hydrogen's VisitArrayLiteral.
-  static Handle<Object> CreateArrayLiteralBoilerplate(
-      Isolate* isolate,
-      Handle<FixedArray> literals,
-      Handle<FixedArray> elements);
 };
 
 
 //---------------------------------------------------------------------------
 // Constants used by interface to runtime functions.
 
-class DeclareGlobalsEvalFlag:     public BitField<bool,         0, 1> {};
-class DeclareGlobalsNativeFlag:   public BitField<bool,         1, 1> {};
-class DeclareGlobalsLanguageMode: public BitField<LanguageMode, 2, 2> {};
+enum kDeclareGlobalsFlags {
+  kDeclareGlobalsEvalFlag = 1 << 0,
+  kDeclareGlobalsStrictModeFlag = 1 << 1,
+  kDeclareGlobalsNativeFlag = 1 << 2
+};
 
 } }  // namespace v8::internal
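
The hunk above swaps typed BitField encodings for plain enum masks; both pack the same booleans into one small integer passed to the runtime (the BitField version additionally reserves two bits for a language mode where the enum had a single strict-mode bit). A side-by-side sketch, with a simplified BitField in the spirit of the replaced classes:

    #include <cstdint>
    #include <cstdio>

    template <typename T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> shift) & ((1u << size) - 1));
      }
    };

    class EvalFlag   : public BitField<bool, 0, 1> {};
    class NativeFlag : public BitField<bool, 1, 1> {};

    // Enum-mask style, as in the reverted declaration.
    enum DeclareGlobalsFlags {
      kEvalFlag   = 1 << 0,
      kNativeFlag = 1 << 1
    };

    int main() {
      uint32_t a = EvalFlag::encode(true) | NativeFlag::encode(false);
      uint32_t b = kEvalFlag;  // the same packed value
      std::printf("%u %u eval=%d\n", a, b, EvalFlag::decode(a) ? 1 : 0);
      return 0;
    }
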
 
diff --git a/src/runtime.js b/src/runtime.js
index 53d9a39..14ff1b6 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -39,16 +39,16 @@
    -----------------------------------
 */
 
-// The following declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid redeclaration errors.
-var $Object = global.Object;
-var $Array = global.Array;
-var $String = global.String;
-var $Number = global.Number;
-var $Function = global.Function;
-var $Boolean = global.Boolean;
-var $NaN = 0/0;
-var builtins = this;
+// The following const declarations are shared with other native JS files.
+// They are all declared at this one spot to avoid const redeclaration errors.
+const $Object = global.Object;
+const $Array = global.Array;
+const $String = global.String;
+const $Number = global.Number;
+const $Function = global.Function;
+const $Boolean = global.Boolean;
+const $NaN = 0/0;
+const builtins = this;
 
 // ECMA-262 Section 11.9.3.
 function EQUALS(y) {
@@ -355,7 +355,7 @@
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) ?
+  return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
     %HasElement(x, this) : %HasProperty(x, %ToString(this));
 }
 
@@ -375,12 +375,6 @@
     return 1;
   }
 
-  // Check if function is bound, if so, get [[BoundFunction]] from it
-  // and use that instead of F.
-  var bindings = %BoundFunctionGetBindings(F);
-  if (bindings) {
-    F = bindings[kBoundFunctionIndex];  // Always a non-bound function.
-  }
   // Get the prototype of F; if it is not an object, throw an error.
   var O = F.prototype;
   if (!IS_SPEC_OBJECT(O)) {
@@ -392,6 +386,13 @@
 }
 
 
+// Get an array of property keys for the given object. Used in
+// for-in statements.
+function GET_KEYS() {
+  return %GetPropertyNames(this);
+}
+
+
 // Filter a given key against an object by checking if the object
 // has a property with the given key; return the key as a string if
 // it has. Otherwise returns 0 (smi). Used in for-in statements.
@@ -428,10 +429,20 @@
 }
 
 
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
-  var proxy = this;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
+  var arity = %_ArgumentsLength() - 1;
   var trap = %GetConstructTrap(proxy);
-  return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
+  var receiver = void 0;
+  if (!IS_UNDEFINED(trap)) {
+    trap = %GetCallTrap(proxy);
+    var proto = proxy.prototype;
+    if (!IS_SPEC_OBJECT(proto) && proto !== null) {
+      throw MakeTypeError("proto_object_or_null", [proto]);
+    }
+    receiver = new global.Object();
+    receiver.__proto__ = proto;
+  }
+  return %Apply(trap, this, arguments, 1, arity);
 }
 
 
@@ -458,12 +469,11 @@
   }
 
   if (!IS_SPEC_FUNCTION(this)) {
-    throw %MakeTypeError('apply_non_function',
-                         [ %ToString(this), typeof this ]);
+    throw %MakeTypeError('apply_non_function', [ %ToString(this), typeof this ]);
   }
 
   // Make sure the arguments list has the right type.
-  if (args != null && !IS_SPEC_OBJECT(args)) {
+  if (args != null && !IS_ARRAY(args) && !IS_ARGUMENTS(args)) {
     throw %MakeTypeError('apply_wrong_args', []);
   }
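
The IN operator hunk above gates its fast path on %_IsNonNegativeSmi. A
hedged sketch of what that predicate amounts to on a 32-bit build, where
V8 tags small integers (smis) with a clear low bit; the function below is
illustrative, not the actual intrinsic:

    #include <cstdint>

    static inline bool IsNonNegativeSmi(uintptr_t tagged) {
      const uintptr_t kSmiTagMask = 1;  // assumption: smi tag occupies bit 0
      const uintptr_t kSignBit =
          static_cast<uintptr_t>(1) << (sizeof(uintptr_t) * 8 - 1);
      // A word is a non-negative smi iff its tag bit and sign bit are clear.
      return (tagged & (kSmiTagMask | kSignBit)) == 0;
    }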
 
diff --git a/src/scanner-character-streams.cc b/src/scanner-character-streams.cc
index 56b9f03..ee10703 100644
--- a/src/scanner-character-streams.cc
+++ b/src/scanner-character-streams.cc
@@ -36,19 +36,19 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// BufferedUtf16CharacterStreams
+// BufferedUC16CharacterStreams
 
-BufferedUtf16CharacterStream::BufferedUtf16CharacterStream()
-    : Utf16CharacterStream(),
+BufferedUC16CharacterStream::BufferedUC16CharacterStream()
+    : UC16CharacterStream(),
       pushback_limit_(NULL) {
   // Initialize buffer as being empty. First read will fill the buffer.
   buffer_cursor_ = buffer_;
   buffer_end_ = buffer_;
 }
 
-BufferedUtf16CharacterStream::~BufferedUtf16CharacterStream() { }
+BufferedUC16CharacterStream::~BufferedUC16CharacterStream() { }
 
-void BufferedUtf16CharacterStream::PushBack(uc32 character) {
+void BufferedUC16CharacterStream::PushBack(uc32 character) {
   if (character == kEndOfInput) {
     pos_--;
     return;
@@ -63,7 +63,7 @@
 }
 
 
-void BufferedUtf16CharacterStream::SlowPushBack(uc16 character) {
+void BufferedUC16CharacterStream::SlowPushBack(uc16 character) {
   // In pushback mode, the end of the buffer contains pushback,
   // and the start of the buffer (from buffer start to pushback_limit_)
   // contains valid data that comes just after the pushback.
@@ -89,7 +89,7 @@
 }
 
 
-bool BufferedUtf16CharacterStream::ReadBlock() {
+bool BufferedUC16CharacterStream::ReadBlock() {
   buffer_cursor_ = buffer_;
   if (pushback_limit_ != NULL) {
     // Leave pushback mode.
@@ -106,7 +106,7 @@
 }
 
 
-unsigned BufferedUtf16CharacterStream::SlowSeekForward(unsigned delta) {
+unsigned BufferedUC16CharacterStream::SlowSeekForward(unsigned delta) {
   // Leave pushback mode (i.e., ignore that there might be valid data
   // in the buffer before the pushback_limit_ point).
   pushback_limit_ = NULL;
@@ -114,10 +114,10 @@
 }
 
 // ----------------------------------------------------------------------------
-// GenericStringUtf16CharacterStream
+// GenericStringUC16CharacterStream
 
 
-GenericStringUtf16CharacterStream::GenericStringUtf16CharacterStream(
+GenericStringUC16CharacterStream::GenericStringUC16CharacterStream(
     Handle<String> data,
     unsigned start_position,
     unsigned end_position)
@@ -130,10 +130,10 @@
 }
 
 
-GenericStringUtf16CharacterStream::~GenericStringUtf16CharacterStream() { }
+GenericStringUC16CharacterStream::~GenericStringUC16CharacterStream() { }
 
 
-unsigned GenericStringUtf16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned GenericStringUC16CharacterStream::BufferSeekForward(unsigned delta) {
   unsigned old_pos = pos_;
   pos_ = Min(pos_ + delta, length_);
   ReadBlock();
@@ -141,7 +141,7 @@
 }
 
 
-unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos,
+unsigned GenericStringUC16CharacterStream::FillBuffer(unsigned from_pos,
                                                       unsigned length) {
   if (from_pos >= length_) return 0;
   if (from_pos + length > length_) {
@@ -153,10 +153,10 @@
 
 
 // ----------------------------------------------------------------------------
-// Utf8ToUtf16CharacterStream
-Utf8ToUtf16CharacterStream::Utf8ToUtf16CharacterStream(const byte* data,
-                                                       unsigned length)
-    : BufferedUtf16CharacterStream(),
+// Utf8ToUC16CharacterStream
+Utf8ToUC16CharacterStream::Utf8ToUC16CharacterStream(const byte* data,
+                                                     unsigned length)
+    : BufferedUC16CharacterStream(),
       raw_data_(data),
       raw_data_length_(length),
       raw_data_pos_(0),
@@ -165,10 +165,10 @@
 }
 
 
-Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
+Utf8ToUC16CharacterStream::~Utf8ToUC16CharacterStream() { }
 
 
-unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
+unsigned Utf8ToUC16CharacterStream::BufferSeekForward(unsigned delta) {
   unsigned old_pos = pos_;
   unsigned target_pos = pos_ + delta;
   SetRawPosition(target_pos);
@@ -178,9 +178,9 @@
 }
 
 
-unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
-                                                unsigned length) {
-  static const unibrow::uchar kMaxUtf16Character = 0xffff;
+unsigned Utf8ToUC16CharacterStream::FillBuffer(unsigned char_position,
+                                               unsigned length) {
+  static const unibrow::uchar kMaxUC16Character = 0xffff;
   SetRawPosition(char_position);
   if (raw_character_position_ != char_position) {
     // char_position was not a valid position in the stream (hit the end
@@ -188,7 +188,7 @@
     return 0u;
   }
   unsigned i = 0;
-  while (i < length - 1) {
+  while (i < length) {
     if (raw_data_pos_ == raw_data_length_) break;
     unibrow::uchar c = raw_data_[raw_data_pos_];
     if (c <= unibrow::Utf8::kMaxOneByteChar) {
@@ -197,13 +197,12 @@
       c =  unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
                                          raw_data_length_ - raw_data_pos_,
                                          &raw_data_pos_);
+      // Don't allow characters outside of the BMP.
+      if (c > kMaxUC16Character) {
+        c = unibrow::Utf8::kBadChar;
+      }
     }
-    if (c > kMaxUtf16Character) {
-      buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
-      buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
-    } else {
-      buffer_[i++] = static_cast<uc16>(c);
-    }
+    buffer_[i++] = static_cast<uc16>(c);
   }
   raw_character_position_ = char_position + i;
   return i;
@@ -267,52 +266,37 @@
 }
 
 
-// This can't set a raw position between two surrogate pairs, since there
-// is no position in the UTF8 stream that corresponds to that.  This assumes
-// that the surrogate pair is correctly coded as a 4 byte UTF-8 sequence.  If
-// it is illegally coded as two 3 byte sequences then there is no problem here.
-void Utf8ToUtf16CharacterStream::SetRawPosition(unsigned target_position) {
+void Utf8ToUC16CharacterStream::SetRawPosition(unsigned target_position) {
   if (raw_character_position_ > target_position) {
     // Spool backwards in utf8 buffer.
     do {
-      int old_pos = raw_data_pos_;
       Utf8CharacterBack(raw_data_, &raw_data_pos_);
       raw_character_position_--;
-      ASSERT(old_pos - raw_data_pos_ <= 4);
-      // Step back over both code units for surrogate pairs.
-      if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
     } while (raw_character_position_ > target_position);
-    // No surrogate pair splitting.
-    ASSERT(raw_character_position_ == target_position);
     return;
   }
   // Spool forwards in the utf8 buffer.
   while (raw_character_position_ < target_position) {
     if (raw_data_pos_ == raw_data_length_) return;
-    int old_pos = raw_data_pos_;
     Utf8CharacterForward(raw_data_, &raw_data_pos_);
     raw_character_position_++;
-    ASSERT(raw_data_pos_ - old_pos <= 4);
-    if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
   }
-  // No surrogate pair splitting.
-  ASSERT(raw_character_position_ == target_position);
 }
 
 
 // ----------------------------------------------------------------------------
-// ExternalTwoByteStringUtf16CharacterStream
+// ExternalTwoByteStringUC16CharacterStream
 
-ExternalTwoByteStringUtf16CharacterStream::
-    ~ExternalTwoByteStringUtf16CharacterStream() { }
+ExternalTwoByteStringUC16CharacterStream::
+    ~ExternalTwoByteStringUC16CharacterStream() { }
 
 
-ExternalTwoByteStringUtf16CharacterStream
-    ::ExternalTwoByteStringUtf16CharacterStream(
+ExternalTwoByteStringUC16CharacterStream
+    ::ExternalTwoByteStringUC16CharacterStream(
         Handle<ExternalTwoByteString> data,
         int start_position,
         int end_position)
-    : Utf16CharacterStream(),
+    : UC16CharacterStream(),
       source_(data),
       raw_data_(data->GetTwoByteData(start_position)) {
   buffer_cursor_ = raw_data_;
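
The FillBuffer hunk above drops surrogate-pair emission: after the
revert, any decoded code point beyond the Basic Multilingual Plane is
replaced by unibrow::Utf8::kBadChar instead of being split into two
UTF-16 code units. A minimal sketch of that clamping policy, assuming
kBadChar is the U+FFFD replacement character:

    typedef unsigned short uc16;

    uc16 ClampToBMP(unsigned code_point) {
      const unsigned kMaxUC16Character = 0xFFFF;  // mirrors the constant above
      const uc16 kBadChar = 0xFFFD;  // assumed value of unibrow::Utf8::kBadChar
      return code_point > kMaxUC16Character ? kBadChar
                                            : static_cast<uc16>(code_point);
    }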
diff --git a/src/scanner-character-streams.h b/src/scanner-character-streams.h
index 319ee8f..5c4ea2c 100644
--- a/src/scanner-character-streams.h
+++ b/src/scanner-character-streams.h
@@ -36,10 +36,10 @@
 // A buffered character stream based on a random access character
 // source (ReadBlock can be called with pos_ pointing to any position,
 // even positions before the current).
-class BufferedUtf16CharacterStream: public Utf16CharacterStream {
+class BufferedUC16CharacterStream: public UC16CharacterStream {
  public:
-  BufferedUtf16CharacterStream();
-  virtual ~BufferedUtf16CharacterStream();
+  BufferedUC16CharacterStream();
+  virtual ~BufferedUC16CharacterStream();
 
   virtual void PushBack(uc32 character);
 
@@ -60,12 +60,12 @@
 
 
 // Generic string stream.
-class GenericStringUtf16CharacterStream: public BufferedUtf16CharacterStream {
+class GenericStringUC16CharacterStream: public BufferedUC16CharacterStream {
  public:
-  GenericStringUtf16CharacterStream(Handle<String> data,
-                                    unsigned start_position,
-                                    unsigned end_position);
-  virtual ~GenericStringUtf16CharacterStream();
+  GenericStringUC16CharacterStream(Handle<String> data,
+                                   unsigned start_position,
+                                   unsigned end_position);
+  virtual ~GenericStringUC16CharacterStream();
 
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
@@ -77,11 +77,11 @@
 };
 
 
-// Utf16 stream based on a literal UTF-8 string.
-class Utf8ToUtf16CharacterStream: public BufferedUtf16CharacterStream {
+// UC16 stream based on a literal UTF-8 string.
+class Utf8ToUC16CharacterStream: public BufferedUC16CharacterStream {
  public:
-  Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
-  virtual ~Utf8ToUtf16CharacterStream();
+  Utf8ToUC16CharacterStream(const byte* data, unsigned length);
+  virtual ~Utf8ToUC16CharacterStream();
 
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
@@ -98,12 +98,12 @@
 
 
 // UTF16 buffer to read characters from an external string.
-class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
+class ExternalTwoByteStringUC16CharacterStream: public UC16CharacterStream {
  public:
-  ExternalTwoByteStringUtf16CharacterStream(Handle<ExternalTwoByteString> data,
-                                            int start_position,
-                                            int end_position);
-  virtual ~ExternalTwoByteStringUtf16CharacterStream();
+  ExternalTwoByteStringUC16CharacterStream(Handle<ExternalTwoByteString> data,
+                                           int start_position,
+                                           int end_position);
+  virtual ~ExternalTwoByteStringUC16CharacterStream();
 
   virtual void PushBack(uc32 character) {
     ASSERT(buffer_cursor_ > raw_data_);
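
The header above restores the pre-rename hierarchy: UC16CharacterStream
is the abstract base, BufferedUC16CharacterStream adds the buffer and
pushback machinery, and the generic-string, UTF-8, and external two-byte
variants specialize it. A hedged usage sketch, assuming the V8-internal
types declared above are in scope; the function itself is illustrative:

    static unsigned CountCharacters(const byte* utf8, unsigned length) {
      Utf8ToUC16CharacterStream stream(utf8, length);
      unsigned count = 0;
      // Advance() returns a negative value once the input is exhausted.
      for (uc32 c = stream.Advance(); c >= 0; c = stream.Advance()) {
        count++;
      }
      return count;
    }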
diff --git a/src/scanner.cc b/src/scanner.cc
old mode 100755
new mode 100644
index 7901b5d..69ea8ae
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -36,26 +36,29 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
+// Scanner::LiteralScope
+
+Scanner::LiteralScope::LiteralScope(Scanner* self)
+    : scanner_(self), complete_(false) {
+  self->StartLiteral();
+}
+
+
+Scanner::LiteralScope::~LiteralScope() {
+  if (!complete_) scanner_->DropLiteral();
+}
+
+
+void Scanner::LiteralScope::Complete() {
+  scanner_->TerminateLiteral();
+  complete_ = true;
+}
+
+// ----------------------------------------------------------------------------
 // Scanner
 
 Scanner::Scanner(UnicodeCache* unicode_cache)
-    : unicode_cache_(unicode_cache),
-      octal_pos_(Location::invalid()),
-      harmony_scoping_(false),
-      harmony_modules_(false) { }
-
-
-void Scanner::Initialize(Utf16CharacterStream* source) {
-  source_ = source;
-  // Need to capture identifiers in order to recognize "get" and "set"
-  // in object literals.
-  Init();
-  // Skip initial whitespace allowing HTML comment ends just like
-  // after a newline and scan first token.
-  has_line_terminator_before_next_ = true;
-  SkipWhiteSpace();
-  Scan();
-}
+    : unicode_cache_(unicode_cache) { }
 
 
 uc32 Scanner::ScanHexNumber(int expected_length) {
@@ -85,6 +88,29 @@
 }
 
 
+
+// ----------------------------------------------------------------------------
+// JavaScriptScanner
+
+JavaScriptScanner::JavaScriptScanner(UnicodeCache* scanner_constants)
+    : Scanner(scanner_constants),
+      octal_pos_(Location::invalid()),
+      harmony_block_scoping_(false) { }
+
+
+void JavaScriptScanner::Initialize(UC16CharacterStream* source) {
+  source_ = source;
+  // Need to capture identifiers in order to recognize "get" and "set"
+  // in object literals.
+  Init();
+  // Skip initial whitespace allowing HTML comment ends just like
+  // after a newline and scan first token.
+  has_line_terminator_before_next_ = true;
+  SkipWhiteSpace();
+  Scan();
+}
+
+
 // Ensure that tokens can be stored in a byte.
 STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
@@ -221,7 +247,7 @@
 };
 
 
-Token::Value Scanner::Next() {
+Token::Value JavaScriptScanner::Next() {
   current_ = next_;
   has_line_terminator_before_next_ = false;
   has_multiline_comment_before_next_ = false;
@@ -253,7 +279,7 @@
 }
 
 
-bool Scanner::SkipWhiteSpace() {
+bool JavaScriptScanner::SkipWhiteSpace() {
   int start_position = source_pos();
 
   while (true) {
@@ -293,7 +319,7 @@
 }
 
 
-Token::Value Scanner::SkipSingleLineComment() {
+Token::Value JavaScriptScanner::SkipSingleLineComment() {
   Advance();
 
   // The line terminator at the end of the line is not considered
@@ -309,7 +335,7 @@
 }
 
 
-Token::Value Scanner::SkipMultiLineComment() {
+Token::Value JavaScriptScanner::SkipMultiLineComment() {
   ASSERT(c0_ == '*');
   Advance();
 
@@ -335,7 +361,7 @@
 }
 
 
-Token::Value Scanner::ScanHtmlComment() {
+Token::Value JavaScriptScanner::ScanHtmlComment() {
   // Check for <!-- comments.
   ASSERT(c0_ == '!');
   Advance();
@@ -350,7 +376,7 @@
 }
 
 
-void Scanner::Scan() {
+void JavaScriptScanner::Scan() {
   next_.literal_chars = NULL;
   Token::Value token;
   do {
@@ -590,7 +616,7 @@
 }
 
 
-void Scanner::SeekForward(int pos) {
+void JavaScriptScanner::SeekForward(int pos) {
   // After this call, we will have the token at the given position as
   // the "next" token. The "current" token will be invalid.
   if (pos == next_.location.beg_pos) return;
@@ -611,7 +637,7 @@
 }
 
 
-void Scanner::ScanEscape() {
+void JavaScriptScanner::ScanEscape() {
   uc32 c = c0_;
   Advance();
 
@@ -663,7 +689,7 @@
 
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262. Other JS VMs support them.
-uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+uc32 JavaScriptScanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
   int i = 0;
   for (; i < length; i++) {
@@ -686,7 +712,7 @@
 }
 
 
-Token::Value Scanner::ScanString() {
+Token::Value JavaScriptScanner::ScanString() {
   uc32 quote = c0_;
   Advance();  // consume quote
 
@@ -710,13 +736,13 @@
 }
 
 
-void Scanner::ScanDecimalDigits() {
+void JavaScriptScanner::ScanDecimalDigits() {
   while (IsDecimalDigit(c0_))
     AddLiteralCharAdvance();
 }
 
 
-Token::Value Scanner::ScanNumber(bool seen_period) {
+Token::Value JavaScriptScanner::ScanNumber(bool seen_period) {
   ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
   enum { DECIMAL, HEX, OCTAL } kind = DECIMAL;
@@ -801,7 +827,7 @@
 }
 
 
-uc32 Scanner::ScanIdentifierUnicodeEscape() {
+uc32 JavaScriptScanner::ScanIdentifierUnicodeEscape() {
   Advance();
   if (c0_ != 'u') return -1;
   Advance();
@@ -831,8 +857,7 @@
   KEYWORD_GROUP('e')                                                \
   KEYWORD("else", Token::ELSE)                                      \
   KEYWORD("enum", Token::FUTURE_RESERVED_WORD)                      \
-  KEYWORD("export", harmony_modules                                 \
-                    ? Token::EXPORT : Token::FUTURE_RESERVED_WORD)  \
+  KEYWORD("export", Token::FUTURE_RESERVED_WORD)                    \
   KEYWORD("extends", Token::FUTURE_RESERVED_WORD)                   \
   KEYWORD_GROUP('f')                                                \
   KEYWORD("false", Token::FALSE_LITERAL)                            \
@@ -842,13 +867,12 @@
   KEYWORD_GROUP('i')                                                \
   KEYWORD("if", Token::IF)                                          \
   KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD)         \
-  KEYWORD("import", harmony_modules                                 \
-                    ? Token::IMPORT : Token::FUTURE_RESERVED_WORD)  \
+  KEYWORD("import", Token::FUTURE_RESERVED_WORD)                    \
   KEYWORD("in", Token::IN)                                          \
   KEYWORD("instanceof", Token::INSTANCEOF)                          \
   KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)          \
   KEYWORD_GROUP('l')                                                \
-  KEYWORD("let", harmony_scoping                                    \
+  KEYWORD("let", harmony_block_scoping                              \
                  ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
   KEYWORD_GROUP('n')                                                \
   KEYWORD("new", Token::NEW)                                        \
@@ -882,8 +906,7 @@
 
 static Token::Value KeywordOrIdentifierToken(const char* input,
                                              int input_length,
-                                             bool harmony_scoping,
-                                             bool harmony_modules) {
+                                             bool harmony_block_scoping) {
   ASSERT(input_length >= 1);
   const int kMinLength = 2;
   const int kMaxLength = 10;
@@ -921,7 +944,7 @@
 }
 
 
-Token::Value Scanner::ScanIdentifierOrKeyword() {
+Token::Value JavaScriptScanner::ScanIdentifierOrKeyword() {
   ASSERT(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   // Scan identifier start character.
@@ -959,15 +982,14 @@
     Vector<const char> chars = next_.literal_chars->ascii_literal();
     return KeywordOrIdentifierToken(chars.start(),
                                     chars.length(),
-                                    harmony_scoping_,
-                                    harmony_modules_);
+                                    harmony_block_scoping_);
   }
 
   return Token::IDENTIFIER;
 }
 
 
-Token::Value Scanner::ScanIdentifierSuffix(LiteralScope* literal) {
+Token::Value JavaScriptScanner::ScanIdentifierSuffix(LiteralScope* literal) {
   // Scan the rest of the identifier characters.
   while (unicode_cache_->IsIdentifierPart(c0_)) {
     if (c0_ == '\\') {
@@ -990,7 +1012,7 @@
 }
 
 
-bool Scanner::ScanRegExpPattern(bool seen_equal) {
+bool JavaScriptScanner::ScanRegExpPattern(bool seen_equal) {
   // Scan: ('/' | '/=') RegularExpressionBody '/' RegularExpressionFlags
   bool in_character_class = false;
 
@@ -1037,7 +1059,7 @@
 }
 
 
-bool Scanner::ScanLiteralUnicodeEscape() {
+bool JavaScriptScanner::ScanLiteralUnicodeEscape() {
   ASSERT(c0_ == '\\');
   uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
   Advance();
@@ -1067,7 +1089,7 @@
 }
 
 
-bool Scanner::ScanRegExpFlags() {
+bool JavaScriptScanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
   while (unicode_cache_->IsIdentifierPart(c0_)) {
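
scanner.cc above splits scanning into a Scanner base and a
JavaScriptScanner subclass, and restores LiteralScope as an RAII guard
around literal recording. A hedged sketch of the pattern from a scanning
routine's point of view; the routine itself is illustrative, not part of
this patch:

    Token::Value ScanSomeToken(JavaScriptScanner* scanner) {
      // The constructor calls StartLiteral() on the scanner.
      JavaScriptScanner::LiteralScope literal(scanner);
      // ... append characters; any early return here lets the destructor
      // call DropLiteral(), so no half-recorded literal survives.
      literal.Complete();  // keep the literal only on the success path
      return Token::IDENTIFIER;
    }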
diff --git a/src/scanner.h b/src/scanner.h
index 045e7d2..16c3a42 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -41,26 +41,6 @@
 namespace v8 {
 namespace internal {
 
-
-// General collection of (multi-)bit-flags that can be passed to scanners and
-// parsers to signify their (initial) mode of operation.
-enum ParsingFlags {
-  kNoParsingFlags = 0,
-  // Embed LanguageMode values in parsing flags, i.e., equivalent to:
-  // CLASSIC_MODE = 0,
-  // STRICT_MODE,
-  // EXTENDED_MODE,
-  kLanguageModeMask = 0x03,
-  kAllowLazy = 0x04,
-  kAllowNativesSyntax = 0x08,
-  kAllowModules = 0x10
-};
-
-STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
-STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
-STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
-
-
 // Returns the value (0 .. 15) of a hexadecimal character c.
 // If c is not a legal hexadecimal character, returns a value < 0.
 inline int HexValue(uc32 c) {
@@ -73,17 +53,15 @@
 
 
 // ---------------------------------------------------------------------
-// Buffered stream of UTF-16 code units, using an internal UTF-16 buffer.
-// A code unit is a 16 bit value representing either a 16 bit code point
-// or one part of a surrogate pair that make a single 21 bit code point.
+// Buffered stream of characters, using an internal UC16 buffer.
 
-class Utf16CharacterStream {
+class UC16CharacterStream {
  public:
-  Utf16CharacterStream() : pos_(0) { }
-  virtual ~Utf16CharacterStream() { }
+  UC16CharacterStream() : pos_(0) { }
+  virtual ~UC16CharacterStream() { }
 
-  // Returns and advances past the next UTF-16 code unit in the input
-  // stream. If there are no more code units, it returns a negative
+  // Returns and advances past the next UC16 character in the input
+  // stream. If there are no more characters, it returns a negative
   // value.
   inline uc32 Advance() {
     if (buffer_cursor_ < buffer_end_ || ReadBlock()) {
@@ -92,47 +70,47 @@
     }
     // Note: currently the following increment is necessary to avoid a
     // parser problem! The scanner treats the final kEndOfInput as
-    // a code unit with a position, and does math relative to that
+    // a character with a position, and does math relative to that
     // position.
     pos_++;
 
     return kEndOfInput;
   }
 
-  // Return the current position in the code unit stream.
+  // Return the current position in the character stream.
   // Starts at zero.
   inline unsigned pos() const { return pos_; }
 
-  // Skips forward past the next code_unit_count UTF-16 code units
+  // Skips forward past the next character_count UC16 characters
   // in the input, or until the end of input if that comes sooner.
-  // Returns the number of code units actually skipped. If less
-  // than code_unit_count,
-  inline unsigned SeekForward(unsigned code_unit_count) {
+  // Returns the number of characters actually skipped. If less
+  // than character_count, the end of the input has been reached.
+  inline unsigned SeekForward(unsigned character_count) {
     unsigned buffered_chars =
         static_cast<unsigned>(buffer_end_ - buffer_cursor_);
-    if (code_unit_count <= buffered_chars) {
-      buffer_cursor_ += code_unit_count;
-      pos_ += code_unit_count;
-      return code_unit_count;
+    if (character_count <= buffered_chars) {
+      buffer_cursor_ += character_count;
+      pos_ += character_count;
+      return character_count;
     }
-    return SlowSeekForward(code_unit_count);
+    return SlowSeekForward(character_count);
   }
 
-  // Pushes back the most recently read UTF-16 code unit (or negative
+  // Pushes back the most recently read UC16 character (or negative
   // value if at end of input), i.e., the value returned by the most recent
   // call to Advance.
   // Must not be used right after calling SeekForward.
-  virtual void PushBack(int32_t code_unit) = 0;
+  virtual void PushBack(int32_t character) = 0;
 
  protected:
   static const uc32 kEndOfInput = -1;
 
-  // Ensures that the buffer_cursor_ points to the code_unit at
+  // Ensures that the buffer_cursor_ points to the character at
   // position pos_ of the input, if possible. If the position
   // is at or after the end of the input, return false. If there
-  // are more code_units available, return true.
+  // are more characters available, return true.
   virtual bool ReadBlock() = 0;
-  virtual unsigned SlowSeekForward(unsigned code_unit_count) = 0;
+  virtual unsigned SlowSeekForward(unsigned character_count) = 0;
 
   const uc16* buffer_cursor_;
   const uc16* buffer_end_;
@@ -180,24 +158,23 @@
     }
   }
 
-  INLINE(void AddChar(uint32_t code_unit)) {
+  inline void AddChar(uc16 character) {
     if (position_ >= backing_store_.length()) ExpandBuffer();
     if (is_ascii_) {
-      if (code_unit < kMaxAsciiCharCodeU) {
-        backing_store_[position_] = static_cast<byte>(code_unit);
+      if (character < kMaxAsciiCharCodeU) {
+        backing_store_[position_] = static_cast<byte>(character);
         position_ += kASCIISize;
         return;
       }
-      ConvertToUtf16();
+      ConvertToUC16();
     }
-    ASSERT(code_unit < 0x10000u);
-    *reinterpret_cast<uc16*>(&backing_store_[position_]) = code_unit;
+    *reinterpret_cast<uc16*>(&backing_store_[position_]) = character;
     position_ += kUC16Size;
   }
 
   bool is_ascii() { return is_ascii_; }
 
-  Vector<const uc16> utf16_literal() {
+  Vector<const uc16> uc16_literal() {
     ASSERT(!is_ascii_);
     ASSERT((position_ & 0x1) == 0);
     return Vector<const uc16>(
@@ -239,13 +216,13 @@
     backing_store_ = new_store;
   }
 
-  void ConvertToUtf16() {
+  void ConvertToUC16() {
     ASSERT(is_ascii_);
     Vector<byte> new_store;
     int new_content_size = position_ * kUC16Size;
     if (new_content_size >= backing_store_.length()) {
-      // Ensure room for all currently read code units as UC16 as well
-      // as the code unit about to be stored.
+      // Ensure room for all currently read characters as UC16 as well
+      // as the character about to be stored.
       new_store = Vector<byte>::New(NewCapacity(new_content_size));
     } else {
       new_store = backing_store_;
@@ -272,32 +249,35 @@
 
 
 // ----------------------------------------------------------------------------
-// JavaScript Scanner.
+// Scanner base-class.
 
+// Generic functionality used by both JSON and JavaScript scanners.
 class Scanner {
  public:
-  // Scoped helper for literal recording. Automatically drops the literal
-  // if aborting the scanning before it's complete.
+  // -1 is outside of the range of any real source code.
+  static const int kNoOctalLocation = -1;
+
+  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+
   class LiteralScope {
    public:
-    explicit LiteralScope(Scanner* self)
-        : scanner_(self), complete_(false) {
-      scanner_->StartLiteral();
-    }
-     ~LiteralScope() {
-       if (!complete_) scanner_->DropLiteral();
-     }
-    void Complete() {
-      scanner_->TerminateLiteral();
-      complete_ = true;
-    }
+    explicit LiteralScope(Scanner* self);
+    ~LiteralScope();
+    void Complete();
 
    private:
     Scanner* scanner_;
     bool complete_;
   };
 
-  // Representation of an interval of source positions.
+  explicit Scanner(UnicodeCache* scanner_constants);
+
+  // Returns the current token again.
+  Token::Value current_token() { return current_.token; }
+
+  // One token look-ahead (past the token returned by Next()).
+  Token::Value peek() const { return next_.token; }
+
   struct Location {
     Location(int b, int e) : beg_pos(b), end_pos(e) { }
     Location() : beg_pos(0), end_pos(0) { }
@@ -312,39 +292,28 @@
     int end_pos;
   };
 
-  // -1 is outside of the range of any real source code.
-  static const int kNoOctalLocation = -1;
-
-  typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
-  explicit Scanner(UnicodeCache* scanner_contants);
-
-  void Initialize(Utf16CharacterStream* source);
-
-  // Returns the next token and advances input.
-  Token::Value Next();
-  // Returns the current token again.
-  Token::Value current_token() { return current_.token; }
   // Returns the location information for the current token
-  // (the token last returned by Next()).
+  // (the token returned by Next()).
   Location location() const { return current_.location; }
+  Location peek_location() const { return next_.location; }
+
   // Returns the literal string, if any, for the current token (the
-  // token last returned by Next()). The string is 0-terminated.
-  // Literal strings are collected for identifiers, strings, and
-  // numbers.
+  // token returned by Next()). The string is 0-terminated and in
+  // UTF-8 format; it may contain 0-characters. Literal strings are
+  // collected for identifiers, strings, and numbers.
   // These functions only give the correct result if the literal
   // was scanned between calls to StartLiteral() and TerminateLiteral().
+  bool is_literal_ascii() {
+    ASSERT_NOT_NULL(current_.literal_chars);
+    return current_.literal_chars->is_ascii();
+  }
   Vector<const char> literal_ascii_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->ascii_literal();
   }
-  Vector<const uc16> literal_utf16_string() {
+  Vector<const uc16> literal_uc16_string() {
     ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->utf16_literal();
-  }
-  bool is_literal_ascii() {
-    ASSERT_NOT_NULL(current_.literal_chars);
-    return current_.literal_chars->is_ascii();
+    return current_.literal_chars->uc16_literal();
   }
   int literal_length() const {
     ASSERT_NOT_NULL(current_.literal_chars);
@@ -361,26 +330,19 @@
     return current_.literal_chars->length() != source_length;
   }
 
-  // Similar functions for the upcoming token.
-
-  // One token look-ahead (past the token returned by Next()).
-  Token::Value peek() const { return next_.token; }
-
-  Location peek_location() const { return next_.location; }
-
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
+  bool is_next_literal_ascii() {
+    ASSERT_NOT_NULL(next_.literal_chars);
+    return next_.literal_chars->is_ascii();
+  }
   Vector<const char> next_literal_ascii_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->ascii_literal();
   }
-  Vector<const uc16> next_literal_utf16_string() {
+  Vector<const uc16> next_literal_uc16_string() {
     ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->utf16_literal();
-  }
-  bool is_next_literal_ascii() {
-    ASSERT_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->is_ascii();
+    return next_.literal_chars->uc16_literal();
   }
   int next_literal_length() const {
     ASSERT_NOT_NULL(next_.literal_chars);
@@ -391,52 +353,7 @@
 
   static const int kCharacterLookaheadBufferSize = 1;
 
-  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
-  uc32 ScanOctalEscape(uc32 c, int length);
-
-  // Returns the location of the last seen octal literal.
-  Location octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = Location::invalid(); }
-
-  // Seek forward to the given position.  This operation does not
-  // work in general, for instance when there are pushed back
-  // characters, but works for seeking forward until simple delimiter
-  // tokens, which is what it is used for.
-  void SeekForward(int pos);
-
-  bool HarmonyScoping() const {
-    return harmony_scoping_;
-  }
-  void SetHarmonyScoping(bool scoping) {
-    harmony_scoping_ = scoping;
-  }
-  bool HarmonyModules() const {
-    return harmony_modules_;
-  }
-  void SetHarmonyModules(bool modules) {
-    harmony_modules_ = modules;
-  }
-
-
-  // Returns true if there was a line terminator before the peek'ed token,
-  // possibly inside a multi-line comment.
-  bool HasAnyLineTerminatorBeforeNext() const {
-    return has_line_terminator_before_next_ ||
-           has_multiline_comment_before_next_;
-  }
-
-  // Scans the input as a regular expression pattern, previous
-  // character(s) must be /(=). Returns true if a pattern is scanned.
-  bool ScanRegExpPattern(bool seen_equal);
-  // Returns true if regexp flags are scanned (always since flags can
-  // be empty).
-  bool ScanRegExpFlags();
-
-  // Tells whether the buffer contains an identifier (no escapes).
-  // Used for checking if a property name is an identifier.
-  static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
- private:
+ protected:
   // The current and look-ahead token.
   struct TokenDesc {
     Token::Value token;
@@ -461,7 +378,7 @@
     next_.literal_chars = free_buffer;
   }
 
-  INLINE(void AddLiteralChar(uc32 c)) {
+  inline void AddLiteralChar(uc32 c) {
     ASSERT_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
@@ -506,31 +423,6 @@
 
   uc32 ScanHexNumber(int expected_length);
 
-  // Scans a single JavaScript token.
-  void Scan();
-
-  bool SkipWhiteSpace();
-  Token::Value SkipSingleLineComment();
-  Token::Value SkipMultiLineComment();
-  // Scans a possible HTML comment -- begins with '<!'.
-  Token::Value ScanHtmlComment();
-
-  void ScanDecimalDigits();
-  Token::Value ScanNumber(bool seen_period);
-  Token::Value ScanIdentifierOrKeyword();
-  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
-
-  void ScanEscape();
-  Token::Value ScanString();
-
-  // Decodes a unicode escape-sequence which is part of an identifier.
-  // If the escape sequence cannot be decoded the result is kBadChar.
-  uc32 ScanIdentifierUnicodeEscape();
-  // Recognizes a uniocde escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
-
   // Return the current source position.
   int source_pos() {
     return source_->pos() - kCharacterLookaheadBufferSize;
@@ -545,15 +437,115 @@
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
 
-  // Input stream. Must be initialized to an Utf16CharacterStream.
-  Utf16CharacterStream* source_;
-
-
-  // Start position of the octal literal last scanned.
-  Location octal_pos_;
+  // Input stream. Must be initialized to an UC16CharacterStream.
+  UC16CharacterStream* source_;
 
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;
+};
+
+// ----------------------------------------------------------------------------
+// JavaScriptScanner - base logic for JavaScript scanning.
+
+class JavaScriptScanner : public Scanner {
+ public:
+  // Scoped helper for literal recording. Starts recording on
+  // construction and automatically drops the literal if the scope
+  // is exited before Complete() is called.
+  class LiteralScope {
+   public:
+    explicit LiteralScope(JavaScriptScanner* self)
+        : scanner_(self), complete_(false) {
+      scanner_->StartLiteral();
+    }
+    ~LiteralScope() {
+      if (!complete_) scanner_->DropLiteral();
+    }
+    void Complete() {
+      scanner_->TerminateLiteral();
+      complete_ = true;
+    }
+
+   private:
+    JavaScriptScanner* scanner_;
+    bool complete_;
+  };
+
+  explicit JavaScriptScanner(UnicodeCache* scanner_constants);
+
+  void Initialize(UC16CharacterStream* source);
+
+  // Returns the next token.
+  Token::Value Next();
+
+  // Returns true if there was a line terminator before the peek'ed token,
+  // possibly inside a multi-line comment.
+  bool HasAnyLineTerminatorBeforeNext() const {
+    return has_line_terminator_before_next_ ||
+           has_multiline_comment_before_next_;
+  }
+
+  // Scans the input as a regular expression pattern, previous
+  // character(s) must be /(=). Returns true if a pattern is scanned.
+  bool ScanRegExpPattern(bool seen_equal);
+  // Returns true if regexp flags are scanned (always since flags can
+  // be empty).
+  bool ScanRegExpFlags();
+
+  // Tells whether the buffer contains an identifier (no escapes).
+  // Used for checking if a property name is an identifier.
+  static bool IsIdentifier(unibrow::CharacterStream* buffer);
+
+  // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  uc32 ScanOctalEscape(uc32 c, int length);
+
+  // Returns the location of the last seen octal literal.
+  Location octal_position() const { return octal_pos_; }
+  void clear_octal_position() { octal_pos_ = Location::invalid(); }
+
+  // Seek forward to the given position.  This operation does not
+  // work in general, for instance when there are pushed back
+  // characters, but works for seeking forward until simple delimiter
+  // tokens, which is what it is used for.
+  void SeekForward(int pos);
+
+  bool HarmonyBlockScoping() const {
+    return harmony_block_scoping_;
+  }
+  void SetHarmonyBlockScoping(bool block_scoping) {
+    harmony_block_scoping_ = block_scoping;
+  }
+
+
+ protected:
+  bool SkipWhiteSpace();
+  Token::Value SkipSingleLineComment();
+  Token::Value SkipMultiLineComment();
+
+  // Scans a single JavaScript token.
+  void Scan();
+
+  void ScanDecimalDigits();
+  Token::Value ScanNumber(bool seen_period);
+  Token::Value ScanIdentifierOrKeyword();
+  Token::Value ScanIdentifierSuffix(LiteralScope* literal);
+
+  void ScanEscape();
+  Token::Value ScanString();
+
+  // Scans a possible HTML comment -- begins with '<!'.
+  Token::Value ScanHtmlComment();
+
+  // Decodes a unicode escape-sequence which is part of an identifier.
+  // If the escape sequence cannot be decoded the result is kBadChar.
+  uc32 ScanIdentifierUnicodeEscape();
+  // Recognizes a unicode escape-sequence and adds its characters,
+  // uninterpreted, to the current literal. Used for parsing RegExp
+  // flags.
+  bool ScanLiteralUnicodeEscape();
+
+  // Start position of the octal literal last scanned.
+  Location octal_pos_;
 
   // Whether there is a line terminator whitespace character after
   // the current token, and before the next. Does not count newlines
@@ -562,10 +554,9 @@
   // Whether there is a multi-line comment that contains a
   // line-terminator after the current token, and before the next.
   bool has_multiline_comment_before_next_;
-  // Whether we scan 'let' as a keyword for harmony block-scoped let bindings.
-  bool harmony_scoping_;
-  // Whether we scan 'module', 'import', 'export' as keywords.
-  bool harmony_modules_;
+  // Whether we scan 'let' as a keyword for harmony block scoped
+  // let bindings.
+  bool harmony_block_scoping_;
 };
 
 } }  // namespace v8::internal
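
The scopeinfo.cc hunks below restore the FixedArray serialization whose
layout the "Encoding format" comment in that file documents: one slot
each for the function name and the two boolean flags, one length slot
per table, (name, mode) pairs for context slots, then parameter and
stack-slot names. A worked sketch of the slot count that Serialize()
computes; the helper and the example numbers are illustrative:

    static int SerializedLength(int context_slots, int parameters,
                                int stack_slots) {
      const int kExtraSlots = 1 + 1 + 1 + 3;  // name, two bools, three lengths
      return kExtraSlots + context_slots * 2  // (name, mode) pairs
                         + parameters
                         + stack_slots;
    }
    // e.g. SerializedLength(2, 1, 3) == 6 + 4 + 1 + 3 == 14 slots.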
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index 0f36234..ad31ca4 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -38,297 +38,456 @@
 namespace internal {
 
 
-Handle<ScopeInfo> ScopeInfo::Create(Scope* scope) {
-  // Collect stack and context locals.
-  ZoneList<Variable*> stack_locals(scope->StackLocalCount());
-  ZoneList<Variable*> context_locals(scope->ContextLocalCount());
-  scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
-  const int stack_local_count = stack_locals.length();
-  const int context_local_count = context_locals.length();
-  // Make sure we allocate the correct amount.
-  ASSERT(scope->StackLocalCount() == stack_local_count);
-  ASSERT(scope->ContextLocalCount() == context_local_count);
+static int CompareLocal(Variable* const* v, Variable* const* w) {
+  int x = (*v)->index();
+  int y = (*w)->index();
+  // Consider sorting them according to type as well?
+  return x - y;
+}
 
-  // Determine use and location of the function variable if it is present.
-  FunctionVariableInfo function_name_info;
-  VariableMode function_variable_mode;
-  if (scope->is_function_scope() && scope->function() != NULL) {
-    Variable* var = scope->function()->var();
-    if (!var->is_used()) {
-      function_name_info = UNUSED;
-    } else if (var->IsContextSlot()) {
-      function_name_info = CONTEXT;
-    } else {
-      ASSERT(var->IsStackLocal());
-      function_name_info = STACK;
-    }
-    function_variable_mode = var->mode();
-  } else {
-    function_name_info = NONE;
-    function_variable_mode = VAR;
-  }
 
-  const bool has_function_name = function_name_info != NONE;
-  const int parameter_count = scope->num_parameters();
-  const int length = kVariablePartIndex
-      + parameter_count + stack_local_count + 2 * context_local_count
-      + (has_function_name ? 2 : 0);
-
-  Handle<ScopeInfo> scope_info = FACTORY->NewScopeInfo(length);
-
-  // Encode the flags.
-  int flags = TypeField::encode(scope->type()) |
-      CallsEvalField::encode(scope->calls_eval()) |
-      LanguageModeField::encode(scope->language_mode()) |
-      FunctionVariableField::encode(function_name_info) |
-      FunctionVariableMode::encode(function_variable_mode);
-  scope_info->SetFlags(flags);
-  scope_info->SetParameterCount(parameter_count);
-  scope_info->SetStackLocalCount(stack_local_count);
-  scope_info->SetContextLocalCount(context_local_count);
-
-  int index = kVariablePartIndex;
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(Scope* scope)
+    : function_name_(FACTORY->empty_symbol()),
+      calls_eval_(scope->calls_eval()),
+      is_strict_mode_(scope->is_strict_mode()),
+      parameters_(scope->num_parameters()),
+      stack_slots_(scope->num_stack_slots()),
+      context_slots_(scope->num_heap_slots()),
+      context_modes_(scope->num_heap_slots()) {
   // Add parameters.
-  ASSERT(index == scope_info->ParameterEntriesIndex());
-  for (int i = 0; i < parameter_count; ++i) {
-    scope_info->set(index++, *scope->parameter(i)->name());
+  for (int i = 0; i < scope->num_parameters(); i++) {
+    ASSERT(parameters_.length() == i);
+    parameters_.Add(scope->parameter(i)->name());
   }
 
-  // Add stack locals' names. We are assuming that the stack locals'
-  // slots are allocated in increasing order, so we can simply add
-  // them to the ScopeInfo object.
-  ASSERT(index == scope_info->StackLocalEntriesIndex());
-  for (int i = 0; i < stack_local_count; ++i) {
-    ASSERT(stack_locals[i]->index() == i);
-    scope_info->set(index++, *stack_locals[i]->name());
-  }
+  // Add stack locals and collect heap locals.
+  // We are assuming that the locals' slots are allocated in
+  // increasing order, so we can simply add them to the
+  // ScopeInfo lists. However, due to usage analysis, this is
+  // not true for context-allocated locals: Some of them
+  // may be parameters which are allocated before the
+  // non-parameter locals. When the non-parameter locals are
+  // sorted according to usage, the allocated slot indices may
+  // not be in increasing order with the variable list anymore.
+  // Thus, we first collect the context-allocated locals, and then
+  // sort them by context slot index before adding them to the
+  // ScopeInfo list.
+  List<Variable*, Allocator> locals(32);  // 32 is a wild guess
+  ASSERT(locals.is_empty());
+  scope->CollectUsedVariables(&locals);
+  locals.Sort(&CompareLocal);
 
-  // Due to usage analysis, context-allocated locals are not necessarily in
-  // increasing order: Some of them may be parameters which are allocated before
-  // the non-parameter locals. When the non-parameter locals are sorted
-  // according to usage, the allocated slot indices may not be in increasing
-  // order with the variable list anymore. Thus, we first need to sort them by
-  // context slot index before adding them to the ScopeInfo object.
-  context_locals.Sort(&Variable::CompareIndex);
+  List<Variable*, Allocator> heap_locals(locals.length());
+  for (int i = 0; i < locals.length(); i++) {
+    Variable* var = locals[i];
+    if (var->is_used()) {
+      switch (var->location()) {
+        case Variable::UNALLOCATED:
+        case Variable::PARAMETER:
+          break;
 
-  // Add context locals' names.
-  ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
-  for (int i = 0; i < context_local_count; ++i) {
-    scope_info->set(index++, *context_locals[i]->name());
-  }
+        case Variable::LOCAL:
+          ASSERT(stack_slots_.length() == var->index());
+          stack_slots_.Add(var->name());
+          break;
 
-  // Add context locals' info.
-  ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
-  for (int i = 0; i < context_local_count; ++i) {
-    Variable* var = context_locals[i];
-    uint32_t value = ContextLocalMode::encode(var->mode()) |
-        ContextLocalInitFlag::encode(var->initialization_flag());
-    scope_info->set(index++, Smi::FromInt(value));
-  }
+        case Variable::CONTEXT:
+          heap_locals.Add(var);
+          break;
 
-  // If present, add the function variable name and its index.
-  ASSERT(index == scope_info->FunctionNameEntryIndex());
-  if (has_function_name) {
-    int var_index = scope->function()->var()->index();
-    scope_info->set(index++, *scope->function()->name());
-    scope_info->set(index++, Smi::FromInt(var_index));
-    ASSERT(function_name_info != STACK ||
-           (var_index == scope_info->StackLocalCount() &&
-            var_index == scope_info->StackSlotCount() - 1));
-    ASSERT(function_name_info != CONTEXT ||
-           var_index == scope_info->ContextLength() - 1);
-  }
-
-  ASSERT(index == scope_info->length());
-  ASSERT(scope->num_parameters() == scope_info->ParameterCount());
-  ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
-  ASSERT(scope->num_heap_slots() == scope_info->ContextLength());
-  return scope_info;
-}
-
-
-ScopeInfo* ScopeInfo::Empty() {
-  return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
-}
-
-
-ScopeType ScopeInfo::Type() {
-  ASSERT(length() > 0);
-  return TypeField::decode(Flags());
-}
-
-
-bool ScopeInfo::CallsEval() {
-  return length() > 0 && CallsEvalField::decode(Flags());
-}
-
-
-LanguageMode ScopeInfo::language_mode() {
-  return length() > 0 ? LanguageModeField::decode(Flags()) : CLASSIC_MODE;
-}
-
-
-int ScopeInfo::LocalCount() {
-  return StackLocalCount() + ContextLocalCount();
-}
-
-
-int ScopeInfo::StackSlotCount() {
-  if (length() > 0) {
-    bool function_name_stack_slot =
-        FunctionVariableField::decode(Flags()) == STACK;
-    return StackLocalCount() + (function_name_stack_slot ? 1 : 0);
-  }
-  return 0;
-}
-
-
-int ScopeInfo::ContextLength() {
-  if (length() > 0) {
-    int context_locals = ContextLocalCount();
-    bool function_name_context_slot =
-        FunctionVariableField::decode(Flags()) == CONTEXT;
-    bool has_context = context_locals > 0 ||
-        function_name_context_slot ||
-        Type() == WITH_SCOPE ||
-        (Type() == FUNCTION_SCOPE && CallsEval());
-    if (has_context) {
-      return Context::MIN_CONTEXT_SLOTS + context_locals +
-          (function_name_context_slot ? 1 : 0);
+        case Variable::LOOKUP:
+          // We don't expect lookup variables in the locals list.
+          UNREACHABLE();
+          break;
+      }
     }
   }
+
+  // Add heap locals.
+  if (scope->num_heap_slots() > 0) {
+    // Add user-defined slots.
+    for (int i = 0; i < heap_locals.length(); i++) {
+      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(heap_locals[i]->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(heap_locals[i]->name());
+      context_modes_.Add(heap_locals[i]->mode());
+    }
+
+  } else {
+    ASSERT(heap_locals.length() == 0);
+  }
+
+  // Add the function context slot, if present.
+  // For now, this must happen at the very end because of the
+  // ordering of the scope info slots and the respective slot indices.
+  if (scope->is_function_scope()) {
+    VariableProxy* proxy = scope->function();
+    if (proxy != NULL &&
+        proxy->var()->is_used() &&
+        proxy->var()->IsContextSlot()) {
+      function_name_ = proxy->name();
+      // Note that we must not find the function name in the context slot
+      // list - instead it must be handled separately in the
+      // Contexts::Lookup() function. Thus record an empty symbol here so we
+      // get the correct number of context slots.
+      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_slots_.length());
+      ASSERT(proxy->var()->index() - Context::MIN_CONTEXT_SLOTS ==
+             context_modes_.length());
+      context_slots_.Add(FACTORY->empty_symbol());
+      context_modes_.Add(Variable::INTERNAL);
+    }
+  }
+}
+
+
+// Encoding format in a FixedArray object:
+//
+// - function name
+//
+// - calls eval boolean flag
+//
+// - number of variables in the context object (smi) (= function context
+//   slot index + 1)
+// - list of pairs (name, Var mode) of context-allocated variables (starting
+//   with context slot 0)
+//
+// - number of parameters (smi)
+// - list of parameter names (starting with parameter 0 first)
+//
+// - number of variables on the stack (smi)
+// - list of names of stack-allocated variables (starting with stack slot 0)
+
+// The ScopeInfo representation could be simplified and the ScopeInfo
+// re-implemented (with almost the same interface). Here is a
+// suggestion for the new format:
+//
+// - have a single list with all variable names (parameters, stack locals,
+//   context locals), followed by a list of non-Object* values containing
+//   the variables information (what kind, index, attributes)
+// - searching the linear list of names is fast and yields an index into the
+//   list if the variable name is found
+// - that list index is then used to find the variable information in the
+//   subsequent list
+// - the list entries don't have to be in any particular order, so all the
+//   current sorting business can go away
+// - the ScopeInfo lookup routines can be reduced to perhaps a single lookup
+//   which returns all information at once
+// - when gathering the information from a Scope, we only need to iterate
+//   through the local variables (parameters and context info is already
+//   present)
+
+
+static inline Object** ReadInt(Object** p, int* x) {
+  *x = (reinterpret_cast<Smi*>(*p++))->value();
+  return p;
+}
+
+
+static inline Object** ReadBool(Object** p, bool* x) {
+  *x = (reinterpret_cast<Smi*>(*p++))->value() != 0;
+  return p;
+}
+
+
+static inline Object** ReadSymbol(Object** p, Handle<String>* s) {
+  *s = Handle<String>(reinterpret_cast<String*>(*p++));
+  return p;
+}
+
+
+template <class Allocator>
+static Object** ReadList(Object** p, List<Handle<String>, Allocator >* list) {
+  ASSERT(list->is_empty());
+  int n;
+  p = ReadInt(p, &n);
+  while (n-- > 0) {
+    Handle<String> s;
+    p = ReadSymbol(p, &s);
+    list->Add(s);
+  }
+  return p;
+}
+
+
+template <class Allocator>
+static Object** ReadList(Object** p,
+                         List<Handle<String>, Allocator>* list,
+                         List<Variable::Mode, Allocator>* modes) {
+  ASSERT(list->is_empty());
+  int n;
+  p = ReadInt(p, &n);
+  while (n-- > 0) {
+    Handle<String> s;
+    int m;
+    p = ReadSymbol(p, &s);
+    p = ReadInt(p, &m);
+    list->Add(s);
+    modes->Add(static_cast<Variable::Mode>(m));
+  }
+  return p;
+}
+
+
+template<class Allocator>
+ScopeInfo<Allocator>::ScopeInfo(SerializedScopeInfo* data)
+  : function_name_(FACTORY->empty_symbol()),
+    parameters_(4),
+    stack_slots_(8),
+    context_slots_(8),
+    context_modes_(8) {
+  if (data->length() > 0) {
+    Object** p0 = data->data_start();
+    Object** p = p0;
+    p = ReadSymbol(p, &function_name_);
+    p = ReadBool(p, &calls_eval_);
+    p = ReadBool(p, &is_strict_mode_);
+    p = ReadList<Allocator>(p, &context_slots_, &context_modes_);
+    p = ReadList<Allocator>(p, &parameters_);
+    p = ReadList<Allocator>(p, &stack_slots_);
+    ASSERT((p - p0) == FixedArray::cast(data)->length());
+  }
+}
+
+
+static inline Object** WriteInt(Object** p, int x) {
+  *p++ = Smi::FromInt(x);
+  return p;
+}
+
+
+static inline Object** WriteBool(Object** p, bool b) {
+  *p++ = Smi::FromInt(b ? 1 : 0);
+  return p;
+}
+
+
+static inline Object** WriteSymbol(Object** p, Handle<String> s) {
+  *p++ = *s;
+  return p;
+}
+
+
+template <class Allocator>
+static Object** WriteList(Object** p, List<Handle<String>, Allocator >* list) {
+  const int n = list->length();
+  p = WriteInt(p, n);
+  for (int i = 0; i < n; i++) {
+    p = WriteSymbol(p, list->at(i));
+  }
+  return p;
+}
+
+
+template <class Allocator>
+static Object** WriteList(Object** p,
+                          List<Handle<String>, Allocator>* list,
+                          List<Variable::Mode, Allocator>* modes) {
+  const int n = list->length();
+  p = WriteInt(p, n);
+  for (int i = 0; i < n; i++) {
+    p = WriteSymbol(p, list->at(i));
+    p = WriteInt(p, modes->at(i));
+  }
+  return p;
+}
+
+
+template<class Allocator>
+Handle<SerializedScopeInfo> ScopeInfo<Allocator>::Serialize() {
+  // function name, calls eval, is_strict_mode, length for 3 tables:
+  const int extra_slots = 1 + 1 + 1 + 3;
+  int length = extra_slots +
+               context_slots_.length() * 2 +
+               parameters_.length() +
+               stack_slots_.length();
+
+  Handle<SerializedScopeInfo> data(
+      SerializedScopeInfo::cast(*FACTORY->NewSerializedScopeInfo(length)));
+  AssertNoAllocation nogc;
+
+  Object** p0 = data->data_start();
+  Object** p = p0;
+  p = WriteSymbol(p, function_name_);
+  p = WriteBool(p, calls_eval_);
+  p = WriteBool(p, is_strict_mode_);
+  p = WriteList(p, &context_slots_, &context_modes_);
+  p = WriteList(p, &parameters_);
+  p = WriteList(p, &stack_slots_);
+  ASSERT((p - p0) == length);
+
+  return data;
+}
+
+
+template<class Allocator>
+Handle<String> ScopeInfo<Allocator>::LocalName(int i) const {
+  // A local variable can be allocated either on the stack or in the context.
+  // For variables allocated in the context they are always preceded by
+  // Context::MIN_CONTEXT_SLOTS of fixed allocated slots in the context.
+  if (i < number_of_stack_slots()) {
+    return stack_slot_name(i);
+  } else {
+    return context_slot_name(i - number_of_stack_slots() +
+                             Context::MIN_CONTEXT_SLOTS);
+  }
+}
+
+
+template<class Allocator>
+int ScopeInfo<Allocator>::NumberOfLocals() const {
+  int number_of_locals = number_of_stack_slots();
+  if (number_of_context_slots() > 0) {
+    ASSERT(number_of_context_slots() >= Context::MIN_CONTEXT_SLOTS);
+    number_of_locals += number_of_context_slots() - Context::MIN_CONTEXT_SLOTS;
+  }
+  return number_of_locals;
+}
+
+
+Handle<SerializedScopeInfo> SerializedScopeInfo::Create(Scope* scope) {
+  ScopeInfo<ZoneListAllocationPolicy> sinfo(scope);
+  return sinfo.Serialize();
+}
+
+
+SerializedScopeInfo* SerializedScopeInfo::Empty() {
+  return reinterpret_cast<SerializedScopeInfo*>(HEAP->empty_fixed_array());
+}
+
+
+Object** SerializedScopeInfo::ContextEntriesAddr() {
+  ASSERT(length() > 0);
+  // +3 for function name, calls eval, strict mode.
+  return data_start() + 3;
+}
+
+
+Object** SerializedScopeInfo::ParameterEntriesAddr() {
+  ASSERT(length() > 0);
+  Object** p = ContextEntriesAddr();
+  int number_of_context_slots;
+  p = ReadInt(p, &number_of_context_slots);
+  return p + number_of_context_slots*2;  // *2 for pairs
+}
+
+
+Object** SerializedScopeInfo::StackSlotEntriesAddr() {
+  ASSERT(length() > 0);
+  Object** p = ParameterEntriesAddr();
+  int number_of_parameter_slots;
+  p = ReadInt(p, &number_of_parameter_slots);
+  return p + number_of_parameter_slots;
+}
+
+
+bool SerializedScopeInfo::CallsEval() {
+  if (length() > 0) {
+    Object** p = data_start() + 1;  // +1 for function name.
+    bool calls_eval;
+    p = ReadBool(p, &calls_eval);
+    return calls_eval;
+  }
+  return false;
+}
+
+
+bool SerializedScopeInfo::IsStrictMode() {
+  if (length() > 0) {
+    Object** p = data_start() + 2;  // +2 for function name, calls eval.
+    bool strict_mode;
+    p = ReadBool(p, &strict_mode);
+    return strict_mode;
+  }
+  return false;
+}
+
+
+int SerializedScopeInfo::NumberOfStackSlots() {
+  if (length() > 0) {
+    Object** p = StackSlotEntriesAddr();
+    int number_of_stack_slots;
+    ReadInt(p, &number_of_stack_slots);
+    return number_of_stack_slots;
+  }
   return 0;
 }
 
 
-bool ScopeInfo::HasFunctionName() {
+int SerializedScopeInfo::NumberOfContextSlots() {
   if (length() > 0) {
-    return NONE != FunctionVariableField::decode(Flags());
-  } else {
-    return false;
+    Object** p = ContextEntriesAddr();
+    int number_of_context_slots;
+    ReadInt(p, &number_of_context_slots);
+    return number_of_context_slots + Context::MIN_CONTEXT_SLOTS;
   }
+  return 0;
 }
 
 
-bool ScopeInfo::HasHeapAllocatedLocals() {
+bool SerializedScopeInfo::HasHeapAllocatedLocals() {
   if (length() > 0) {
-    return ContextLocalCount() > 0;
-  } else {
-    return false;
+    Object** p = ContextEntriesAddr();
+    int number_of_context_slots;
+    ReadInt(p, &number_of_context_slots);
+    return number_of_context_slots > 0;
   }
+  return false;
 }
 
 
-bool ScopeInfo::HasContext() {
-  if (length() > 0) {
-    return ContextLength() > 0;
-  } else {
-    return false;
-  }
-}
-
-
-String* ScopeInfo::FunctionName() {
-  ASSERT(HasFunctionName());
-  return String::cast(get(FunctionNameEntryIndex()));
-}
-
-
-String* ScopeInfo::ParameterName(int var) {
-  ASSERT(0 <= var && var < ParameterCount());
-  int info_index = ParameterEntriesIndex() + var;
-  return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::LocalName(int var) {
-  ASSERT(0 <= var && var < LocalCount());
-  ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
-         ContextLocalNameEntriesIndex());
-  int info_index = StackLocalEntriesIndex() + var;
-  return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::StackLocalName(int var) {
-  ASSERT(0 <= var && var < StackLocalCount());
-  int info_index = StackLocalEntriesIndex() + var;
-  return String::cast(get(info_index));
-}
-
-
-String* ScopeInfo::ContextLocalName(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
-  int info_index = ContextLocalNameEntriesIndex() + var;
-  return String::cast(get(info_index));
-}
-
-
-VariableMode ScopeInfo::ContextLocalMode(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
-  int info_index = ContextLocalInfoEntriesIndex() + var;
-  int value = Smi::cast(get(info_index))->value();
-  return ContextLocalMode::decode(value);
-}
-
-
-InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
-  int info_index = ContextLocalInfoEntriesIndex() + var;
-  int value = Smi::cast(get(info_index))->value();
-  return ContextLocalInitFlag::decode(value);
-}
-
-
-int ScopeInfo::StackSlotIndex(String* name) {
+int SerializedScopeInfo::StackSlotIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
-    int start = StackLocalEntriesIndex();
-    int end = StackLocalEntriesIndex() + StackLocalCount();
-    for (int i = start; i < end; ++i) {
-      if (name == get(i)) {
-        return i - start;
-      }
+    // Slots start after length entry.
+    Object** p0 = StackSlotEntriesAddr();
+    int number_of_stack_slots;
+    p0 = ReadInt(p0, &number_of_stack_slots);
+    Object** p = p0;
+    Object** end = p0 + number_of_stack_slots;
+    while (p != end) {
+      if (*p == name) return static_cast<int>(p - p0);
+      p++;
     }
   }
   return -1;
 }
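
Note that `*p == name` above is a raw pointer comparison. That is only sound because names are symbols, i.e. strings canonicalized so equal contents share one instance, which is why the function asserts name->IsSymbol(). A self-contained sketch of the same idea using a toy interning table (invented names, not V8's symbol table):

#include <set>
#include <string>
#include <vector>

// Intern() returns one stable canonical pointer per distinct string, so
// name equality reduces to pointer identity, as in the symbol check above.
const std::string* Intern(const std::string& s) {
  static std::set<std::string> table;
  return &*table.insert(s).first;
}

int StackSlotIndexSketch(const std::vector<const std::string*>& slots,
                         const std::string* name) {  // must be interned
  for (size_t i = 0; i < slots.size(); i++) {
    if (slots[i] == name) return static_cast<int>(i);  // pointer compare
  }
  return -1;  // absent, mirroring the "value < 0" contract
}
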
 
-
-int ScopeInfo::ContextSlotIndex(String* name,
-                                VariableMode* mode,
-                                InitializationFlag* init_flag) {
+int SerializedScopeInfo::ContextSlotIndex(String* name, Variable::Mode* mode) {
   ASSERT(name->IsSymbol());
-  ASSERT(mode != NULL);
-  ASSERT(init_flag != NULL);
+  Isolate* isolate = GetIsolate();
+  int result = isolate->context_slot_cache()->Lookup(this, name, mode);
+  if (result != ContextSlotCache::kNotFound) return result;
   if (length() > 0) {
-    ContextSlotCache* context_slot_cache = GetIsolate()->context_slot_cache();
-    int result = context_slot_cache->Lookup(this, name, mode, init_flag);
-    if (result != ContextSlotCache::kNotFound) {
-      ASSERT(result < ContextLength());
-      return result;
-    }
-
-    int start = ContextLocalNameEntriesIndex();
-    int end = ContextLocalNameEntriesIndex() + ContextLocalCount();
-    for (int i = start; i < end; ++i) {
-      if (name == get(i)) {
-        int var = i - start;
-        *mode = ContextLocalMode(var);
-        *init_flag = ContextLocalInitFlag(var);
-        result = Context::MIN_CONTEXT_SLOTS + var;
-        context_slot_cache->Update(this, name, *mode, *init_flag, result);
-        ASSERT(result < ContextLength());
+    // Slots start after length entry.
+    Object** p0 = ContextEntriesAddr();
+    int number_of_context_slots;
+    p0 = ReadInt(p0, &number_of_context_slots);
+    Object** p = p0;
+    Object** end = p0 + number_of_context_slots * 2;
+    while (p != end) {
+      if (*p == name) {
+        ASSERT(((p - p0) & 1) == 0);
+        int v;
+        ReadInt(p + 1, &v);
+        Variable::Mode mode_value = static_cast<Variable::Mode>(v);
+        if (mode != NULL) *mode = mode_value;
+        result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
+        isolate->context_slot_cache()->Update(this, name, mode_value, result);
         return result;
       }
+      p += 2;
     }
-    context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
   }
+  isolate->context_slot_cache()->Update(this, name, Variable::INTERNAL, -1);
   return -1;
 }
 
 
-int ScopeInfo::ParameterIndex(String* name) {
+int SerializedScopeInfo::ParameterIndex(String* name) {
   ASSERT(name->IsSymbol());
   if (length() > 0) {
     // We must read parameters from the end since for
@@ -336,58 +495,41 @@
     // last declaration of that parameter is used
     // inside a function (and thus we need to look
     // at the last index). Was bug# 1110337.
-    int start = ParameterEntriesIndex();
-    int end = ParameterEntriesIndex() + ParameterCount();
-    for (int i = end - 1; i >= start; --i) {
-      if (name == get(i)) {
-        return i - start;
-      }
+    //
+    // Eventually, we should only register such parameters
+    // once, with the corresponding index. This requires a new
+    // implementation of the ScopeInfo code. See also other
+    // comments in this file regarding this.
+    Object** p = ParameterEntriesAddr();
+    int number_of_parameter_slots;
+    Object** p0 = ReadInt(p, &number_of_parameter_slots);
+    p = p0 + number_of_parameter_slots;
+    while (p > p0) {
+      p--;
+      if (*p == name) return static_cast<int>(p - p0);
     }
   }
   return -1;
 }
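
The backwards scan matters because classic-mode JavaScript permits duplicate parameter names and the last declaration wins: `function f(x, x) { return x; }` returns its second argument. A sketch of the reverse search, reusing the interning idea from the sketch above:

#include <string>
#include <vector>

// With params == [x, x] (two interned pointers to "x"), the lookup must
// yield index 1, not 0, so the scan runs from the end.
int ParameterIndexSketch(const std::vector<const std::string*>& params,
                         const std::string* name) {
  for (int i = static_cast<int>(params.size()) - 1; i >= 0; i--) {
    if (params[i] == name) return i;  // last declaration wins
  }
  return -1;
}
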
 
 
-int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
+int SerializedScopeInfo::FunctionContextSlotIndex(String* name) {
   ASSERT(name->IsSymbol());
-  ASSERT(mode != NULL);
   if (length() > 0) {
-    if (FunctionVariableField::decode(Flags()) == CONTEXT &&
-        FunctionName() == name) {
-      *mode = FunctionVariableMode::decode(Flags());
-      return Smi::cast(get(FunctionNameEntryIndex() + 1))->value();
+    Object** p = data_start();
+    if (*p == name) {
+      p = ContextEntriesAddr();
+      int number_of_context_slots;
+      ReadInt(p, &number_of_context_slots);
+      ASSERT(number_of_context_slots != 0);
+      // The function context slot is the last entry.
+      return number_of_context_slots + Context::MIN_CONTEXT_SLOTS - 1;
     }
   }
   return -1;
 }
 
 
-int ScopeInfo::ParameterEntriesIndex() {
-  ASSERT(length() > 0);
-  return kVariablePartIndex;
-}
-
-
-int ScopeInfo::StackLocalEntriesIndex() {
-  return ParameterEntriesIndex() + ParameterCount();
-}
-
-
-int ScopeInfo::ContextLocalNameEntriesIndex() {
-  return StackLocalEntriesIndex() + StackLocalCount();
-}
-
-
-int ScopeInfo::ContextLocalInfoEntriesIndex() {
-  return ContextLocalNameEntriesIndex() + ContextLocalCount();
-}
-
-
-int ScopeInfo::FunctionNameEntryIndex() {
-  return ContextLocalInfoEntriesIndex() + ContextLocalCount();
-}
-
-
 int ContextSlotCache::Hash(Object* data, String* name) {
   // Uses only lower 32 bits if pointers are larger.
   uintptr_t addr_hash =
@@ -398,14 +540,12 @@
 
 int ContextSlotCache::Lookup(Object* data,
                              String* name,
-                             VariableMode* mode,
-                             InitializationFlag* init_flag) {
+                             Variable::Mode* mode) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
-    if (init_flag != NULL) *init_flag = result.initialization_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
@@ -414,8 +554,7 @@
 
 void ContextSlotCache::Update(Object* data,
                               String* name,
-                              VariableMode mode,
-                              InitializationFlag init_flag,
+                              Variable::Mode mode,
                               int slot_index) {
   String* symbol;
   ASSERT(slot_index > kNotFound);
@@ -425,9 +564,9 @@
     key.data = data;
     key.name = symbol;
     // Please note value only takes a uint as index.
-    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, init_flag, slot_index);
+    ValidateEntry(data, name, mode, slot_index);
 #endif
   }
 }
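
ContextSlotCache, used by the Lookup/Update pair here, is a direct-mapped cache: Hash() selects exactly one entry, Update() evicts whatever is there, and Lookup() accepts only an exact (data, name) hit. A minimal standalone sketch of that structure (invented names; the real cache additionally stores index - kNotFound, which is what the `slot_index - kNotFound` above is doing, so the negative miss marker fits an unsigned bit field):

#include <cstdint>
#include <functional>
#include <string>

class SlotCacheSketch {
 public:
  static const int kNotFound = -2;  // any value < -1 works for the sketch
  int Lookup(const void* data, const std::string& name) const {
    const Entry& e = entries_[Hash(data, name)];
    if (e.data == data && e.name == name) return e.index;
    return kNotFound;
  }
  void Update(const void* data, const std::string& name, int index) {
    Entry& e = entries_[Hash(data, name)];  // direct-mapped: always evict
    e.data = data;
    e.name = name;
    e.index = index;
  }
 private:
  static const size_t kLength = 256;
  struct Entry {
    const void* data = nullptr;
    std::string name;
    int index = kNotFound;
  };
  static size_t Hash(const void* data, const std::string& name) {
    // Mix the object address with the name hash, as ContextSlotCache::Hash
    // mixes the data pointer and the string hash.
    return (reinterpret_cast<uintptr_t>(data) ^
            std::hash<std::string>()(name)) % kLength;
  }
  Entry entries_[kLength];
};
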
@@ -442,8 +581,7 @@
 
 void ContextSlotCache::ValidateEntry(Object* data,
                                      String* name,
-                                     VariableMode mode,
-                                     InitializationFlag init_flag,
+                                     Variable::Mode mode,
                                      int slot_index) {
   String* symbol;
   if (HEAP->LookupSymbolIfExists(name, &symbol)) {
@@ -453,56 +591,51 @@
     ASSERT(key.name->Equals(name));
     Value result(values_[index]);
     ASSERT(result.mode() == mode);
-    ASSERT(result.initialization_flag() == init_flag);
     ASSERT(result.index() + kNotFound == slot_index);
   }
 }
 
 
+template <class Allocator>
 static void PrintList(const char* list_name,
                       int nof_internal_slots,
-                      int start,
-                      int end,
-                      ScopeInfo* scope_info) {
-  if (start < end) {
+                      List<Handle<String>, Allocator>& list) {
+  if (list.length() > 0) {
     PrintF("\n  // %s\n", list_name);
     if (nof_internal_slots > 0) {
       PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
     }
-    for (int i = nof_internal_slots; start < end; ++i, ++start) {
-      PrintF("  %2d ", i);
-      String::cast(scope_info->get(start))->ShortPrint();
+    for (int i = 0; i < list.length(); i++) {
+      PrintF("  %2d ", i + nof_internal_slots);
+      list[i]->ShortPrint();
       PrintF("\n");
     }
   }
 }
 
 
-void ScopeInfo::Print() {
+template<class Allocator>
+void ScopeInfo<Allocator>::Print() {
   PrintF("ScopeInfo ");
-  if (HasFunctionName()) {
-    FunctionName()->ShortPrint();
-  } else {
+  if (function_name_->length() > 0)
+    function_name_->ShortPrint();
+  else
     PrintF("/* no function name */");
-  }
   PrintF("{");
 
-  PrintList("parameters", 0,
-            ParameterEntriesIndex(),
-            ParameterEntriesIndex() + ParameterCount(),
-            this);
-  PrintList("stack slots", 0,
-            StackLocalEntriesIndex(),
-            StackLocalEntriesIndex() + StackLocalCount(),
-            this);
-  PrintList("context slots",
-            Context::MIN_CONTEXT_SLOTS,
-            ContextLocalNameEntriesIndex(),
-            ContextLocalNameEntriesIndex() + ContextLocalCount(),
-            this);
+  PrintList<Allocator>("parameters", 0, parameters_);
+  PrintList<Allocator>("stack slots", 0, stack_slots_);
+  PrintList<Allocator>("context slots", Context::MIN_CONTEXT_SLOTS,
+                       context_slots_);
 
   PrintF("}\n");
 }
 #endif  // DEBUG
 
+
+// Make sure the classes get instantiated by the template system.
+template class ScopeInfo<FreeStoreAllocationPolicy>;
+template class ScopeInfo<PreallocatedStorage>;
+template class ScopeInfo<ZoneListAllocationPolicy>;
+
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 93734f5..40c5c8a 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -35,6 +35,135 @@
 namespace v8 {
 namespace internal {
 
+// Scope information represents information about a function's
+// scopes (currently only one, because we don't do any inlining)
+// and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in FixedArray objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+//
+// Historical note: In other VMs built by this team, ScopeInfo was
+// usually called DebugInfo since the information was used (among
+// other things) for on-demand debugging (Self, Smalltalk). However,
+// DebugInfo seems misleading, since this information is primarily used
+// in debugging-unrelated contexts.
+
+// Forward declared as
+// template <class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+template<class Allocator>
+class ScopeInfo BASE_EMBEDDED {
+ public:
+  // Create a ScopeInfo instance from a scope.
+  explicit ScopeInfo(Scope* scope);
+
+  // Create a ScopeInfo instance from SerializedScopeInfo.
+  explicit ScopeInfo(SerializedScopeInfo* data);
+
+  // Creates a SerializedScopeInfo holding the serialized scope info.
+  Handle<SerializedScopeInfo> Serialize();
+
+  // --------------------------------------------------------------------------
+  // Lookup
+
+  Handle<String> function_name() const { return function_name_; }
+
+  Handle<String> parameter_name(int i) const { return parameters_[i]; }
+  int number_of_parameters() const { return parameters_.length(); }
+
+  Handle<String> stack_slot_name(int i) const { return stack_slots_[i]; }
+  int number_of_stack_slots() const { return stack_slots_.length(); }
+
+  Handle<String> context_slot_name(int i) const {
+    return context_slots_[i - Context::MIN_CONTEXT_SLOTS];
+  }
+  int number_of_context_slots() const {
+    int l = context_slots_.length();
+    return l == 0 ? 0 : l + Context::MIN_CONTEXT_SLOTS;
+  }
+
+  Handle<String> LocalName(int i) const;
+  int NumberOfLocals() const;
+
+  // --------------------------------------------------------------------------
+  // Debugging support
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+ private:
+  Handle<String> function_name_;
+  bool calls_eval_;
+  bool is_strict_mode_;
+  List<Handle<String>, Allocator > parameters_;
+  List<Handle<String>, Allocator > stack_slots_;
+  List<Handle<String>, Allocator > context_slots_;
+  List<Variable::Mode, Allocator > context_modes_;
+};
+
+
+// This object provides quick access to scope info details for runtime
+// routines without the need to explicitly create a ScopeInfo object.
+class SerializedScopeInfo : public FixedArray {
+ public:
+
+  static SerializedScopeInfo* cast(Object* object) {
+    ASSERT(object->IsSerializedScopeInfo());
+    return reinterpret_cast<SerializedScopeInfo*>(object);
+  }
+
+  // Does this scope call eval?
+  bool CallsEval();
+
+  // Is this scope a strict mode scope?
+  bool IsStrictMode();
+
+  // Return the number of stack slots for code.
+  int NumberOfStackSlots();
+
+  // Return the number of context slots for code.
+  int NumberOfContextSlots();
+
+  // Returns true if this scope has context slots besides MIN_CONTEXT_SLOTS.
+  bool HasHeapAllocatedLocals();
+
+  // Lookup support for serialized scope info. Returns the
+  // stack slot index for a given slot name if the slot is
+  // present; otherwise returns a value < 0. The name must be a symbol
+  // (canonicalized).
+  int StackSlotIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // context slot index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be a symbol (canonicalized).
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  int ContextSlotIndex(String* name, Variable::Mode* mode);
+
+  // Lookup support for serialized scope info. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+  int ParameterIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the
+  // function context slot index if the function name is present (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be a symbol (canonicalized).
+  int FunctionContextSlotIndex(String* name);
+
+  static Handle<SerializedScopeInfo> Create(Scope* scope);
+
+  // Returns the empty scope info.
+  static SerializedScopeInfo* Empty();
+
+ private:
+  inline Object** ContextEntriesAddr();
+
+  inline Object** ParameterEntriesAddr();
+
+  inline Object** StackSlotEntriesAddr();
+};
+
+
 // Cache for mapping (data, property name) into context slot index.
 // The cache contains both positive and negative results.
 // Slot index equals -1 means the property is absent.
@@ -45,14 +174,12 @@
   // If absent, kNotFound is returned.
   int Lookup(Object* data,
              String* name,
-             VariableMode* mode,
-             InitializationFlag* init_flag);
+             Variable::Mode* mode);
 
   // Update an element in the cache.
   void Update(Object* data,
               String* name,
-              VariableMode mode,
-              InitializationFlag init_flag,
+              Variable::Mode mode,
               int slot_index);
 
   // Clear the cache.
@@ -74,8 +201,7 @@
 #ifdef DEBUG
   void ValidateEntry(Object* data,
                      String* name,
-                     VariableMode mode,
-                     InitializationFlag init_flag,
+                     Variable::Mode mode,
                      int slot_index);
 #endif
 
@@ -86,17 +212,11 @@
   };
 
   struct Value {
-    Value(VariableMode mode,
-          InitializationFlag init_flag,
-          int index) {
+    Value(Variable::Mode mode, int index) {
       ASSERT(ModeField::is_valid(mode));
-      ASSERT(InitField::is_valid(init_flag));
       ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) |
-          IndexField::encode(index) |
-          InitField::encode(init_flag);
+      value_ = ModeField::encode(mode) | IndexField::encode(index);
       ASSERT(mode == this->mode());
-      ASSERT(init_flag == this->initialization_flag());
       ASSERT(index == this->index());
     }
 
@@ -104,20 +224,14 @@
 
     uint32_t raw() { return value_; }
 
-    VariableMode mode() { return ModeField::decode(value_); }
-
-    InitializationFlag initialization_flag() {
-      return InitField::decode(value_);
-    }
+    Variable::Mode mode() { return ModeField::decode(value_); }
 
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<VariableMode,       0, 3> {};
-    class InitField:  public BitField<InitializationFlag, 3, 1> {};
-    class IndexField: public BitField<int,                4, 32-4> {};
-
+    class ModeField:  public BitField<Variable::Mode, 0, 3> {};
+    class IndexField: public BitField<int,            3, 32-3> {};
    private:
     uint32_t value_;
   };
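
The Value packing above is compact enough to restate in isolation: after the revert, ModeField occupies bits 0..2 and IndexField bits 3..31 of a single uint32_t. A self-contained sketch of a BitField-style encode/decode under exactly those shift/size assumptions (a stand-in template, not V8's BitField):

#include <cassert>
#include <cstdint>

// Stand-in for V8's BitField<Type, shift, size>, just for this sketch.
template <typename T, int shift, int size>
struct BitFieldSketch {
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};

// Matches the reverted layout: 3 mode bits, then 29 index bits.
typedef BitFieldSketch<int, 0, 3>      ModeFieldSketch;
typedef BitFieldSketch<int, 3, 32 - 3> IndexFieldSketch;

int main() {
  uint32_t raw = ModeFieldSketch::encode(2) | IndexFieldSketch::encode(12345);
  assert(ModeFieldSketch::decode(raw) == 2);
  assert(IndexFieldSketch::decode(raw) == 12345);
  return 0;
}
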
diff --git a/src/scopes.cc b/src/scopes.cc
index 859cbd1..d5a7a9f 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,6 @@
 
 #include "bootstrapper.h"
 #include "compiler.h"
-#include "messages.h"
 #include "scopeinfo.h"
 
 #include "allocation-inl.h"
@@ -40,6 +39,26 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
+// A Zone allocator for use with LocalsMap.
+
+// TODO(isolates): It is probably worth it to change the Allocator class to
+//                 take a pointer to an isolate.
+class ZoneAllocator: public Allocator {
+ public:
+  /* nothing to do */
+  virtual ~ZoneAllocator()  {}
+
+  virtual void* New(size_t size)  { return ZONE->New(static_cast<int>(size)); }
+
+  /* ignored - Zone is freed in one fell swoop */
+  virtual void Delete(void* p)  {}
+};
+
+
+static ZoneAllocator LocalsMapAllocator;
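
The empty Delete() is the point of the pattern: zone allocation trades individual frees for bulk release when the zone goes away. A toy arena showing the same contract (invented names, not the real Zone API):

#include <cstddef>
#include <vector>

class ArenaSketch {
 public:
  // Bump-style allocation; each request gets its own chunk for simplicity.
  void* New(size_t size) {
    chunks_.push_back(std::vector<char>(size));
    return chunks_.back().data();
  }
  // Individual deallocation is ignored; see "one fell swoop" above.
  void Delete(void*) {}
  // Everything is released together when the arena is destroyed.
 private:
  std::vector<std::vector<char> > chunks_;
};
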
+
+
+// ----------------------------------------------------------------------------
 // Implementation of LocalsMap
 //
 // Note: We are storing the handle locations as key values in the hash map.
@@ -57,36 +76,30 @@
 }
 
 
-VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
+// Dummy constructor
+VariableMap::VariableMap(bool gotta_love_static_overloading) : HashMap() {}
+
+VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(
-    Scope* scope,
-    Handle<String> name,
-    VariableMode mode,
-    bool is_valid_lhs,
-    Variable::Kind kind,
-    InitializationFlag initialization_flag,
-    Interface* interface) {
-  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
+Variable* VariableMap::Declare(Scope* scope,
+                               Handle<String> name,
+                               Variable::Mode mode,
+                               bool is_valid_lhs,
+                               Variable::Kind kind) {
+  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name.location());
-    p->value = new Variable(scope,
-                            name,
-                            mode,
-                            is_valid_lhs,
-                            kind,
-                            initialization_flag,
-                            interface);
+    p->value = new Variable(scope, name, mode, is_valid_lhs, kind);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
 
 
 Variable* VariableMap::Lookup(Handle<String> name) {
-  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
+  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
   if (p != NULL) {
     ASSERT(*reinterpret_cast<String**>(p->key) == *name);
     ASSERT(p->value != NULL);
@@ -99,7 +112,22 @@
 // ----------------------------------------------------------------------------
 // Implementation of Scope
 
-Scope::Scope(Scope* outer_scope, ScopeType type)
+
+// Dummy constructor
+Scope::Scope(Type type)
+    : isolate_(Isolate::Current()),
+      inner_scopes_(0),
+      variables_(false),
+      temps_(0),
+      params_(0),
+      unresolved_(0),
+      decls_(0),
+      already_resolved_(false) {
+  SetDefaults(type, NULL, Handle<SerializedScopeInfo>::null());
+}
+
+
+Scope::Scope(Scope* outer_scope, Type type)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -107,22 +135,19 @@
       params_(4),
       unresolved_(16),
       decls_(4),
-      interface_(FLAG_harmony_modules &&
-                 (type == MODULE_SCOPE || type == GLOBAL_SCOPE)
-                     ? Interface::NewModule() : NULL),
       already_resolved_(false) {
-  SetDefaults(type, outer_scope, Handle<ScopeInfo>::null());
+  SetDefaults(type, outer_scope, Handle<SerializedScopeInfo>::null());
   // At some point we might want to provide outer scopes to
   // eval scopes (by walking the stack and reading the scope info).
   // In that case, the ASSERT below needs to be adjusted.
-  ASSERT_EQ(type == GLOBAL_SCOPE, outer_scope == NULL);
+  ASSERT((type == GLOBAL_SCOPE || type == EVAL_SCOPE) == (outer_scope == NULL));
   ASSERT(!HasIllegalRedeclaration());
 }
 
 
 Scope::Scope(Scope* inner_scope,
-             ScopeType type,
-             Handle<ScopeInfo> scope_info)
+             Type type,
+             Handle<SerializedScopeInfo> scope_info)
     : isolate_(Isolate::Current()),
       inner_scopes_(4),
       variables_(),
@@ -130,15 +155,12 @@
       params_(4),
       unresolved_(16),
       decls_(4),
-      interface_(NULL),
       already_resolved_(true) {
+  ASSERT(!scope_info.is_null());
   SetDefaults(type, NULL, scope_info);
-  if (!scope_info.is_null()) {
-    num_heap_slots_ = scope_info_->ContextLength();
+  if (scope_info->HasHeapAllocatedLocals()) {
+    num_heap_slots_ = scope_info_->NumberOfContextSlots();
   }
-  // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
-  num_heap_slots_ = Max(num_heap_slots_,
-                        static_cast<int>(Context::MIN_CONTEXT_SLOTS));
   AddInnerScope(inner_scope);
 }
 
@@ -151,25 +173,22 @@
       params_(0),
       unresolved_(0),
       decls_(0),
-      interface_(NULL),
       already_resolved_(true) {
-  SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
+  SetDefaults(CATCH_SCOPE, NULL, Handle<SerializedScopeInfo>::null());
   AddInnerScope(inner_scope);
   ++num_var_or_const_;
-  num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
   Variable* variable = variables_.Declare(this,
                                           catch_variable_name,
-                                          VAR,
+                                          Variable::VAR,
                                           true,  // Valid left-hand side.
-                                          Variable::NORMAL,
-                                          kCreatedInitialized);
+                                          Variable::NORMAL);
   AllocateHeapSlot(variable);
 }
 
 
-void Scope::SetDefaults(ScopeType type,
+void Scope::SetDefaults(Type type,
                         Scope* outer_scope,
-                        Handle<ScopeInfo> scope_info) {
+                        Handle<SerializedScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   type_ = type;
   scope_name_ = isolate_->factory()->empty_symbol();
@@ -182,57 +201,53 @@
   scope_contains_with_ = false;
   scope_calls_eval_ = false;
   // Inherit the strict mode from the parent scope.
-  language_mode_ = (outer_scope != NULL)
-      ? outer_scope->language_mode_ : CLASSIC_MODE;
+  strict_mode_ = (outer_scope != NULL) && outer_scope->strict_mode_;
+  outer_scope_calls_eval_ = false;
   outer_scope_calls_non_strict_eval_ = false;
   inner_scope_calls_eval_ = false;
+  outer_scope_is_eval_scope_ = false;
   force_eager_compilation_ = false;
   num_var_or_const_ = 0;
   num_stack_slots_ = 0;
   num_heap_slots_ = 0;
   scope_info_ = scope_info;
-  start_position_ = RelocInfo::kNoPosition;
-  end_position_ = RelocInfo::kNoPosition;
-  if (!scope_info.is_null()) {
-    scope_calls_eval_ = scope_info->CallsEval();
-    language_mode_ = scope_info->language_mode();
-  }
 }
 
 
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope) {
+Scope* Scope::DeserializeScopeChain(CompilationInfo* info,
+                                    Scope* global_scope) {
   // Reconstruct the outer scope chain from a closure's context chain.
+  ASSERT(!info->closure().is_null());
+  Context* context = info->closure()->context();
   Scope* current_scope = NULL;
   Scope* innermost_scope = NULL;
   bool contains_with = false;
   while (!context->IsGlobalContext()) {
     if (context->IsWithContext()) {
-      Scope* with_scope = new Scope(current_scope,
-                                    WITH_SCOPE,
-                                    Handle<ScopeInfo>::null());
-      current_scope = with_scope;
       // All the inner scopes are inside a with.
       contains_with = true;
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
-    } else if (context->IsFunctionContext()) {
-      ScopeInfo* scope_info = context->closure()->shared()->scope_info();
-      current_scope = new Scope(current_scope,
-                                FUNCTION_SCOPE,
-                                Handle<ScopeInfo>(scope_info));
-    } else if (context->IsBlockContext()) {
-      ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
-      current_scope = new Scope(current_scope,
-                                BLOCK_SCOPE,
-                                Handle<ScopeInfo>(scope_info));
     } else {
-      ASSERT(context->IsCatchContext());
-      String* name = String::cast(context->extension());
-      current_scope = new Scope(current_scope, Handle<String>(name));
+      if (context->IsFunctionContext()) {
+        SerializedScopeInfo* scope_info =
+            context->closure()->shared()->scope_info();
+        current_scope = new Scope(current_scope, FUNCTION_SCOPE,
+            Handle<SerializedScopeInfo>(scope_info));
+      } else if (context->IsBlockContext()) {
+        SerializedScopeInfo* scope_info =
+            SerializedScopeInfo::cast(context->extension());
+        current_scope = new Scope(current_scope, BLOCK_SCOPE,
+            Handle<SerializedScopeInfo>(scope_info));
+      } else {
+        ASSERT(context->IsCatchContext());
+        String* name = String::cast(context->extension());
+        current_scope = new Scope(current_scope, Handle<String>(name));
+      }
+      if (contains_with) current_scope->RecordWithStatement();
+      if (innermost_scope == NULL) innermost_scope = current_scope;
     }
-    if (contains_with) current_scope->RecordWithStatement();
-    if (innermost_scope == NULL) innermost_scope = current_scope;
 
     // Forget about a with when we move to a context for a different function.
     if (context->previous()->closure() != context->closure()) {
@@ -242,73 +257,39 @@
   }
 
   global_scope->AddInnerScope(current_scope);
-  global_scope->PropagateScopeInfo(false);
   return (innermost_scope == NULL) ? global_scope : innermost_scope;
 }
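
Stripped of the context-kind details, the loop above has a simple shape: every non-'with' context contributes a scope that wraps everything built so far, while a 'with' context contributes no scope but marks every scope already built as inside a 'with'. A condensed sketch with invented stand-in types; it omits the forget-about-'with' step at the function boundary shown above:

#include <cstddef>

struct CtxSketch {
  enum Kind { GLOBAL, WITH, FUNCTION, BLOCK, CATCH };
  Kind kind;
  CtxSketch* previous;
};

struct ScopeSketch {
  ScopeSketch* outer;
  bool inside_with;    // an enclosing context is a 'with'
  bool contains_with;  // some 'with' occurs within this scope
  ScopeSketch(ScopeSketch* o, bool cw)
      : outer(o), inside_with(false), contains_with(cw) {}
};

ScopeSketch* DeserializeSketch(CtxSketch* context, ScopeSketch* global_scope) {
  ScopeSketch* current = NULL;    // outermost scope built so far
  ScopeSketch* innermost = NULL;
  bool contains_with = false;
  for (; context->kind != CtxSketch::GLOBAL; context = context->previous) {
    if (context->kind == CtxSketch::WITH) {
      contains_with = true;
      // All the inner scopes are inside this 'with'.
      for (ScopeSketch* s = innermost; s != NULL; s = s->outer) {
        s->inside_with = true;
      }
    } else {
      // The new scope wraps everything built so far.
      ScopeSketch* outer = new ScopeSketch(NULL, contains_with);
      if (current != NULL) current->outer = outer;
      current = outer;
      if (innermost == NULL) innermost = current;
    }
  }
  if (current != NULL) current->outer = global_scope;
  return innermost != NULL ? innermost : global_scope;
}
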
 
 
 bool Scope::Analyze(CompilationInfo* info) {
   ASSERT(info->function() != NULL);
-  Scope* scope = info->function()->scope();
-  Scope* top = scope;
+  Scope* top = info->function()->scope();
 
-  // Traverse the scope tree up to the first unresolved scope or the global
-  // scope and start scope resolution and variable allocation from that scope.
-  while (!top->is_global_scope() &&
-         !top->outer_scope()->already_resolved()) {
-    top = top->outer_scope();
-  }
-
-  // Allocate the variables.
-  {
-    AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate());
-    if (!top->AllocateVariables(info, &ast_node_factory)) return false;
-  }
+  while (top->outer_scope() != NULL) top = top->outer_scope();
+  top->AllocateVariables(info->calling_context());
 
 #ifdef DEBUG
   if (info->isolate()->bootstrapper()->IsActive()
           ? FLAG_print_builtin_scopes
           : FLAG_print_scopes) {
-    scope->Print();
-  }
-
-  if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_global_scope()) {
-    PrintF("global : ");
-    top->interface()->Print();
+    info->function()->scope()->Print();
   }
 #endif
 
-  if (FLAG_harmony_scoping) {
-    VariableProxy* proxy = scope->CheckAssignmentToConst();
-    if (proxy != NULL) {
-      // Found an assignment to const. Throw a syntax error.
-      MessageLocation location(info->script(),
-                               proxy->position(),
-                               proxy->position());
-      Isolate* isolate = info->isolate();
-      Factory* factory = isolate->factory();
-      Handle<JSArray> array = factory->NewJSArray(0);
-      Handle<Object> result =
-          factory->NewSyntaxError("harmony_const_assign", array);
-      isolate->Throw(*result, &location);
-      return false;
-    }
-  }
-
-  info->SetScope(scope);
-  return true;
+  info->SetScope(info->function()->scope());
+  return true;  // Cannot fail.
 }
 
 
-void Scope::Initialize() {
+void Scope::Initialize(bool inside_with) {
   ASSERT(!already_resolved());
 
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
     outer_scope_->inner_scopes_.Add(this);
-    scope_inside_with_ = outer_scope_->scope_inside_with_ || is_with_scope();
+    scope_inside_with_ = outer_scope_->scope_inside_with_ || inside_with;
   } else {
-    scope_inside_with_ = is_with_scope();
+    scope_inside_with_ = inside_with;
   }
 
   // Declare convenience variables.
@@ -319,19 +300,21 @@
   // instead load them directly from the stack. Currently, the only
   // such parameter is 'this' which is passed on the stack when
   // invoking scripts
-  if (is_declaration_scope()) {
+  if (is_catch_scope() || is_block_scope()) {
+    ASSERT(outer_scope() != NULL);
+    receiver_ = outer_scope()->receiver();
+  } else {
+    ASSERT(is_function_scope() ||
+           is_global_scope() ||
+           is_eval_scope());
     Variable* var =
         variables_.Declare(this,
                            isolate_->factory()->this_symbol(),
-                           VAR,
+                           Variable::VAR,
                            false,
-                           Variable::THIS,
-                           kCreatedInitialized);
+                           Variable::THIS);
     var->AllocateTo(Variable::PARAMETER, -1);
     receiver_ = var;
-  } else {
-    ASSERT(outer_scope() != NULL);
-    receiver_ = outer_scope()->receiver();
   }
 
   if (is_function_scope()) {
@@ -340,10 +323,9 @@
     // allocated during variable allocation.
     variables_.Declare(this,
                        isolate_->factory()->arguments_symbol(),
-                       VAR,
+                       Variable::VAR,
                        true,
-                       Variable::ARGUMENTS,
-                       kCreatedInitialized);
+                       Variable::ARGUMENTS);
   }
 }
 
@@ -383,51 +365,34 @@
     return result;
   }
   // If we have a serialized scope info, we might find the variable there.
+  //
+  // We should never look up 'arguments' in this scope as it is implicitly
+  // present in every scope.
+  ASSERT(*name != *isolate_->factory()->arguments_symbol());
   // There should be no local slot with the given name.
   ASSERT(scope_info_->StackSlotIndex(*name) < 0);
 
   // Check context slot lookup.
-  VariableMode mode;
-  InitializationFlag init_flag;
-  int index = scope_info_->ContextSlotIndex(*name, &mode, &init_flag);
+  Variable::Mode mode;
+  int index = scope_info_->ContextSlotIndex(*name, &mode);
   if (index < 0) {
     // Check parameters.
-    mode = VAR;
-    init_flag = kCreatedInitialized;
+    mode = Variable::VAR;
     index = scope_info_->ParameterIndex(*name);
-    if (index < 0) return NULL;
+    if (index < 0) {
+      // Check the function name.
+      index = scope_info_->FunctionContextSlotIndex(*name);
+      if (index < 0) return NULL;
+    }
   }
 
   Variable* var =
-      variables_.Declare(this,
-                         name,
-                         mode,
-                         true,
-                         Variable::NORMAL,
-                         init_flag);
+      variables_.Declare(this, name, mode, true, Variable::NORMAL);
   var->AllocateTo(Variable::CONTEXT, index);
   return var;
 }
 
 
-Variable* Scope::LookupFunctionVar(Handle<String> name,
-                                   AstNodeFactory<AstNullVisitor>* factory) {
-  if (function_ != NULL && function_->name().is_identical_to(name)) {
-    return function_->var();
-  } else if (!scope_info_.is_null()) {
-    // If we are backed by a scope info, try to lookup the variable there.
-    VariableMode mode;
-    int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
-    if (index < 0) return NULL;
-    Variable* var = DeclareFunctionVar(name, mode, factory);
-    var->AllocateTo(Variable::CONTEXT, index);
-    return var;
-  } else {
-    return NULL;
-  }
-}
-
-
 Variable* Scope::Lookup(Handle<String> name) {
   for (Scope* scope = this;
        scope != NULL;
@@ -439,41 +404,56 @@
 }
 
 
-void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
+Variable* Scope::DeclareFunctionVar(Handle<String> name) {
+  ASSERT(is_function_scope() && function_ == NULL);
+  Variable* function_var =
+      new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
+  function_ = new(isolate_->zone()) VariableProxy(isolate_, function_var);
+  return function_var;
+}
+
+
+void Scope::DeclareParameter(Handle<String> name, Variable::Mode mode) {
   ASSERT(!already_resolved());
   ASSERT(is_function_scope());
-  Variable* var = variables_.Declare(
-      this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
+  Variable* var =
+      variables_.Declare(this, name, mode, true, Variable::NORMAL);
   params_.Add(var);
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name,
-                              VariableMode mode,
-                              InitializationFlag init_flag,
-                              Interface* interface) {
+Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
   ASSERT(!already_resolved());
   // This function handles VAR and CONST modes.  DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
   // explicitly, and TEMPORARY variables are allocated via NewTemporary().
-  ASSERT(mode == VAR ||
-         mode == CONST ||
-         mode == CONST_HARMONY ||
-         mode == LET);
+  ASSERT(mode == Variable::VAR ||
+         mode == Variable::CONST ||
+         mode == Variable::LET);
   ++num_var_or_const_;
-  return variables_.Declare(
-      this, name, mode, true, Variable::NORMAL, init_flag, interface);
+  return variables_.Declare(this, name, mode, true, Variable::NORMAL);
 }
 
 
 Variable* Scope::DeclareGlobal(Handle<String> name) {
   ASSERT(is_global_scope());
-  return variables_.Declare(this,
-                            name,
-                            DYNAMIC_GLOBAL,
+  return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL,
                             true,
-                            Variable::NORMAL,
-                            kCreatedInitialized);
+                            Variable::NORMAL);
+}
+
+
+VariableProxy* Scope::NewUnresolved(Handle<String> name,
+                                    bool inside_with,
+                                    int position) {
+  // Note that we must not share the unresolved variables with
+  // the same name because they may be removed selectively via
+  // RemoveUnresolved().
+  ASSERT(!already_resolved());
+  VariableProxy* proxy = new(isolate_->zone()) VariableProxy(
+      isolate_, name, false, inside_with, position);
+  unresolved_.Add(proxy);
+  return proxy;
 }
 
 
@@ -493,10 +473,9 @@
   ASSERT(!already_resolved());
   Variable* var = new Variable(this,
                                name,
-                               TEMPORARY,
+                               Variable::TEMPORARY,
                                true,
-                               Variable::NORMAL,
-                               kCreatedInitialized);
+                               Variable::NORMAL);
   temps_.Add(var);
   return var;
 }
@@ -526,97 +505,84 @@
   int length = decls_.length();
   for (int i = 0; i < length; i++) {
     Declaration* decl = decls_[i];
-    if (decl->mode() != VAR) continue;
+    if (decl->mode() != Variable::VAR) continue;
     Handle<String> name = decl->proxy()->name();
-
-    // Iterate through all scopes until and including the declaration scope.
-    Scope* previous = NULL;
-    Scope* current = decl->scope();
-    do {
+    bool cond = true;
+    for (Scope* scope = decl->scope(); cond; scope = scope->outer_scope_) {
       // There is a conflict if there exists a non-VAR binding.
-      Variable* other_var = current->variables_.Lookup(name);
-      if (other_var != NULL && other_var->mode() != VAR) {
+      Variable* other_var = scope->variables_.Lookup(name);
+      if (other_var != NULL && other_var->mode() != Variable::VAR) {
         return decl;
       }
-      previous = current;
-      current = current->outer_scope_;
-    } while (!previous->is_declaration_scope());
-  }
-  return NULL;
-}
 
-
-VariableProxy* Scope::CheckAssignmentToConst() {
-  // Check this scope.
-  if (is_extended_mode()) {
-    for (int i = 0; i < unresolved_.length(); i++) {
-      ASSERT(unresolved_[i]->var() != NULL);
-      if (unresolved_[i]->var()->is_const_mode() &&
-          unresolved_[i]->IsLValue()) {
-        return unresolved_[i];
-      }
+      // Include declaration scope in the iteration but stop after.
+      if (!scope->is_block_scope() && !scope->is_catch_scope()) cond = false;
     }
   }
-
-  // Check inner scopes.
-  for (int i = 0; i < inner_scopes_.length(); i++) {
-    VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
-    if (proxy != NULL) return proxy;
-  }
-
-  // No assignments to const found.
   return NULL;
 }
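
CheckConflictingVarDeclarations models 'var' hoisting: a declaration like `{ let x; var x; }` is a conflict because the hoisted `var x` passes through the block scope holding the non-VAR binding `let x`. A compact sketch of the same outward walk, stopping after the declaration scope (invented stand-in types):

#include <map>
#include <string>

struct HoistScopeSketch {
  HoistScopeSketch* outer;
  bool is_block_or_catch;                // 'var' hoists through these
  std::map<std::string, bool> bindings;  // name -> declared with 'var'?
};

// True if a 'var' named `name`, declared in `from`, meets a non-VAR binding
// on its way out to (and including) its declaration scope.
bool ConflictsSketch(HoistScopeSketch* from, const std::string& name) {
  bool keep_going = true;
  for (HoistScopeSketch* s = from; keep_going; s = s->outer) {
    std::map<std::string, bool>::const_iterator it = s->bindings.find(name);
    if (it != s->bindings.end() && !it->second) return true;  // non-VAR hit
    // Include the declaration scope in the walk, but stop after it.
    if (!s->is_block_or_catch) keep_going = false;
  }
  return false;
}
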
 
 
-void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
-                                         ZoneList<Variable*>* context_locals) {
-  ASSERT(stack_locals != NULL);
-  ASSERT(context_locals != NULL);
-
-  // Collect temporaries which are always allocated on the stack.
+template<class Allocator>
+void Scope::CollectUsedVariables(List<Variable*, Allocator>* locals) {
+  // Collect variables in this scope.
+  // Note that the function_ variable - if present - is not
+  // collected here but handled separately in ScopeInfo
+  // (which is the current user of this function).
   for (int i = 0; i < temps_.length(); i++) {
     Variable* var = temps_[i];
     if (var->is_used()) {
-      ASSERT(var->IsStackLocal());
-      stack_locals->Add(var);
+      locals->Add(var);
     }
   }
-
-  // Collect declared local variables.
   for (VariableMap::Entry* p = variables_.Start();
        p != NULL;
        p = variables_.Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
     if (var->is_used()) {
-      if (var->IsStackLocal()) {
-        stack_locals->Add(var);
-      } else if (var->IsContextSlot()) {
-        context_locals->Add(var);
-      }
+      locals->Add(var);
     }
   }
 }
 
 
-bool Scope::AllocateVariables(CompilationInfo* info,
-                              AstNodeFactory<AstNullVisitor>* factory) {
+// Make sure the method gets instantiated by the template system.
+template void Scope::CollectUsedVariables(
+    List<Variable*, FreeStoreAllocationPolicy>* locals);
+template void Scope::CollectUsedVariables(
+    List<Variable*, PreallocatedStorage>* locals);
+template void Scope::CollectUsedVariables(
+    List<Variable*, ZoneListAllocationPolicy>* locals);
+
+
+void Scope::AllocateVariables(Handle<Context> context) {
+  ASSERT(outer_scope_ == NULL);  // eval or global scopes only
+
   // 1) Propagate scope information.
+  // If we are in an eval scope, we may have other outer scopes about
+  // which we don't know anything at this point. Thus we must be conservative
+  // and assume they may invoke eval themselves. Eventually we could capture
+  // this information in the ScopeInfo and then use it here (by traversing
+  // the call chain stack, at compile time).
+
+  bool eval_scope = is_eval_scope();
+  bool outer_scope_calls_eval = false;
   bool outer_scope_calls_non_strict_eval = false;
-  if (outer_scope_ != NULL) {
-    outer_scope_calls_non_strict_eval =
-        outer_scope_->outer_scope_calls_non_strict_eval() |
-        outer_scope_->calls_non_strict_eval();
+  if (!is_global_scope()) {
+    context->ComputeEvalScopeInfo(&outer_scope_calls_eval,
+                                  &outer_scope_calls_non_strict_eval);
   }
-  PropagateScopeInfo(outer_scope_calls_non_strict_eval);
+  PropagateScopeInfo(outer_scope_calls_eval,
+                     outer_scope_calls_non_strict_eval,
+                     eval_scope);
 
   // 2) Resolve variables.
-  if (!ResolveVariablesRecursively(info, factory)) return false;
+  Scope* global_scope = NULL;
+  if (is_global_scope()) global_scope = this;
+  ResolveVariablesRecursively(global_scope, context);
 
   // 3) Allocate variables.
   AllocateVariablesRecursively();
-
-  return true;
 }
 
 
@@ -661,49 +627,30 @@
 
 Scope* Scope::DeclarationScope() {
   Scope* scope = this;
-  while (!scope->is_declaration_scope()) {
+  while (scope->is_catch_scope() ||
+         scope->is_block_scope()) {
     scope = scope->outer_scope();
   }
   return scope;
 }
 
 
-Handle<ScopeInfo> Scope::GetScopeInfo() {
+Handle<SerializedScopeInfo> Scope::GetSerializedScopeInfo() {
   if (scope_info_.is_null()) {
-    scope_info_ = ScopeInfo::Create(this);
+    scope_info_ = SerializedScopeInfo::Create(this);
   }
   return scope_info_;
 }
 
 
-void Scope::GetNestedScopeChain(
-    List<Handle<ScopeInfo> >* chain,
-    int position) {
-  if (!is_eval_scope()) chain->Add(Handle<ScopeInfo>(GetScopeInfo()));
-
-  for (int i = 0; i < inner_scopes_.length(); i++) {
-    Scope* scope = inner_scopes_[i];
-    int beg_pos = scope->start_position();
-    int end_pos = scope->end_position();
-    ASSERT(beg_pos >= 0 && end_pos >= 0);
-    if (beg_pos <= position && position < end_pos) {
-      scope->GetNestedScopeChain(chain, position);
-      return;
-    }
-  }
-}
-
-
 #ifdef DEBUG
-static const char* Header(ScopeType type) {
+static const char* Header(Scope::Type type) {
   switch (type) {
-    case EVAL_SCOPE: return "eval";
-    case FUNCTION_SCOPE: return "function";
-    case MODULE_SCOPE: return "module";
-    case GLOBAL_SCOPE: return "global";
-    case CATCH_SCOPE: return "catch";
-    case BLOCK_SCOPE: return "block";
-    case WITH_SCOPE: return "with";
+    case Scope::EVAL_SCOPE: return "eval";
+    case Scope::FUNCTION_SCOPE: return "function";
+    case Scope::GLOBAL_SCOPE: return "global";
+    case Scope::CATCH_SCOPE: return "catch";
+    case Scope::BLOCK_SCOPE: return "block";
   }
   UNREACHABLE();
   return NULL;
@@ -748,9 +695,9 @@
     PrintName(var->name());
     PrintF(";  // ");
     PrintLocation(var);
-    if (var->has_forced_context_allocation()) {
+    if (var->is_accessed_from_inner_scope()) {
       if (!var->IsUnallocated()) PrintF(", ");
-      PrintF("forced context allocation");
+      PrintF("inner scope access");
     }
     PrintF("\n");
   }
@@ -786,7 +733,7 @@
     PrintF(")");
   }
 
-  PrintF(" { // (%d, %d)\n", start_position(), end_position());
+  PrintF(" {\n");
 
   // Function name, if any (named function literals, only).
   if (function_ != NULL) {
@@ -799,23 +746,18 @@
   if (HasTrivialOuterContext()) {
     Indent(n1, "// scope has trivial outer context\n");
   }
-  switch (language_mode()) {
-    case CLASSIC_MODE:
-      break;
-    case STRICT_MODE:
-      Indent(n1, "// strict mode scope\n");
-      break;
-    case EXTENDED_MODE:
-      Indent(n1, "// extended mode scope\n");
-      break;
-  }
+  if (is_strict_mode()) Indent(n1, "// strict mode scope\n");
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
+  if (outer_scope_calls_eval_) Indent(n1, "// outer scope calls 'eval'\n");
   if (outer_scope_calls_non_strict_eval_) {
     Indent(n1, "// outer scope calls 'eval' in non-strict context\n");
   }
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
+  if (outer_scope_is_eval_scope_) {
+    Indent(n1, "// outer scope is 'eval' scope\n");
+  }
   if (num_stack_slots_ > 0) { Indent(n1, "// ");
   PrintF("%d stack slots\n", num_stack_slots_); }
   if (num_heap_slots_ > 0) { Indent(n1, "// ");
@@ -837,9 +779,9 @@
 
   Indent(n1, "// dynamic vars\n");
   if (dynamics_ != NULL) {
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC));
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC_LOCAL));
-    PrintMap(n1, dynamics_->GetMap(DYNAMIC_GLOBAL));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_LOCAL));
+    PrintMap(n1, dynamics_->GetMap(Variable::DYNAMIC_GLOBAL));
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -855,20 +797,13 @@
 #endif  // DEBUG
 
 
-Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
+Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
   if (dynamics_ == NULL) dynamics_ = new DynamicScopePart();
   VariableMap* map = dynamics_->GetMap(mode);
   Variable* var = map->Lookup(name);
   if (var == NULL) {
     // Declare a new non-local.
-    InitializationFlag init_flag = (mode == VAR)
-        ? kCreatedInitialized : kNeedsInitialization;
-    var = map->Declare(NULL,
-                       name,
-                       mode,
-                       true,
-                       Variable::NORMAL,
-                       init_flag);
+    var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
     // Allocate it by giving it a dynamic lookup.
     var->AllocateTo(Variable::LOOKUP, -1);
   }
@@ -876,178 +811,198 @@
 }
 
 
+// Lookup a variable starting with this scope. The result is either
+// the statically resolved variable belonging to an outer scope, or
+// NULL. It may be NULL because a) we couldn't find a variable, or b)
+// because the variable is just a guess (and may be shadowed by
+// another variable that is introduced dynamically via an 'eval' call
+// or a 'with' statement).
 Variable* Scope::LookupRecursive(Handle<String> name,
-                                 BindingKind* binding_kind,
-                                 AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(binding_kind != NULL);
+                                 bool from_inner_scope,
+                                 Variable** invalidated_local) {
+  // If we find a variable, but the current scope calls 'eval', the found
+  // variable may not be the correct one (the 'eval' may introduce a
+  // property with the same name). In that case, remember that the variable
+  // found is just a guess.
+  bool guess = scope_calls_eval_;
+
   // Try to find the variable in this scope.
   Variable* var = LocalLookup(name);
 
-  // We found a variable and we are done. (Even if there is an 'eval' in
-  // this scope which introduces the same variable again, the resulting
-  // variable remains the same.)
   if (var != NULL) {
-    *binding_kind = BOUND;
-    return var;
-  }
+    // We found a variable. If this is not an inner lookup, we are done.
+    // (Even if there is an 'eval' in this scope which introduces the
+    // same variable again, the resulting variable remains the same.
+    // Note that enclosing 'with' statements are handled at the call site.)
+    if (!from_inner_scope)
+      return var;
 
-  // We did not find a variable locally. Check against the function variable,
-  // if any. We can do this for all scopes, since the function variable is
-  // only present - if at all - for function scopes.
-  *binding_kind = UNBOUND;
-  var = LookupFunctionVar(name, factory);
-  if (var != NULL) {
-    *binding_kind = BOUND;
-  } else if (outer_scope_ != NULL) {
-    var = outer_scope_->LookupRecursive(name, binding_kind, factory);
-    if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
-      var->ForceContextAllocation();
-    }
   } else {
-    ASSERT(is_global_scope());
+    // We did not find a variable locally. Check against the function variable,
+    // if any. We can do this for all scopes, since the function variable is
+    // only present - if at all - for function scopes.
+    //
+    // This lookup corresponds to a lookup in the "intermediate" scope sitting
+    // between this scope and the outer scope. (ECMA-262, 3rd., requires that
+    // the name of a named function literal is kept in an intermediate scope
+    // in between this scope and the next outer scope.)
+    if (function_ != NULL && function_->name().is_identical_to(name)) {
+      var = function_->var();
+
+    } else if (outer_scope_ != NULL) {
+      var = outer_scope_->LookupRecursive(name, true, invalidated_local);
+      // We may have found a variable in an outer scope. However, if
+      // the current scope is inside a 'with', the actual variable may
+      // be a property introduced via the 'with' statement. Then, the
+      // variable we may have found is just a guess.
+      if (scope_inside_with_)
+        guess = true;
+    }
+
+    // If we did not find a variable, we are done.
+    if (var == NULL)
+      return NULL;
   }
 
-  if (is_with_scope()) {
-    // The current scope is a with scope, so the variable binding can not be
-    // statically resolved. However, note that it was necessary to do a lookup
-    // in the outer scope anyway, because if a binding exists in an outer scope,
-    // the associated variable has to be marked as potentially being accessed
-    // from inside of an inner with scope (the property may not be in the 'with'
-    // object).
-    *binding_kind = DYNAMIC_LOOKUP;
-    return NULL;
-  } else if (calls_non_strict_eval()) {
-    // A variable binding may have been found in an outer scope, but the current
-    // scope makes a non-strict 'eval' call, so the found variable may not be
-    // the correct one (the 'eval' may introduce a binding with the same name).
-    // In that case, change the lookup result to reflect this situation.
-    if (*binding_kind == BOUND) {
-      *binding_kind = BOUND_EVAL_SHADOWED;
-    } else if (*binding_kind == UNBOUND) {
-      *binding_kind = UNBOUND_EVAL_SHADOWED;
-    }
+  ASSERT(var != NULL);
+
+  // If this is a lookup from an inner scope, mark the variable.
+  if (from_inner_scope) {
+    var->MarkAsAccessedFromInnerScope();
   }
+
+  // If the variable we have found is just a guess, invalidate the
+  // result. If the found variable is local, record that fact so we
+  // can generate fast code to get it if it is not shadowed by eval.
+  if (guess) {
+    if (!var->is_global()) *invalidated_local = var;
+    var = NULL;
+  }
+
   return var;
 }
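
The invalidation at the end is the interesting part: in `function f() { var x = 1; eval(s); return x; }` the lookup for `x` statically finds the local, but because the scope calls 'eval' the result is only a guess, so the local is reported through *invalidated_local and NULL is returned. A distilled sketch of that final decision (hypothetical types):

#include <cstddef>

struct VarSketch { bool is_global; };

// Mirrors the tail of LookupRecursive(): a guessed, non-global hit is
// surrendered to the caller via *invalidated_local so ResolveVariable()
// can fall back to a DYNAMIC_LOCAL slow-path lookup.
VarSketch* FinishLookupSketch(VarSketch* found, bool guess,
                              VarSketch** invalidated_local) {
  if (found == NULL || !guess) return found;
  if (!found->is_global) *invalidated_local = found;
  return NULL;
}
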
 
 
-bool Scope::ResolveVariable(CompilationInfo* info,
-                            VariableProxy* proxy,
-                            AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(info->global_scope()->is_global_scope());
+void Scope::ResolveVariable(Scope* global_scope,
+                            Handle<Context> context,
+                            VariableProxy* proxy) {
+  ASSERT(global_scope == NULL || global_scope->is_global_scope());
 
   // If the proxy is already resolved there's nothing to do
   // (functions and consts may be resolved by the parser).
-  if (proxy->var() != NULL) return true;
+  if (proxy->var() != NULL) return;
 
   // Otherwise, try to resolve the variable.
-  BindingKind binding_kind;
-  Variable* var = LookupRecursive(proxy->name(), &binding_kind, factory);
-  switch (binding_kind) {
-    case BOUND:
-      // We found a variable binding.
-      break;
+  Variable* invalidated_local = NULL;
+  Variable* var = LookupRecursive(proxy->name(), false, &invalidated_local);
 
-    case BOUND_EVAL_SHADOWED:
-      // We found a variable binding that might be shadowed
-      // by 'eval' introduced variable bindings.
-      if (var->is_global()) {
-        var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+  if (proxy->inside_with()) {
+    // If we are inside a local 'with' statement, all bets are off
+    // and we cannot resolve the proxy to a local variable even if
+    // we found an outer matching variable.
+    // Note that we must do a lookup anyway, because if we find one,
+    // we must mark that variable as potentially accessed from this
+    // inner scope (the property may not be in the 'with' object).
+    var = NonLocal(proxy->name(), Variable::DYNAMIC);
+
+  } else {
+    // We are not inside a local 'with' statement.
+
+    if (var == NULL) {
+      // We did not find the variable. We have a global variable
+      // if we are in the global scope (we know already that we
+      // are outside a 'with' statement) or if there is no way
+      // that the variable might be introduced dynamically (through
+      // a local or outer eval() call, or an outer 'with' statement),
+      // or we don't know about the outer scope (because we are
+      // in an eval scope).
+      if (is_global_scope() ||
+          !(scope_inside_with_ || outer_scope_is_eval_scope_ ||
+            scope_calls_eval_ || outer_scope_calls_eval_)) {
+        // We must have a global variable.
+        ASSERT(global_scope != NULL);
+        var = global_scope->DeclareGlobal(proxy->name());
+
+      } else if (scope_inside_with_) {
+        // If we are inside a with statement we give up and look up
+        // the variable at runtime.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC);
+
+      } else if (invalidated_local != NULL) {
+        // No with statements are involved and we found a local
+        // variable that might be shadowed by eval introduced
+        // variables.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC_LOCAL);
+        var->set_local_if_not_shadowed(invalidated_local);
+
+      } else if (outer_scope_is_eval_scope_) {
+        // No with statements are involved, we did not find a local, and
+        // the code is executed with a call to eval.  The context contains
+        // scope information that we can use to determine whether the
+        // variable is global, provided it is not shadowed by
+        // eval-introduced variables.
+        if (context->GlobalIfNotShadowedByEval(proxy->name())) {
+          var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
+
+        } else {
+          var = NonLocal(proxy->name(), Variable::DYNAMIC);
+        }
+
       } else {
-        Variable* invalidated = var;
-        var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
-        var->set_local_if_not_shadowed(invalidated);
+        // No with statements are involved, we did not find a local, and
+        // the code is not executed with a call to eval.  We know that this
+        // variable is global unless it is shadowed by eval-introduced
+        // variables.
+        var = NonLocal(proxy->name(), Variable::DYNAMIC_GLOBAL);
       }
-      break;
-
-    case UNBOUND:
-      // No binding has been found. Declare a variable in global scope.
-      var = info->global_scope()->DeclareGlobal(proxy->name());
-      break;
-
-    case UNBOUND_EVAL_SHADOWED:
-      // No binding has been found. But some scope makes a
-      // non-strict 'eval' call.
-      var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
-      break;
-
-    case DYNAMIC_LOOKUP:
-      // The variable could not be resolved statically.
-      var = NonLocal(proxy->name(), DYNAMIC);
-      break;
-  }
-
-  ASSERT(var != NULL);
-  proxy->BindTo(var);
-
-  if (FLAG_harmony_modules) {
-    bool ok;
-#ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
-#endif
-    proxy->interface()->Unify(var->interface(), &ok);
-    if (!ok) {
-#ifdef DEBUG
-      if (FLAG_print_interfaces) {
-        PrintF("SCOPES TYPE ERROR\n");
-        PrintF("proxy: ");
-        proxy->interface()->Print();
-        PrintF("var: ");
-        var->interface()->Print();
-      }
-#endif
-
-      // Inconsistent use of module. Throw a syntax error.
-      // TODO(rossberg): generate more helpful error message.
-      MessageLocation location(info->script(),
-                               proxy->position(),
-                               proxy->position());
-      Isolate* isolate = Isolate::Current();
-      Factory* factory = isolate->factory();
-      Handle<JSArray> array = factory->NewJSArray(1);
-      USE(JSObject::SetElement(array, 0, var->name(), NONE, kStrictMode));
-      Handle<Object> result =
-          factory->NewSyntaxError("module_type_error", array);
-      isolate->Throw(*result, &location);
-      return false;
     }
   }
 
-  return true;
+  proxy->BindTo(var);
 }
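
(For reference: the restored 3.6 `Scope::ResolveVariable` above reduces to a small decision tree. The sketch below is a standalone illustrative model, not V8 source; the enum, struct, and flag names are invented stand-ins for the fields and predicates named in the hunk.)

    // Illustrative model of the restored 3.6 resolution order (names invented).
    #include <cstdio>

    enum Outcome { BOUND_LOCAL, GLOBAL_SLOT, DYN, DYN_LOCAL, DYN_GLOBAL };

    struct Flags {
      bool inside_with;             // proxy->inside_with()
      bool found_var;               // LookupRecursive() returned a binding
      bool has_invalidated_local;   // a local guess that eval might shadow
      bool is_global_scope;
      bool scope_inside_with;       // scope_inside_with_
      bool outer_is_eval;           // outer_scope_is_eval_scope_
      bool calls_eval;              // scope_calls_eval_ || outer_scope_calls_eval_
      bool global_if_not_shadowed;  // context->GlobalIfNotShadowedByEval(name)
    };

    Outcome Resolve(const Flags& f) {
      if (f.inside_with) return DYN;  // all bets are off inside 'with'
      if (f.found_var) return BOUND_LOCAL;
      if (f.is_global_scope ||
          !(f.scope_inside_with || f.outer_is_eval || f.calls_eval)) {
        return GLOBAL_SLOT;  // DeclareGlobal() on the global scope
      }
      if (f.scope_inside_with) return DYN;
      if (f.has_invalidated_local) return DYN_LOCAL;
      if (f.outer_is_eval) return f.global_if_not_shadowed ? DYN_GLOBAL : DYN;
      return DYN_GLOBAL;  // global unless shadowed by eval-introduced bindings
    }

    int main() {
      Flags f = {};         // plain function scope, variable not found
      f.calls_eval = true;  // some enclosing scope calls eval()
      std::printf("outcome: %d\n", Resolve(f));  // prints 4 (DYN_GLOBAL)
      return 0;
    }
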
 
 
-bool Scope::ResolveVariablesRecursively(
-    CompilationInfo* info,
-    AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(info->global_scope()->is_global_scope());
+void Scope::ResolveVariablesRecursively(Scope* global_scope,
+                                        Handle<Context> context) {
+  ASSERT(global_scope == NULL || global_scope->is_global_scope());
 
   // Resolve unresolved variables for this scope.
   for (int i = 0; i < unresolved_.length(); i++) {
-    if (!ResolveVariable(info, unresolved_[i], factory)) return false;
+    ResolveVariable(global_scope, context, unresolved_[i]);
   }
 
   // Resolve unresolved variables for inner scopes.
   for (int i = 0; i < inner_scopes_.length(); i++) {
-    if (!inner_scopes_[i]->ResolveVariablesRecursively(info, factory))
-      return false;
+    inner_scopes_[i]->ResolveVariablesRecursively(global_scope, context);
   }
-
-  return true;
 }
 
 
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_non_strict_eval) {
+bool Scope::PropagateScopeInfo(bool outer_scope_calls_eval,
+                               bool outer_scope_calls_non_strict_eval,
+                               bool outer_scope_is_eval_scope) {
+  if (outer_scope_calls_eval) {
+    outer_scope_calls_eval_ = true;
+  }
+
   if (outer_scope_calls_non_strict_eval) {
     outer_scope_calls_non_strict_eval_ = true;
   }
 
+  if (outer_scope_is_eval_scope) {
+    outer_scope_is_eval_scope_ = true;
+  }
+
+  bool calls_eval = scope_calls_eval_ || outer_scope_calls_eval_;
+  bool is_eval = is_eval_scope() || outer_scope_is_eval_scope_;
   bool calls_non_strict_eval =
-      this->calls_non_strict_eval() || outer_scope_calls_non_strict_eval_;
+      (scope_calls_eval_ && !is_strict_mode()) ||
+      outer_scope_calls_non_strict_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
     Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_non_strict_eval)) {
+    if (inner_scope->PropagateScopeInfo(calls_eval,
+                                        calls_non_strict_eval,
+                                        is_eval)) {
       inner_scope_calls_eval_ = true;
     }
     if (inner_scope->force_eager_compilation_) {
@@ -1064,7 +1019,7 @@
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
   if ((var->is_this() || var->name()->length() > 0) &&
-      (var->has_forced_context_allocation() ||
+      (var->is_accessed_from_inner_scope() ||
        scope_calls_eval_ ||
        inner_scope_calls_eval_ ||
        scope_contains_with_ ||
@@ -1085,9 +1040,9 @@
   //
   // Exceptions: temporary variables are never allocated in a context;
   // catch-bound variables are always allocated in a context.
-  if (var->mode() == TEMPORARY) return false;
+  if (var->mode() == Variable::TEMPORARY) return false;
   if (is_catch_scope() || is_block_scope()) return true;
-  return var->has_forced_context_allocation() ||
+  return var->is_accessed_from_inner_scope() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
       scope_contains_with_ ||
@@ -1140,7 +1095,7 @@
     // In strict mode 'arguments' does not alias formal parameters.
     // Therefore in strict mode we allocate parameters as if 'arguments'
     // were not used.
-    uses_nonstrict_arguments = is_classic_mode();
+    uses_nonstrict_arguments = !is_strict_mode();
   }
 
   // The same parameter may occur multiple times in the parameters_ list.
@@ -1151,8 +1106,9 @@
     Variable* var = params_[i];
     ASSERT(var->scope() == this);
     if (uses_nonstrict_arguments) {
-      // Force context allocation of the parameter.
-      var->ForceContextAllocation();
+      // Give the parameter a use from an inner scope, to force allocation
+      // to the context.
+      var->MarkAsAccessedFromInnerScope();
     }
 
     if (MustAllocate(var)) {
@@ -1227,15 +1183,21 @@
   if (is_function_scope()) AllocateParameterLocals();
   AllocateNonParameterLocals();
 
-  // Force allocation of a context for this scope if necessary. For a 'with'
-  // scope and for a function scope that makes an 'eval' call we need a context,
-  // even if no local variables were statically allocated in the scope.
-  bool must_have_context = is_with_scope() ||
-      (is_function_scope() && calls_eval());
+  // Allocate context if necessary.
+  bool must_have_local_context = false;
+  if (scope_calls_eval_ || scope_contains_with_) {
+    // The context for the eval() call or 'with' statement in this scope.
+    // Unless we are in the global or an eval scope, we need a local
+    // context even if we didn't statically allocate any locals in it,
+    // and the compiler will access the context variable. If we are
+    // not in an inner scope, the scope is provided from the outside.
+    must_have_local_context = is_function_scope();
+  }
 
   // If we didn't allocate any locals in the local context, then we only
-  // need the minimal number of slots if we must have a context.
-  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS && !must_have_context) {
+  // need the minimal number of slots if we must have a local context.
+  if (num_heap_slots_ == Context::MIN_CONTEXT_SLOTS &&
+      !must_have_local_context) {
     num_heap_slots_ = 0;
   }
 
@@ -1243,17 +1205,4 @@
   ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
-
-int Scope::StackLocalCount() const {
-  return num_stack_slots() -
-      (function_ != NULL && function_->var()->IsStackLocal() ? 1 : 0);
-}
-
-
-int Scope::ContextLocalCount() const {
-  if (num_heap_slots() == 0) return 0;
-  return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
-      (function_ != NULL && function_->var()->IsContextSlot() ? 1 : 0);
-}
-
 } }  // namespace v8::internal
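
(Aside on the `PropagateScopeInfo` hunk above: the restored walk pushes three eval-related flags down the scope tree and reports eval usage back up to the parent. Below is a minimal standalone model of that flow; the class shape is invented, the flag arithmetic mirrors the hunk, and the return value is inferred from its use at the call site.)

    #include <vector>

    // Standalone model of the restored three-flag propagation (names invented).
    struct ScopeNode {
      bool is_eval_scope = false;
      bool is_strict_mode = false;
      bool scope_calls_eval = false;
      // Computed by the walk, as in the hunk above:
      bool outer_scope_calls_eval = false;
      bool outer_scope_calls_non_strict_eval = false;
      bool outer_scope_is_eval_scope = false;
      bool inner_scope_calls_eval = false;
      std::vector<ScopeNode*> inner_scopes;

      // Returns whether this subtree contains an eval() call; a true result
      // sets the parent's inner_scope_calls_eval, matching the call site.
      bool Propagate(bool outer_calls_eval,
                     bool outer_calls_non_strict_eval,
                     bool outer_is_eval) {
        if (outer_calls_eval) outer_scope_calls_eval = true;
        if (outer_calls_non_strict_eval) outer_scope_calls_non_strict_eval = true;
        if (outer_is_eval) outer_scope_is_eval_scope = true;

        bool calls_eval = scope_calls_eval || outer_scope_calls_eval;
        bool is_eval = is_eval_scope || outer_scope_is_eval_scope;
        bool calls_non_strict_eval =
            (scope_calls_eval && !is_strict_mode) ||
            outer_scope_calls_non_strict_eval;
        for (ScopeNode* inner : inner_scopes) {
          if (inner->Propagate(calls_eval, calls_non_strict_eval, is_eval)) {
            inner_scope_calls_eval = true;
          }
        }
        return scope_calls_eval || inner_scope_calls_eval;
      }
    };

    int main() {
      ScopeNode outer, inner;
      inner.scope_calls_eval = true;
      outer.inner_scopes.push_back(&inner);
      outer.Propagate(false, false, false);
      return outer.inner_scope_calls_eval ? 0 : 1;  // 0: eval use bubbled up
    }
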
diff --git a/src/scopes.h b/src/scopes.h
index d315b7e..2917a63 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 #define V8_SCOPES_H_
 
 #include "ast.h"
-#include "zone.h"
+#include "hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -38,19 +38,21 @@
 
 
 // A hash map to support fast variable declaration and lookup.
-class VariableMap: public ZoneHashMap {
+class VariableMap: public HashMap {
  public:
   VariableMap();
 
+  // Dummy constructor.  This constructor doesn't set up the map
+  // properly so don't use it unless you have a good reason.
+  explicit VariableMap(bool gotta_love_static_overloading);
+
   virtual ~VariableMap();
 
   Variable* Declare(Scope* scope,
                     Handle<String> name,
-                    VariableMode mode,
+                    Variable::Mode mode,
                     bool is_valid_lhs,
-                    Variable::Kind kind,
-                    InitializationFlag initialization_flag,
-                    Interface* interface = Interface::NewValue());
+                    Variable::Kind kind);
 
   Variable* Lookup(Handle<String> name);
 };
@@ -62,8 +64,8 @@
 // and setup time for scopes that don't need them.
 class DynamicScopePart : public ZoneObject {
  public:
-  VariableMap* GetMap(VariableMode mode) {
-    int index = mode - DYNAMIC;
+  VariableMap* GetMap(Variable::Mode mode) {
+    int index = mode - Variable::DYNAMIC;
     ASSERT(index >= 0 && index < 3);
     return &maps_[index];
   }
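
(The restored `GetMap` indexing works only if the three dynamic modes are declared consecutively in `Variable::Mode`, which is what the `index < 3` assert encodes. A standalone illustration; the surrounding enumerators are invented, and only the adjacency of the three dynamic modes is assumed.)

    #include <cassert>

    struct Variable {
      // Invented enum for the sketch; the code above relies only on DYNAMIC,
      // DYNAMIC_GLOBAL and DYNAMIC_LOCAL being consecutive.
      enum Mode { VAR, CONST, DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL, TEMPORARY };
    };

    // Mirrors DynamicScopePart::GetMap(): map a dynamic mode to one of the
    // three per-mode variable maps.
    int DynamicMapIndex(Variable::Mode mode) {
      int index = mode - Variable::DYNAMIC;
      assert(index >= 0 && index < 3);  // DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL
      return index;
    }

    int main() {
      assert(DynamicMapIndex(Variable::DYNAMIC) == 0);
      assert(DynamicMapIndex(Variable::DYNAMIC_LOCAL) == 2);
      return 0;
    }
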
@@ -87,19 +89,28 @@
   // ---------------------------------------------------------------------------
   // Construction
 
-  Scope(Scope* outer_scope, ScopeType type);
+  enum Type {
+    EVAL_SCOPE,      // The top-level scope for an eval source.
+    FUNCTION_SCOPE,  // The top-level scope for a function.
+    GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
+    CATCH_SCOPE,     // The scope introduced by catch.
+    BLOCK_SCOPE      // The scope introduced by a new block.
+  };
+
+  Scope(Scope* outer_scope, Type type);
 
   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
   // doesn't re-allocate variables repeatedly.
   static bool Analyze(CompilationInfo* info);
 
-  static Scope* DeserializeScopeChain(Context* context, Scope* global_scope);
+  static Scope* DeserializeScopeChain(CompilationInfo* info,
+                                      Scope* innermost_scope);
 
   // The scope name is only used for printing/debugging.
   void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
 
-  void Initialize();
+  void Initialize(bool inside_with);
 
   // Checks if the block scope is redundant, i.e. it does not contain any
   // block scoped declarations. In that case it is removed from the scope
@@ -112,13 +123,6 @@
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
   Variable* LocalLookup(Handle<String> name);
 
-  // This lookup corresponds to a lookup in the "intermediate" scope sitting
-  // between this scope and the outer scope. (ECMA-262, 3rd., requires that
-  // the name of a named function literal is kept in an intermediate scope
-  // in between this scope and the next outer scope.)
-  Variable* LookupFunctionVar(Handle<String> name,
-                              AstNodeFactory<AstNullVisitor>* factory);
-
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
   Variable* Lookup(Handle<String> name);
@@ -126,28 +130,16 @@
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
-  template<class Visitor>
-  Variable* DeclareFunctionVar(Handle<String> name,
-                               VariableMode mode,
-                               AstNodeFactory<Visitor>* factory) {
-    ASSERT(is_function_scope() && function_ == NULL);
-    Variable* function_var = new Variable(
-        this, name, mode, true, Variable::NORMAL, kCreatedInitialized);
-    function_ = factory->NewVariableProxy(function_var);
-    return function_var;
-  }
+  Variable* DeclareFunctionVar(Handle<String> name);
 
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared and from left to right.
-  void DeclareParameter(Handle<String> name, VariableMode mode);
+  void DeclareParameter(Handle<String> name, Variable::Mode mode);
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(Handle<String> name,
-                         VariableMode mode,
-                         InitializationFlag init_flag,
-                         Interface* interface = Interface::NewValue());
+  Variable* DeclareLocal(Handle<String> name, Variable::Mode mode);
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
@@ -156,20 +148,9 @@
   Variable* DeclareGlobal(Handle<String> name);
 
   // Create a new unresolved variable.
-  template<class Visitor>
-  VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
-                               Handle<String> name,
-                               int position = RelocInfo::kNoPosition,
-                               Interface* interface = Interface::NewValue()) {
-    // Note that we must not share the unresolved variables with
-    // the same name because they may be removed selectively via
-    // RemoveUnresolved().
-    ASSERT(!already_resolved());
-    VariableProxy* proxy =
-        factory->NewVariableProxy(name, false, position, interface);
-    unresolved_.Add(proxy);
-    return proxy;
-  }
+  VariableProxy* NewUnresolved(Handle<String> name,
+                               bool inside_with,
+                               int position = RelocInfo::kNoPosition);
 
   // Remove an unresolved variable. During parsing, an unresolved variable
   // may have been added optimistically, but then only the variable name
@@ -211,11 +192,6 @@
   // scope over a let binding of the same name.
   Declaration* CheckConflictingVarDeclarations();
 
-  // For harmony block scoping mode: Check if the scope has variable proxies
-  // that are used as lvalues and point to const variables. Assumes that scopes
-  // have been analyzed and variables been resolved.
-  VariableProxy* CheckAssignmentToConst();
-
   // ---------------------------------------------------------------------------
   // Scope-specific info.
 
@@ -223,42 +199,11 @@
   void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
+  void RecordEvalCall() { scope_calls_eval_ = true; }
 
-  // Set the strict mode flag (unless disabled by a global flag).
-  void SetLanguageMode(LanguageMode language_mode) {
-    language_mode_ = language_mode;
-  }
-
-  // Position in the source where this scope begins and ends.
-  //
-  // * For the scope of a with statement
-  //     with (obj) stmt
-  //   start position: start position of first token of 'stmt'
-  //   end position: end position of last token of 'stmt'
-  // * For the scope of a block
-  //     { stmts }
-  //   start position: start position of '{'
-  //   end position: end position of '}'
-  // * For the scope of a function literal or declaration
-  //     function fun(a,b) { stmts }
-  //   start position: start position of '('
-  //   end position: end position of '}'
-  // * For the scope of a catch block
-  //     try { stmts } catch(e) { stmts }
-  //   start position: start position of '('
-  //   end position: end position of ')'
-  // * For the scope of a for-statement
-  //     for (let x ...) stmt
-  //   start position: start position of '('
-  //   end position: end position of last token of 'stmt'
-  int start_position() const { return start_position_; }
-  void set_start_position(int statement_pos) {
-    start_position_ = statement_pos;
-  }
-  int end_position() const { return end_position_; }
-  void set_end_position(int statement_pos) {
-    end_position_ = statement_pos;
+  // Enable strict mode for the scope (unless disabled by a global flag).
+  void EnableStrictMode() {
+    strict_mode_ = FLAG_strict_mode;
   }
 
   // ---------------------------------------------------------------------------
@@ -267,29 +212,17 @@
   // Specific scope types.
   bool is_eval_scope() const { return type_ == EVAL_SCOPE; }
   bool is_function_scope() const { return type_ == FUNCTION_SCOPE; }
-  bool is_module_scope() const { return type_ == MODULE_SCOPE; }
   bool is_global_scope() const { return type_ == GLOBAL_SCOPE; }
   bool is_catch_scope() const { return type_ == CATCH_SCOPE; }
   bool is_block_scope() const { return type_ == BLOCK_SCOPE; }
-  bool is_with_scope() const { return type_ == WITH_SCOPE; }
-  bool is_declaration_scope() const {
-    return is_eval_scope() || is_function_scope() || is_global_scope();
-  }
-  bool is_classic_mode() const {
-    return language_mode() == CLASSIC_MODE;
-  }
-  bool is_extended_mode() const {
-    return language_mode() == EXTENDED_MODE;
-  }
-  bool is_strict_or_extended_eval_scope() const {
-    return is_eval_scope() && !is_classic_mode();
+  bool is_strict_mode() const { return strict_mode_; }
+  bool is_strict_mode_eval_scope() const {
+    return is_eval_scope() && is_strict_mode();
   }
 
   // Information about which scopes calls eval.
   bool calls_eval() const { return scope_calls_eval_; }
-  bool calls_non_strict_eval() {
-    return scope_calls_eval_ && is_classic_mode();
-  }
+  bool outer_scope_calls_eval() const { return outer_scope_calls_eval_; }
   bool outer_scope_calls_non_strict_eval() const {
     return outer_scope_calls_non_strict_eval_;
   }
@@ -299,15 +232,12 @@
   // Does this scope contain a with statement.
   bool contains_with() const { return scope_contains_with_; }
 
+  // The scope immediately surrounding this scope, or NULL.
+  Scope* outer_scope() const { return outer_scope_; }
+
   // ---------------------------------------------------------------------------
   // Accessors.
 
-  // The type of this scope.
-  ScopeType type() const { return type_; }
-
-  // The language mode of this scope.
-  LanguageMode language_mode() const { return language_mode_; }
-
   // The variable corresponding the 'this' value.
   Variable* receiver() { return receiver_; }
 
@@ -334,23 +264,23 @@
   // Declarations list.
   ZoneList<Declaration*>* declarations() { return &decls_; }
 
-  // Inner scope list.
-  ZoneList<Scope*>* inner_scopes() { return &inner_scopes_; }
-
-  // The scope immediately surrounding this scope, or NULL.
-  Scope* outer_scope() const { return outer_scope_; }
-
-  // The interface as inferred so far; only for module scopes.
-  Interface* interface() const { return interface_; }
 
   // ---------------------------------------------------------------------------
   // Variable allocation.
 
-  // Collect stack and context allocated local variables in this scope. Note
-  // that the function variable - if present - is not collected and should be
-  // handled separately.
-  void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
-                                    ZoneList<Variable*>* context_locals);
+  // Collect all used locals in this scope.
+  template<class Allocator>
+  void CollectUsedVariables(List<Variable*, Allocator>* locals);
+
+  // Resolve and fill in the allocation information for all variables
+  // in this scope. Must be called *after* all scopes have been
+  // processed (parsed) to ensure that unresolved variables can be
+  // resolved properly.
+  //
+  // In the case of code compiled and run using 'eval', the context
+  // parameter is the context in which eval was called.  In all other
+  // cases the context parameter is an empty handle.
+  void AllocateVariables(Handle<Context> context);
 
   // Current number of var or const locals.
   int num_var_or_const() { return num_var_or_const_; }
@@ -359,20 +289,12 @@
   int num_stack_slots() const { return num_stack_slots_; }
   int num_heap_slots() const { return num_heap_slots_; }
 
-  int StackLocalCount() const;
-  int ContextLocalCount() const;
-
   // Make sure this scope and all outer scopes are eagerly compiled.
   void ForceEagerCompilation()  { force_eager_compilation_ = true; }
 
   // Determine if we can use lazy compilation for this scope.
   bool AllowsLazyCompilation() const;
 
-  // True if we can lazily recompile functions with this scope.
-  bool allows_lazy_recompilation() const {
-    return !force_eager_compilation_;
-  }
-
   // True if the outer context of this scope is always the global context.
   bool HasTrivialOuterContext() const;
 
@@ -383,14 +305,7 @@
   // where var declarations will be hoisted to in the implementation.
   Scope* DeclarationScope();
 
-  Handle<ScopeInfo> GetScopeInfo();
-
-  // Get the chain of nested scopes within this scope for the source statement
-  // position. The scopes will be added to the list from the outermost scope to
-  // the innermost scope. Only nested block, catch or with scopes are tracked
-  // and will be returned, but no inner function scopes.
-  void GetNestedScopeChain(List<Handle<ScopeInfo> >* chain,
-                           int statement_position);
+  Handle<SerializedScopeInfo> GetSerializedScopeInfo();
 
   // ---------------------------------------------------------------------------
   // Strict mode support.
@@ -415,6 +330,8 @@
  protected:
   friend class ParserFactory;
 
+  explicit Scope(Type type);
+
   Isolate* const isolate_;
 
   // Scope tree.
@@ -422,7 +339,7 @@
   ZoneList<Scope*> inner_scopes_;  // the immediately enclosed inner scopes
 
   // The scope type.
-  ScopeType type_;
+  Type type_;
 
   // Debugging support.
   Handle<String> scope_name_;
@@ -449,8 +366,6 @@
   VariableProxy* function_;
   // Convenience variable; function scopes only.
   Variable* arguments_;
-  // Interface; module scopes only.
-  Interface* interface_;
 
   // Illegal redeclaration.
   Expression* illegal_redecl_;
@@ -464,15 +379,14 @@
   // This scope or a nested catch scope or with scope contains an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
-  // The language mode of this scope.
-  LanguageMode language_mode_;
-  // Source positions.
-  int start_position_;
-  int end_position_;
+  // This scope is a strict mode scope.
+  bool strict_mode_;
 
   // Computed via PropagateScopeInfo.
+  bool outer_scope_calls_eval_;
   bool outer_scope_calls_non_strict_eval_;
   bool inner_scope_calls_eval_;
+  bool outer_scope_is_eval_scope_;
   bool force_eager_compilation_;
 
   // True if it doesn't need scope resolution (e.g., if the scope was
@@ -482,80 +396,32 @@
   // Computed as variables are declared.
   int num_var_or_const_;
 
-  // Computed via AllocateVariables; function, block and catch scopes only.
+  // Computed via AllocateVariables; function scopes only.
   int num_stack_slots_;
   int num_heap_slots_;
 
-  // Serialized scope info support.
-  Handle<ScopeInfo> scope_info_;
+  // Serialized scopes support.
+  Handle<SerializedScopeInfo> scope_info_;
   bool already_resolved() { return already_resolved_; }
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
-  Variable* NonLocal(Handle<String> name, VariableMode mode);
+  Variable* NonLocal(Handle<String> name, Variable::Mode mode);
 
   // Variable resolution.
-  // Possible results of a recursive variable lookup telling if and how a
-  // variable is bound. These are returned in the output parameter *binding_kind
-  // of the LookupRecursive function.
-  enum BindingKind {
-    // The variable reference could be statically resolved to a variable binding
-    // which is returned. There is no 'with' statement between the reference and
-    // the binding and no scope between the reference scope (inclusive) and
-    // binding scope (exclusive) makes a non-strict 'eval' call.
-    BOUND,
-
-    // The variable reference could be statically resolved to a variable binding
-    // which is returned. There is no 'with' statement between the reference and
-    // the binding, but some scope between the reference scope (inclusive) and
-    // binding scope (exclusive) makes a non-strict 'eval' call, that might
-    // possibly introduce variable bindings shadowing the found one. Thus the
-    // found variable binding is just a guess.
-    BOUND_EVAL_SHADOWED,
-
-    // The variable reference could not be statically resolved to any binding
-    // and thus should be considered referencing a global variable. NULL is
-    // returned. The variable reference is not inside any 'with' statement and
-    // no scope between the reference scope (inclusive) and global scope
-    // (exclusive) makes a non-strict 'eval' call.
-    UNBOUND,
-
-    // The variable reference could not be statically resolved to any binding.
-    // NULL is returned. The variable reference is not inside any 'with'
-    // statement, but some scope between the reference scope (inclusive) and
-    // global scope (exclusive) makes a non-strict 'eval' call, that might
-    // possibly introduce a variable binding. Thus the reference should be
-    // considered referencing a global variable unless it is shadowed by an
-    // 'eval' introduced binding.
-    UNBOUND_EVAL_SHADOWED,
-
-    // The variable could not be statically resolved and needs to be looked up
-    // dynamically. NULL is returned. There are two possible reasons:
-    // * A 'with' statement has been encountered and there is no variable
-    //   binding for the name between the variable reference and the 'with'.
-    //   The variable potentially references a property of the 'with' object.
-    // * The code is being executed as part of a call to 'eval' and the calling
-    //   context chain contains either a variable binding for the name or it
-    //   contains a 'with' context.
-    DYNAMIC_LOOKUP
-  };
-
-  // Lookup a variable reference given by name recursively starting with this
-  // scope. If the code is executed because of a call to 'eval', the context
-  // parameter should be set to the calling context of 'eval'.
   Variable* LookupRecursive(Handle<String> name,
-                            BindingKind* binding_kind,
-                            AstNodeFactory<AstNullVisitor>* factory);
-  MUST_USE_RESULT
-  bool ResolveVariable(CompilationInfo* info,
-                       VariableProxy* proxy,
-                       AstNodeFactory<AstNullVisitor>* factory);
-  MUST_USE_RESULT
-  bool ResolveVariablesRecursively(CompilationInfo* info,
-                                   AstNodeFactory<AstNullVisitor>* factory);
+                            bool from_inner_function,
+                            Variable** invalidated_local);
+  void ResolveVariable(Scope* global_scope,
+                       Handle<Context> context,
+                       VariableProxy* proxy);
+  void ResolveVariablesRecursively(Scope* global_scope,
+                                   Handle<Context> context);
 
   // Scope analysis.
-  bool PropagateScopeInfo(bool outer_scope_calls_non_strict_eval);
+  bool PropagateScopeInfo(bool outer_scope_calls_eval,
+                          bool outer_scope_calls_non_strict_eval,
+                          bool outer_scope_is_eval_scope);
   bool HasTrivialContext() const;
 
   // Predicates.
@@ -571,21 +437,9 @@
   void AllocateNonParameterLocals();
   void AllocateVariablesRecursively();
 
-  // Resolve and fill in the allocation information for all variables
-  // in this scope. Must be called *after* all scopes have been
-  // processed (parsed) to ensure that unresolved variables can be
-  // resolved properly.
-  //
-  // In the case of code compiled and run using 'eval', the context
-  // parameter is the context in which eval was called.  In all other
-  // cases the context parameter is an empty handle.
-  MUST_USE_RESULT
-  bool AllocateVariables(CompilationInfo* info,
-                         AstNodeFactory<AstNullVisitor>* factory);
-
  private:
-  // Construct a scope based on the scope info.
-  Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info);
+  // Construct a function or block scope based on the scope info.
+  Scope(Scope* inner_scope, Type type, Handle<SerializedScopeInfo> scope_info);
 
   // Construct a catch scope with a binding for the name.
   Scope(Scope* inner_scope, Handle<String> catch_variable_name);
@@ -597,9 +451,9 @@
     }
   }
 
-  void SetDefaults(ScopeType type,
+  void SetDefaults(Type type,
                    Scope* outer_scope,
-                   Handle<ScopeInfo> scope_info);
+                   Handle<SerializedScopeInfo> scope_info);
 };
 
 } }  // namespace v8::internal
diff --git a/src/serialize.cc b/src/serialize.cc
index 01d5f1c..ecb480a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -273,22 +273,14 @@
       STUB_CACHE_TABLE,
       2,
       "StubCache::primary_->value");
-  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
-      STUB_CACHE_TABLE,
-      3,
-      "StubCache::primary_->map");
   Add(stub_cache->key_reference(StubCache::kSecondary).address(),
       STUB_CACHE_TABLE,
-      4,
+      3,
       "StubCache::secondary_->key");
   Add(stub_cache->value_reference(StubCache::kSecondary).address(),
       STUB_CACHE_TABLE,
-      5,
+      4,
       "StubCache::secondary_->value");
-  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
-      STUB_CACHE_TABLE,
-      6,
-      "StubCache::secondary_->map");
 
   // Runtime entries
   Add(ExternalReference::perform_gc_function(isolate).address(),
@@ -308,28 +300,16 @@
       RUNTIME_ENTRY,
       4,
       "HandleScope::DeleteExtensions");
-  Add(ExternalReference::
-          incremental_marking_record_write_function(isolate).address(),
-      RUNTIME_ENTRY,
-      5,
-      "IncrementalMarking::RecordWrite");
-  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
-      RUNTIME_ENTRY,
-      6,
-      "StoreBuffer::StoreBufferOverflow");
-  Add(ExternalReference::
-          incremental_evacuation_record_write_function(isolate).address(),
-      RUNTIME_ENTRY,
-      7,
-      "IncrementalMarking::RecordWrite");
-
-
 
   // Miscellaneous
-  Add(ExternalReference::roots_array_start(isolate).address(),
+  Add(ExternalReference::the_hole_value_location(isolate).address(),
+      UNCLASSIFIED,
+      2,
+      "Factory::the_hole_value().location()");
+  Add(ExternalReference::roots_address(isolate).address(),
       UNCLASSIFIED,
       3,
-      "Heap::roots_array_start()");
+      "Heap::roots_address()");
   Add(ExternalReference::address_of_stack_limit(isolate).address(),
       UNCLASSIFIED,
       4,
@@ -371,145 +351,129 @@
       "Heap::always_allocate_scope_depth()");
   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
       UNCLASSIFIED,
-      14,
+      13,
       "Heap::NewSpaceAllocationLimitAddress()");
   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
       UNCLASSIFIED,
-      15,
+      14,
       "Heap::NewSpaceAllocationTopAddress()");
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Add(ExternalReference::debug_break(isolate).address(),
       UNCLASSIFIED,
-      16,
+      15,
       "Debug::Break()");
   Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
       UNCLASSIFIED,
-      17,
+      16,
       "Debug::step_in_fp_addr()");
 #endif
   Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
       UNCLASSIFIED,
-      18,
+      17,
       "add_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
       UNCLASSIFIED,
-      19,
+      18,
       "sub_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
       UNCLASSIFIED,
-      20,
+      19,
       "mul_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
       UNCLASSIFIED,
-      21,
+      20,
       "div_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
       UNCLASSIFIED,
-      22,
+      21,
       "mod_two_doubles");
   Add(ExternalReference::compare_doubles(isolate).address(),
       UNCLASSIFIED,
-      23,
+      22,
       "compare_doubles");
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
       UNCLASSIFIED,
-      24,
+      23,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
       UNCLASSIFIED,
-      25,
+      24,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack(isolate).address(),
       UNCLASSIFIED,
-      26,
+      25,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      27,
+      26,
       "NativeRegExpMacroAssembler::word_character_map");
 #endif  // V8_INTERPRETED_REGEXP
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
       UNCLASSIFIED,
-      28,
+      27,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
       UNCLASSIFIED,
-      29,
+      28,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
       UNCLASSIFIED,
-      30,
+      29,
       "TranscendentalCache::caches()");
   Add(ExternalReference::handle_scope_next_address().address(),
       UNCLASSIFIED,
-      31,
+      30,
       "HandleScope::next");
   Add(ExternalReference::handle_scope_limit_address().address(),
       UNCLASSIFIED,
-      32,
+      31,
       "HandleScope::limit");
   Add(ExternalReference::handle_scope_level_address().address(),
       UNCLASSIFIED,
-      33,
+      32,
       "HandleScope::level");
   Add(ExternalReference::new_deoptimizer_function(isolate).address(),
       UNCLASSIFIED,
-      34,
+      33,
       "Deoptimizer::New()");
   Add(ExternalReference::compute_output_frames_function(isolate).address(),
       UNCLASSIFIED,
-      35,
+      34,
       "Deoptimizer::ComputeOutputFrames()");
   Add(ExternalReference::address_of_min_int().address(),
       UNCLASSIFIED,
-      36,
+      35,
       "LDoubleConstant::min_int");
   Add(ExternalReference::address_of_one_half().address(),
       UNCLASSIFIED,
-      37,
+      36,
       "LDoubleConstant::one_half");
   Add(ExternalReference::isolate_address().address(),
       UNCLASSIFIED,
-      38,
+      37,
       "isolate");
   Add(ExternalReference::address_of_minus_zero().address(),
       UNCLASSIFIED,
-      39,
+      38,
       "LDoubleConstant::minus_zero");
   Add(ExternalReference::address_of_negative_infinity().address(),
       UNCLASSIFIED,
-      40,
+      39,
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
       UNCLASSIFIED,
-      41,
+      40,
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function(isolate).address(),
       UNCLASSIFIED,
-      42,
+      41,
       "power_double_int_function");
-  Add(ExternalReference::store_buffer_top(isolate).address(),
+  Add(ExternalReference::arguments_marker_location(isolate).address(),
       UNCLASSIFIED,
-      43,
-      "store_buffer_top");
-  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
-      UNCLASSIFIED,
-      44,
-      "canonical_nan");
-  Add(ExternalReference::address_of_the_hole_nan().address(),
-      UNCLASSIFIED,
-      45,
-      "the_hole_nan");
-  Add(ExternalReference::get_date_field_function(isolate).address(),
-      UNCLASSIFIED,
-      46,
-      "JSDate::GetField");
-  Add(ExternalReference::date_cache_stamp(isolate).address(),
-      UNCLASSIFIED,
-      47,
-      "date_cache_stamp");
+      42,
+      "Factory::arguments_marker().location()");
 }
 
 
@@ -605,7 +569,6 @@
       maybe_new_allocation =
           reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
     }
-    ASSERT(!maybe_new_allocation->IsFailure());
     Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
     HeapObject* new_object = HeapObject::cast(new_allocation);
     address = new_object->address();
@@ -614,13 +577,14 @@
     ASSERT(SpaceIsLarge(space_index));
     LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
     Object* new_allocation;
-    if (space_index == kLargeData || space_index == kLargeFixedArray) {
+    if (space_index == kLargeData) {
+      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
+    } else if (space_index == kLargeFixedArray) {
       new_allocation =
-          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
+          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
     } else {
       ASSERT_EQ(kLargeCode, space_index);
-      new_allocation =
-          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
+      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
     }
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
@@ -665,7 +629,6 @@
 
 void Deserializer::Deserialize() {
   isolate_ = Isolate::Current();
-  ASSERT(isolate_ != NULL);
   // Don't GC while deserializing - just expand the heap.
   AlwaysAllocateScope always_allocate;
   // Don't use the free lists while deserializing.
@@ -685,14 +648,6 @@
 
   isolate_->heap()->set_global_contexts_list(
       isolate_->heap()->undefined_value());
-
-  // Update data pointers to the external strings containing natives sources.
-  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
-    Object* source = isolate_->heap()->natives_source_cache()->get(i);
-    if (!source->IsUndefined()) {
-      ExternalAsciiString::cast(source)->update_data_cache();
-    }
-  }
 }
 
 
@@ -730,8 +685,9 @@
 // This routine writes the new object into the pointer provided and then
 // returns true if the new object was in young space and false otherwise.
 // The reason for this strange interface is that otherwise the object is
-// written very late, which means the FreeSpace map is not set up by the
-// time we need to use it to mark the space at the end of a page free.
+// written very late, which means the ByteArray map is not set up by the
+// time we need to use it to mark the space at the end of a page free (by
+// making it into a byte array).
 void Deserializer::ReadObject(int space_number,
                               Space* space,
                               Object** write_back) {
@@ -781,13 +737,8 @@
 void Deserializer::ReadChunk(Object** current,
                              Object** limit,
                              int source_space,
-                             Address current_object_address) {
+                             Address address) {
   Isolate* const isolate = isolate_;
-  bool write_barrier_needed = (current_object_address != NULL &&
-                               source_space != NEW_SPACE &&
-                               source_space != CELL_SPACE &&
-                               source_space != CODE_SPACE &&
-                               source_space != OLD_DATA_SPACE);
   while (current < limit) {
     int data = source_->Get();
     switch (data) {
@@ -807,7 +758,8 @@
         if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
           ASSIGN_DEST_SPACE(space_number)                                      \
           ReadObject(space_number, dest_space, current);                       \
-          emit_write_barrier = (space_number == NEW_SPACE);                    \
+          emit_write_barrier =                                                 \
+            (space_number == NEW_SPACE && source_space != NEW_SPACE);          \
         } else {                                                               \
           Object* new_object = NULL;  /* May not be a real Object pointer. */  \
           if (where == kNewObject) {                                           \
@@ -815,25 +767,25 @@
             ReadObject(space_number, dest_space, &new_object);                 \
           } else if (where == kRootArray) {                                    \
             int root_id = source_->GetInt();                                   \
-            new_object = isolate->heap()->roots_array_start()[root_id];        \
-            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
+            new_object = isolate->heap()->roots_address()[root_id];            \
           } else if (where == kPartialSnapshotCache) {                         \
             int cache_index = source_->GetInt();                               \
             new_object = isolate->serialize_partial_snapshot_cache()           \
                 [cache_index];                                                 \
-            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
           } else if (where == kExternalReference) {                            \
             int reference_id = source_->GetInt();                              \
             Address address = external_reference_decoder_->                    \
                 Decode(reference_id);                                          \
             new_object = reinterpret_cast<Object*>(address);                   \
           } else if (where == kBackref) {                                      \
-            emit_write_barrier = (space_number == NEW_SPACE);                  \
+            emit_write_barrier =                                               \
+              (space_number == NEW_SPACE && source_space != NEW_SPACE);        \
             new_object = GetAddressFromEnd(data & kSpaceMask);                 \
           } else {                                                             \
             ASSERT(where == kFromStart);                                       \
             if (offset_from_start == kUnknownOffsetFromStart) {                \
-              emit_write_barrier = (space_number == NEW_SPACE);                \
+              emit_write_barrier =                                             \
+                (space_number == NEW_SPACE && source_space != NEW_SPACE);      \
               new_object = GetAddressFromStart(data & kSpaceMask);             \
             } else {                                                           \
               Address object_address = pages_[space_number][0] +               \
@@ -849,24 +801,23 @@
           if (how == kFromCode) {                                              \
             Address location_of_branch_data =                                  \
                 reinterpret_cast<Address>(current);                            \
-            Assembler::deserialization_set_special_target_at(                  \
-                location_of_branch_data,                                       \
-                reinterpret_cast<Address>(new_object));                        \
-            location_of_branch_data += Assembler::kSpecialTargetSize;          \
-            current = reinterpret_cast<Object**>(location_of_branch_data);     \
-            current_was_incremented = true;                                    \
+            Assembler::set_target_at(location_of_branch_data,                  \
+                                     reinterpret_cast<Address>(new_object));   \
+            if (within == kFirstInstruction) {                                 \
+              location_of_branch_data += Assembler::kCallTargetSize;           \
+              current = reinterpret_cast<Object**>(location_of_branch_data);   \
+              current_was_incremented = true;                                  \
+            }                                                                  \
           } else {                                                             \
             *current = new_object;                                             \
           }                                                                    \
         }                                                                      \
-        if (emit_write_barrier && write_barrier_needed) {                      \
-          Address current_address = reinterpret_cast<Address>(current);        \
-          isolate->heap()->RecordWrite(                                        \
-              current_object_address,                                          \
-              static_cast<int>(current_address - current_object_address));     \
+        if (emit_write_barrier) {                                              \
+          isolate->heap()->RecordWrite(address, static_cast<int>(              \
+              reinterpret_cast<Address>(current) - address));                  \
         }                                                                      \
         if (!current_was_incremented) {                                        \
-          current++;                                                           \
+          current++;   /* Increment current if it wasn't done above. */        \
         }                                                                      \
         break;                                                                 \
       }                                                                        \
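
(The condition restored throughout this macro, `space_number == NEW_SPACE && source_space != NEW_SPACE`, is the generational write-barrier invariant: a store needs recording only when it creates a pointer from an object outside new space to an object inside it. A minimal standalone statement of that predicate:)

    #include <cassert>

    // A store needs the remembered-set barrier only when it creates an
    // old-to-new pointer (value in new space, host object outside it).
    static bool NeedsWriteBarrier(bool value_in_new_space, bool host_in_new_space) {
      return value_in_new_space && !host_in_new_space;
    }

    int main() {
      assert(NeedsWriteBarrier(true, false));    // old object -> new object
      assert(!NeedsWriteBarrier(true, true));    // new -> new: scavenger finds it
      assert(!NeedsWriteBarrier(false, false));  // old -> old: no barrier needed
      return 0;
    }
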
@@ -913,17 +864,11 @@
   CASE_STATEMENT(where, how, within, kLargeCode)                               \
   CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
 
-#define FOUR_CASES(byte_code)             \
-  case byte_code:                         \
-  case byte_code + 1:                     \
-  case byte_code + 2:                     \
-  case byte_code + 3:
-
-#define SIXTEEN_CASES(byte_code)          \
-  FOUR_CASES(byte_code)                   \
-  FOUR_CASES(byte_code + 4)               \
-  FOUR_CASES(byte_code + 8)               \
-  FOUR_CASES(byte_code + 12)
+#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number,                    \
+                                       space_number,                           \
+                                       offset_from_start)                      \
+  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number)      \
+  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
 
       // We generate 15 cases and bodies that process special tags that combine
       // the raw data tag and the length into one byte.
@@ -947,38 +892,6 @@
         break;
       }
 
-      SIXTEEN_CASES(kRootArrayLowConstants)
-      SIXTEEN_CASES(kRootArrayHighConstants) {
-        int root_id = RootArrayConstantFromByteCode(data);
-        Object* object = isolate->heap()->roots_array_start()[root_id];
-        ASSERT(!isolate->heap()->InNewSpace(object));
-        *current++ = object;
-        break;
-      }
-
-      case kRepeat: {
-        int repeats = source_->GetInt();
-        Object* object = current[-1];
-        ASSERT(!isolate->heap()->InNewSpace(object));
-        for (int i = 0; i < repeats; i++) current[i] = object;
-        current += repeats;
-        break;
-      }
-
-      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
-                    Heap::kOldSpaceRoots);
-      STATIC_ASSERT(kMaxRepeats == 12);
-      FOUR_CASES(kConstantRepeat)
-      FOUR_CASES(kConstantRepeat + 4)
-      FOUR_CASES(kConstantRepeat + 8) {
-        int repeats = RepeatsForCode(data);
-        Object* object = current[-1];
-        ASSERT(!isolate->heap()->InNewSpace(object));
-        for (int i = 0; i < repeats; i++) current[i] = object;
-        current += repeats;
-        break;
-      }
-
       // Deserialize a new object and write a pointer to it to the current
       // object.
       ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
@@ -990,21 +903,6 @@
       // Find a recently deserialized object using its offset from the current
       // allocation point and write a pointer to it to the current object.
       ALL_SPACES(kBackref, kPlain, kStartOfObject)
-#if V8_TARGET_ARCH_MIPS
-      // Deserialize a new object from pointer found in code and write
-      // a pointer to it to the current object. Required only for MIPS, and
-      // omitted on the other architectures because it is fully unrolled and
-      // would cause bloat.
-      ONE_PER_SPACE(kNewObject, kFromCode, kStartOfObject)
-      // Find a recently deserialized code object using its offset from the
-      // current allocation point and write a pointer to it to the current
-      // object. Required only for MIPS.
-      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
-      // Find an already deserialized code object using its offset from
-      // the start and write a pointer to it to the current object.
-      // Required only for MIPS.
-      ALL_SPACES(kFromStart, kFromCode, kStartOfObject)
-#endif
       // Find a recently deserialized code object using its offset from the
       // current allocation point and write a pointer to its first instruction
       // to the current code object or the instruction pointer in a function
@@ -1019,6 +917,9 @@
       // start and write a pointer to its first instruction to the current code
       // object.
       ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
+      // Find an already deserialized object at one of the predetermined popular
+      // offsets from the start and write a pointer to it in the current object.
+      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
       CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
@@ -1060,6 +961,7 @@
 #undef CASE_BODY
 #undef ONE_PER_SPACE
 #undef ALL_SPACES
+#undef EMIT_COMMON_REFERENCE_PATTERNS
 #undef ASSIGN_DEST_SPACE
 
       case kNewPage: {
@@ -1071,11 +973,6 @@
         break;
       }
 
-      case kSkip: {
-        current++;
-        break;
-      }
-
       case kNativesStringResource: {
         int index = source_->Get();
         Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1111,17 +1008,45 @@
   PutSection(static_cast<int>(integer & 0x7f), "IntLastPart");
 }
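
(`PutInt` above ends by emitting the low seven bits with no flag bit, labeled "IntLastPart", which suggests a big-endian base-128 encoding where every byte except the last sets bit 7 as a continuation marker. The encoder/decoder pair below is written under that assumption and is not the V8 implementation.)

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Assumed wire format: 7-bit groups, most significant first; bit 7 set
    // on every byte except the last (the "IntLastPart" byte above).
    void PutInt(std::vector<uint8_t>* out, uint32_t value) {
      for (int shift = 28; shift > 0; shift -= 7) {
        if (value >> shift) out->push_back(((value >> shift) & 0x7f) | 0x80);
      }
      out->push_back(value & 0x7f);  // last part: continuation bit clear
    }

    uint32_t GetInt(const std::vector<uint8_t>& in, size_t* pos) {
      uint32_t value = 0;
      uint8_t byte;
      do {
        byte = in[(*pos)++];
        value = (value << 7) | (byte & 0x7f);
      } while (byte & 0x80);
      return value;
    }

    int main() {
      std::vector<uint8_t> buf;
      PutInt(&buf, 300);  // encodes as 0x82 0x2c
      size_t pos = 0;
      assert(GetInt(buf, &pos) == 300);
      return 0;
    }
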
 
+#ifdef DEBUG
+
+void Deserializer::Synchronize(const char* tag) {
+  int data = source_->Get();
+  // If this assert fails, you have a mismatch between the number of
+  // GC roots when serializing and deserializing.
+  ASSERT_EQ(kSynchronize, data);
+  do {
+    int character = source_->Get();
+    if (character == 0) break;
+    if (FLAG_debug_serialization) {
+      PrintF("%c", character);
+    }
+  } while (true);
+  if (FLAG_debug_serialization) {
+    PrintF("\n");
+  }
+}
+
+
+void Serializer::Synchronize(const char* tag) {
+  sink_->Put(kSynchronize, tag);
+  int character;
+  do {
+    character = *tag++;
+    sink_->PutSection(character, "TagCharacter");
+  } while (character != 0);
+}
+
+#endif
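
(The `Synchronize` pair restored above embeds debug-only checkpoints in the snapshot stream: the serializer writes a `kSynchronize` byte followed by a NUL-terminated tag, and the deserializer asserts the byte and consumes the tag, catching any drift in the set of GC roots visited on the two sides. A standalone model of that handshake; the tag value and stream types are invented.)

    #include <cassert>
    #include <cstdio>
    #include <string>
    #include <vector>

    const int kSynchronize = 0x70;  // placeholder byte code for the sketch

    void WriteSync(std::vector<int>* sink, const char* tag) {
      sink->push_back(kSynchronize);
      int c;
      do {  // emit the tag inline, NUL-terminated, as Serializer::Synchronize does
        c = *tag++;
        sink->push_back(c);
      } while (c != 0);
    }

    void ReadSync(const std::vector<int>& source, size_t* pos) {
      assert(source[(*pos)++] == kSynchronize);  // roots must line up
      std::string tag;
      while (int c = source[(*pos)++]) tag += static_cast<char>(c);
      std::printf("synchronized at: %s\n", tag.c_str());
    }

    int main() {
      std::vector<int> stream;
      size_t pos = 0;
      WriteSync(&stream, "strong roots");
      ReadSync(stream, &pos);
      return 0;
    }
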
 
 Serializer::Serializer(SnapshotByteSink* sink)
     : sink_(sink),
       current_root_index_(0),
       external_reference_encoder_(new ExternalReferenceEncoder),
-      large_object_total_(0),
-      root_index_wave_front_(0) {
-  isolate_ = Isolate::Current();
+      large_object_total_(0) {
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  ASSERT(isolate_->IsDefaultIsolate());
+  ASSERT(Isolate::Current()->IsDefaultIsolate());
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
   }
@@ -1141,8 +1066,11 @@
   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
   CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
   // We don't support serializing installed extensions.
-  CHECK(!isolate->has_installed_extensions());
-
+  for (RegisteredExtension* ext = v8::RegisteredExtension::first_extension();
+       ext != NULL;
+       ext = ext->next()) {
+    CHECK_NE(v8::INSTALLED, ext->state());
+  }
   HEAP->IterateStrongRoots(this, VISIT_ONLY_STRONG);
 }
 
@@ -1169,17 +1097,8 @@
 
 
 void Serializer::VisitPointers(Object** start, Object** end) {
-  Isolate* isolate = Isolate::Current();
-
   for (Object** current = start; current < end; current++) {
-    if (start == isolate->heap()->roots_array_start()) {
-      root_index_wave_front_ =
-          Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
-    }
-    if (reinterpret_cast<Address>(current) ==
-        isolate->heap()->store_buffer()->TopAddress()) {
-      sink_->Put(kSkip, "Skip");
-    } else if ((*current)->IsSmi()) {
+    if ((*current)->IsSmi()) {
       sink_->Put(kRawData, "RawData");
       sink_->PutInt(kPointerSize, "length");
       for (int i = 0; i < kPointerSize; i++) {
@@ -1243,23 +1162,10 @@
 }
 
 
-int Serializer::RootIndex(HeapObject* heap_object, HowToCode from) {
-  Heap* heap = HEAP;
-  if (heap->InNewSpace(heap_object)) return kInvalidRootIndex;
-  for (int i = 0; i < root_index_wave_front_; i++) {
-    Object* root = heap->roots_array_start()[i];
-    if (!root->IsSmi() && root == heap_object) {
-#if V8_TARGET_ARCH_MIPS
-      if (from == kFromCode) {
-        // In order to avoid code bloat in the deserializer we don't have
-        // support for the encoding that specifies a particular root should
-        // be written into the lui/ori instructions on MIPS.  Therefore we
-        // should not generate such serialization data for MIPS.
-        return kInvalidRootIndex;
-      }
-#endif
-      return i;
-    }
+int PartialSerializer::RootIndex(HeapObject* heap_object) {
+  for (int i = 0; i < Heap::kRootListLength; i++) {
+    Object* root = HEAP->roots_address()[i];
+    if (root == heap_object) return i;
   }
   return kInvalidRootIndex;
 }
@@ -1295,8 +1201,18 @@
   // all objects) then we should shift out the bits that are always 0.
   if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
   if (from_start) {
-    sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
-    sink_->PutInt(address, "address");
+#define COMMON_REFS_CASE(pseudo_space, actual_space, offset)                   \
+    if (space == actual_space && address == offset &&                          \
+        how_to_code == kPlain && where_to_point == kStartOfObject) {           \
+      sink_->Put(kFromStart + how_to_code + where_to_point +                   \
+                 pseudo_space, "RefSer");                                      \
+    } else  /* NOLINT */
+    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+    {  /* NOLINT */
+      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
+      sink_->PutInt(address, "address");
+    }
   } else {
     sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
     sink_->PutInt(address, "address");
@@ -1311,12 +1227,6 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
-  int root_index;
-  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
-    PutRoot(root_index, heap_object, how_to_code, where_to_point);
-    return;
-  }
-
   if (address_mapper_.IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
     int address = address_mapper_.MappedTo(heap_object);
@@ -1347,28 +1257,6 @@
 }
 
 
-void Serializer::PutRoot(int root_index,
-                         HeapObject* object,
-                         SerializerDeserializer::HowToCode how_to_code,
-                         SerializerDeserializer::WhereToPoint where_to_point) {
-  if (how_to_code == kPlain &&
-      where_to_point == kStartOfObject &&
-      root_index < kRootArrayNumberOfConstantEncodings &&
-      !HEAP->InNewSpace(object)) {
-    if (root_index < kRootArrayNumberOfLowConstantEncodings) {
-      sink_->Put(kRootArrayLowConstants + root_index, "RootLoConstant");
-    } else {
-      sink_->Put(kRootArrayHighConstants + root_index -
-                     kRootArrayNumberOfLowConstantEncodings,
-                 "RootHiConstant");
-    }
-  } else {
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
-  }
-}
-
-
 void PartialSerializer::SerializeObject(
     Object* o,
     HowToCode how_to_code,
@@ -1376,16 +1264,10 @@
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
-  if (heap_object->IsMap()) {
-    // The code-caches link to context-specific code objects, which
-    // the startup and context serializes cannot currently handle.
-    ASSERT(Map::cast(heap_object)->code_cache() ==
-           heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
-  }
-
   int root_index;
-  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
-    PutRoot(root_index, heap_object, how_to_code, where_to_point);
+  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_->PutInt(root_index, "root_index");
     return;
   }
 
@@ -1463,48 +1345,14 @@
     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
 
     while (current < end && !(*current)->IsSmi()) {
-      HeapObject* current_contents = HeapObject::cast(*current);
-      int root_index = serializer_->RootIndex(current_contents, kPlain);
-      // Repeats are not subject to the write barrier so there are only some
-      // objects that can be used in a repeat encoding.  These are the early
-      // ones in the root array that are never in new space.
-      if (current != start &&
-          root_index != kInvalidRootIndex &&
-          root_index < kRootArrayNumberOfConstantEncodings &&
-          current_contents == current[-1]) {
-        ASSERT(!HEAP->InNewSpace(current_contents));
-        int repeat_count = 1;
-        while (current < end - 1 && current[repeat_count] == current_contents) {
-          repeat_count++;
-        }
-        current += repeat_count;
-        bytes_processed_so_far_ += repeat_count * kPointerSize;
-        if (repeat_count > kMaxRepeats) {
-          sink_->Put(kRepeat, "SerializeRepeats");
-          sink_->PutInt(repeat_count, "SerializeRepeats");
-        } else {
-          sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
-        }
-      } else {
-        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject);
-        bytes_processed_so_far_ += kPointerSize;
-        current++;
-      }
+      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
+      bytes_processed_so_far_ += kPointerSize;
+      current++;
     }
   }
 }
 
 
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
-  Object** current = rinfo->target_object_address();
-
-  OutputRawData(rinfo->target_address_address());
-  HowToCode representation = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
-  serializer_->SerializeObject(*current, representation, kStartOfObject);
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
 void Serializer::ObjectSerializer::VisitExternalReferences(Address* start,
                                                            Address* end) {
   Address references_start = reinterpret_cast<Address>(start);
@@ -1519,20 +1367,6 @@
 }
 
 
-void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
-  Address references_start = rinfo->target_address_address();
-  OutputRawData(references_start);
-
-  Address* current = rinfo->target_reference_address();
-  int representation = rinfo->IsCodedSpecially() ?
-                       kFromCode + kStartOfObject : kPlain + kStartOfObject;
-  sink_->Put(kExternalReference + representation, "ExternalRef");
-  int reference_id = serializer_->EncodeExternalReference(*current);
-  sink_->PutInt(reference_id, "reference id");
-  bytes_processed_so_far_ += rinfo->target_address_size();
-}
-
-
 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
   Address target_start = rinfo->target_address_address();
   OutputRawData(target_start);
@@ -1586,7 +1420,7 @@
     if (!source->IsUndefined()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(source);
       typedef v8::String::ExternalAsciiStringResource Resource;
-      const Resource* resource = string->resource();
+      Resource* resource = string->resource();
       if (resource == *resource_pointer) {
         sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
@@ -1684,8 +1518,8 @@
     // serialized address.
     CHECK(IsPowerOf2(Page::kPageSize));
     int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
-    CHECK(size <= SpaceAreaSize(space));
-    if (used_in_this_page + size > SpaceAreaSize(space)) {
+    CHECK(size <= Page::kObjectAreaSize);
+    if (used_in_this_page + size > Page::kObjectAreaSize) {
       *new_page = true;
       fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
     }
@@ -1696,13 +1530,4 @@
 }
 
 
-int Serializer::SpaceAreaSize(int space) {
-  if (space == CODE_SPACE) {
-    return isolate_->memory_allocator()->CodePageAreaSize();
-  } else {
-    return Page::kPageSize - Page::kObjectStartOffset;
-  }
-}
-
-
 } }  // namespace v8::internal
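
The COMMON_REFS_CASE block restored above is an X-macro: each entry of
COMMON_REFERENCE_PATTERNS (defined in serialize.h, below) expands into one
branch of an if/else chain, and the brace block that follows the expansion
serves as the final else. A minimal self-contained sketch of the technique,
using hypothetical pattern values rather than the real offsets:

#include <cstdio>

// Hypothetical pattern list: f(pseudo_space, actual_space, offset).
#define PATTERNS(f) \
  f(8, 2, 11)       \
  f(9, 2, 0)

void EmitReference(int space, int address) {
#define PATTERN_CASE(pseudo_space, actual_space, offset)         \
  if (space == actual_space && address == offset) {              \
    printf("one-byte code for pseudo space %d\n", pseudo_space); \
  } else  /* NOLINT */
  PATTERNS(PATTERN_CASE)
#undef PATTERN_CASE
  {  // Fallback: emit a space tag followed by an explicit offset.
    printf("tag for space %d, offset %d\n", space, address);
  }
}
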
diff --git a/src/serialize.h b/src/serialize.h
index f50e23e..66d6fb5 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -187,6 +187,24 @@
 };
 
 
+// It is very common to have references to objects at certain offsets in the
+// heap.  These offsets have been determined experimentally.  We code
+// references to such objects in a single byte that encodes the way the pointer
+// is written (only plain pointers allowed), the space number and the offset.
+// This only works for objects in the first page of a space.  Don't use this for
+// things in new space since it bypasses the write barrier.
+
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
+
+#define COMMON_REFERENCE_PATTERNS(f)                               \
+  f(kNumberOfSpaces, 2, (11 - k64))                                \
+  f((kNumberOfSpaces + 1), 2, 0)                                   \
+  f((kNumberOfSpaces + 2), 2, (142 - 16 * k64))                    \
+  f((kNumberOfSpaces + 3), 2, (74 - 15 * k64))                     \
+  f((kNumberOfSpaces + 4), 2, 5)                                   \
+  f((kNumberOfSpaces + 5), 1, 135)                                 \
+  f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
+
 #define COMMON_RAW_LENGTHS(f)        \
   f(1, 1)  \
   f(2, 2)  \
@@ -220,15 +238,14 @@
     kRootArray = 0x9,               // Object is found in root array.
     kPartialSnapshotCache = 0xa,    // Object is in the cache.
     kExternalReference = 0xb,       // Pointer to an external reference.
-    kSkip = 0xc,                    // Skip a pointer sized cell.
-    // 0xd-0xf                         Free.
+    // 0xc-0xf                         Free.
     kBackref = 0x10,                 // Object is described relative to end.
     // 0x11-0x18                       One per space.
-    // 0x19-0x1f                       Free.
+    // 0x19-0x1f                       Common backref offsets.
     kFromStart = 0x20,              // Object is described relative to start.
     // 0x21-0x28                       One per space.
     // 0x29-0x2f                       Free.
-    // 0x30-0x3f                       Used by misc. tags below.
+    // 0x30-0x3f                       Used by misc tags below.
     kPointedToMask = 0x3f
   };
 
@@ -261,29 +278,9 @@
   // is referred to from external strings in the snapshot.
   static const int kNativesStringResource = 0x71;
   static const int kNewPage = 0x72;
-  static const int kRepeat = 0x73;
-  static const int kConstantRepeat = 0x74;
-  // 0x74-0x7f            Repeat last word (subtract 0x73 to get the count).
-  static const int kMaxRepeats = 0x7f - 0x73;
-  static int CodeForRepeats(int repeats) {
-    ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
-    return 0x73 + repeats;
-  }
-  static int RepeatsForCode(int byte_code) {
-    ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
-    return byte_code - 0x73;
-  }
-  static const int kRootArrayLowConstants = 0xb0;
-  // 0xb0-0xbf            Things from the first 16 elements of the root array.
-  static const int kRootArrayHighConstants = 0xf0;
-  // 0xf0-0xff            Things from the next 16 elements of the root array.
-  static const int kRootArrayNumberOfConstantEncodings = 0x20;
-  static const int kRootArrayNumberOfLowConstantEncodings = 0x10;
-  static int RootArrayConstantFromByteCode(int byte_code) {
-    int constant = (byte_code & 0xf) | ((byte_code & 0x40) >> 2);
-    ASSERT(constant >= 0 && constant < kRootArrayNumberOfConstantEncodings);
-    return constant;
-  }
+  // 0x73-0x7f                            Free.
+  // 0xb0-0xbf                            Free.
+  // 0xf0-0xff                            Free.
 
 
   static const int kLargeData = LAST_SPACE;
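
For reference, the repeat byte codes deleted from this enum packed a short
run length directly into the tag: 0x74 through 0x7f encode 1 to 12
repetitions of the previous word, and kRepeat (0x73) carries an explicit
count for longer runs. A standalone sketch of that mapping:

#include <cassert>

const int kRepeat = 0x73;             // explicit-count form
const int kConstantRepeat = 0x74;     // first inline-count code
const int kMaxRepeats = 0x7f - 0x73;  // 12

int CodeForRepeats(int repeats) {
  assert(repeats >= 1 && repeats <= kMaxRepeats);
  return 0x73 + repeats;  // yields 0x74..0x7f
}

int RepeatsForCode(int byte_code) {
  assert(byte_code >= kConstantRepeat && byte_code <= 0x7f);
  return byte_code - 0x73;
}
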
@@ -341,6 +338,10 @@
   // Deserialize a single object and the objects reachable from it.
   void DeserializePartial(Object** root);
 
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
+
  private:
   virtual void VisitPointers(Object** start, Object** end);
 
@@ -352,13 +353,7 @@
     UNREACHABLE();
   }
 
-  // Fills in some heap data in an area from start to end (non-inclusive).  The
-  // space id is used for the write barrier.  The object_address is the address
-  // of the object we are writing into, or NULL if we are not writing into an
-  // object, i.e. if we are writing a series of tagged values that are not on
-  // the heap.
-  void ReadChunk(
-      Object** start, Object** end, int space, Address object_address);
+  void ReadChunk(Object** start, Object** end, int space, Address address);
   HeapObject* GetAddressFromStart(int space);
   inline HeapObject* GetAddressFromEnd(int space);
   Address Allocate(int space_number, Space* space, int size);
@@ -479,19 +474,14 @@
   static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
   static bool enabled() { return serialization_enabled_; }
   SerializationAddressMapper* address_mapper() { return &address_mapper_; }
-  void PutRoot(
-      int index, HeapObject* object, HowToCode how, WhereToPoint where);
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
 
  protected:
   static const int kInvalidRootIndex = -1;
-
-  int RootIndex(HeapObject* heap_object, HowToCode from);
+  virtual int RootIndex(HeapObject* heap_object) = 0;
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
-  intptr_t root_index_wave_front() { return root_index_wave_front_; }
-  void set_root_index_wave_front(intptr_t value) {
-    ASSERT(value >= root_index_wave_front_);
-    root_index_wave_front_ = value;
-  }
 
   class ObjectSerializer : public ObjectVisitor {
    public:
@@ -507,9 +497,7 @@
         bytes_processed_so_far_(0) { }
     void Serialize();
     void VisitPointers(Object** start, Object** end);
-    void VisitEmbeddedPointer(RelocInfo* target);
     void VisitExternalReferences(Address* start, Address* end);
-    void VisitExternalReference(RelocInfo* rinfo);
     void VisitCodeTarget(RelocInfo* target);
     void VisitCodeEntry(Address entry_address);
     void VisitGlobalPropertyCell(RelocInfo* rinfo);
@@ -556,9 +544,6 @@
     return external_reference_encoder_->Encode(addr);
   }
 
-  int SpaceAreaSize(int space);
-
-  Isolate* isolate_;
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
   // just numbered sequentially since relative addresses make no
@@ -572,12 +557,10 @@
   static bool too_late_to_enable_now_;
   int large_object_total_;
   SerializationAddressMapper address_mapper_;
-  intptr_t root_index_wave_front_;
 
   friend class ObjectSerializer;
   friend class Deserializer;
 
- private:
   DISALLOW_COPY_AND_ASSIGN(Serializer);
 };
 
@@ -588,7 +571,6 @@
                     SnapshotByteSink* sink)
     : Serializer(sink),
       startup_serializer_(startup_snapshot_serializer) {
-    set_root_index_wave_front(Heap::kStrongRootListLength);
   }
 
   // Serialize the objects reachable from a single object pointer.
@@ -598,6 +580,7 @@
                                WhereToPoint where_to_point);
 
  protected:
+  virtual int RootIndex(HeapObject* o);
   virtual int PartialSnapshotCacheIndex(HeapObject* o);
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     // Scripts should be referred only through shared function infos.  We can't
@@ -607,7 +590,7 @@
     ASSERT(!o->IsScript());
     return o->IsString() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
-           o->IsScopeInfo() ||
+           o->IsSerializedScopeInfo() ||
            o->map() == HEAP->fixed_cow_array_map();
   }
 
@@ -622,14 +605,14 @@
   explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
     // Clear the cache of objects used by the partial snapshot.  After the
     // strong roots have been serialized we can create a partial snapshot
-    // which will repopulate the cache with objects needed by that partial
+    // which will repopulate the cache with objects needed by that partial
     // snapshot.
     Isolate::Current()->set_serialize_partial_snapshot_cache_length(0);
   }
   // Serialize the current state of the heap.  The order is:
   // 1) Strong references.
   // 2) Partial snapshot cache.
-  // 3) Weak references (e.g. the symbol table).
+  // 3) Weak references (e.g. the symbol table).
   virtual void SerializeStrongReferences();
   virtual void SerializeObject(Object* o,
                                HowToCode how_to_code,
@@ -641,6 +624,7 @@
   }
 
  private:
+  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
     return false;
   }
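
The opcode values in the enum above are composites: serialize.cc forms
bytes like kFromStart + how_to_code + where_to_point + space, so a single
tag byte carries the opcode base, the addressing mode, and the space
number, all under kPointedToMask. A hedged sketch of the packing, assuming
zero-based addressing-mode increments (the actual values live in the
HowToCode and WhereToPoint enums):

#include <cassert>

const int kBackref = 0x10;        // from the enum above
const int kFromStart = 0x20;      // from the enum above
const int kPointedToMask = 0x3f;

int EncodeTag(int base, int how_to_code, int where_to_point, int space) {
  int tag = base + how_to_code + where_to_point + space;
  assert((tag & kPointedToMask) == tag);  // composite fits in six bits
  return tag;
}

// Example: EncodeTag(kBackref, 0, 0, 2) == 0x12, a plain back reference
// into space 2; the deserializer masks with kPointedToMask to recover it.
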
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 3709009..35d7224 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,253 +37,398 @@
 
 
 // -----------------------------------------------------------------------------
-// Bitmap
-
-void Bitmap::Clear(MemoryChunk* chunk) {
-  Bitmap* bitmap = chunk->markbits();
-  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
-  chunk->ResetLiveBytes();
-}
-
-
-// -----------------------------------------------------------------------------
 // PageIterator
 
-
-PageIterator::PageIterator(PagedSpace* space)
-    : space_(space),
-      prev_page_(&space->anchor_),
-      next_page_(prev_page_->next_page()) { }
-
-
 bool PageIterator::has_next() {
-  return next_page_ != &space_->anchor_;
+  return prev_page_ != stop_page_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
+  prev_page_ = (prev_page_ == NULL)
+               ? space_->first_page_
+               : prev_page_->next_page();
   return prev_page_;
 }
 
 
 // -----------------------------------------------------------------------------
-// NewSpacePageIterator
+// Page
 
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
-    : prev_page_(space->anchor()),
-      next_page_(prev_page_->next_page()),
-      last_page_(prev_page_->prev_page()) { }
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
-  SemiSpace::AssertValidRange(start, limit);
+Page* Page::next_page() {
+  return heap_->isolate()->memory_allocator()->GetNextPage(this);
 }
 
 
-bool NewSpacePageIterator::has_next() {
-  return prev_page_ != last_page_;
+Address Page::AllocationTop() {
+  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
+  return owner->PageAllocationTop(this);
 }
 
 
-NewSpacePage* NewSpacePageIterator::next() {
-  ASSERT(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// HeapObjectIterator
-HeapObject* HeapObjectIterator::FromCurrentPage() {
-  while (cur_addr_ != cur_end_) {
-    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
-      cur_addr_ = space_->limit();
-      continue;
-    }
-    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_end_);
-    if (!obj->IsFiller()) {
-      ASSERT_OBJECT_SIZE(obj_size);
-      return obj;
-    }
+Address Page::AllocationWatermark() {
+  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
+  if (this == owner->AllocationTopPage()) {
+    return owner->top();
   }
-  return NULL;
+  return address() + AllocationWatermarkOffset();
+}
+
+
+uint32_t Page::AllocationWatermarkOffset() {
+  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
+                               kAllocationWatermarkOffsetShift);
+}
+
+
+void Page::SetAllocationWatermark(Address allocation_watermark) {
+  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+    // When iterating intergenerational references during scavenge
+    // we might decide to promote an encountered young object.
+    // We will allocate space for such an object and put it
+    // into the promotion queue to process later.
+    // If space for the object was allocated beyond the allocation
+    // watermark, this might cause garbage pointers to appear under the
+    // allocation watermark. To avoid visiting them during dirty-region
+    // iteration, which might still be in progress, we store a valid
+    // allocation watermark value and mark this page as having an invalid
+    // watermark.
+    SetCachedAllocationWatermark(AllocationWatermark());
+    InvalidateWatermark(true);
+  }
+
+  flags_ = (flags_ & kFlagsMask) |
+           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
+  ASSERT(AllocationWatermarkOffset()
+         == static_cast<uint32_t>(Offset(allocation_watermark)));
+}
+
+
+void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
+  mc_first_forwarded = allocation_watermark;
+}
+
+
+Address Page::CachedAllocationWatermark() {
+  return mc_first_forwarded;
+}
+
+
+uint32_t Page::GetRegionMarks() {
+  return dirty_regions_;
+}
+
+
+void Page::SetRegionMarks(uint32_t marks) {
+  dirty_regions_ = marks;
+}
+
+
+int Page::GetRegionNumberForAddress(Address addr) {
+  // Each page is divided into 256-byte regions, and each region has a
+  // corresponding dirty-mark bit in the page header. A region can contain
+  // intergenerational references iff its dirty mark is set.
+  // A normal 8K page contains exactly 32 regions, so all region marks fit
+  // into a 32-bit integer field. To calculate a region number we just
+  // divide the offset inside the page by the region size.
+  // A large page can contain more than 32 regions. But we want to avoid
+  // additional write barrier code for distinguishing between large and
+  // normal pages, so we just ignore the fact that addr points into a large
+  // page and calculate the region number as if addr pointed into a normal
+  // 8K page. This way we get a region number modulo 32, so for large pages
+  // several regions might be mapped to a single dirty mark.
+  ASSERT_PAGE_ALIGNED(this->address());
+  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
+
+  // We use masking with kPageAlignmentMask instead of Page::Offset() to
+  // get the offset relative to the beginning of the 8K page containing
+  // addr, not to the beginning of the actual page, which can be bigger
+  // than 8K.
+  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
+  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
+}
+
+
+uint32_t Page::GetRegionMaskForAddress(Address addr) {
+  return 1 << GetRegionNumberForAddress(addr);
+}
+
+
+uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
+  uint32_t result = 0;
+  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
+  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
+    result = kAllRegionsDirtyMarks;
+  } else if (length_in_bytes > 0) {
+    int start_region = GetRegionNumberForAddress(start);
+    int end_region =
+        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
+    uint32_t start_mask = (~0) << start_region;
+    uint32_t end_mask = ~((~1) << end_region);
+    result = start_mask & end_mask;
+    // If end_region < start_region, the two masks are OR'ed together.
+    if (result == 0) result = start_mask | end_mask;
+  }
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) {
+    uint32_t expected = 0;
+    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
+      expected |= GetRegionMaskForAddress(a);
+    }
+    ASSERT(expected == result);
+  }
+#endif
+  return result;
+}
+
+
+void Page::MarkRegionDirty(Address address) {
+  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
+}
+
+
+bool Page::IsRegionDirty(Address address) {
+  return GetRegionMarks() & GetRegionMaskForAddress(address);
+}
+
+
+void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
+  int rstart = GetRegionNumberForAddress(start);
+  int rend = GetRegionNumberForAddress(end);
+
+  if (reaches_limit) {
+    end += 1;
+  }
+
+  if ((rend - rstart) == 0) {
+    return;
+  }
+
+  uint32_t bitmask = 0;
+
+  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
+      || (start == ObjectAreaStart())) {
+    // First region is fully covered
+    bitmask = 1 << rstart;
+  }
+
+  while (++rstart < rend) {
+    bitmask |= 1 << rstart;
+  }
+
+  if (bitmask) {
+    SetRegionMarks(GetRegionMarks() & ~bitmask);
+  }
+}
+
+
+void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
+  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
+}
+
+
+bool Page::IsWatermarkValid() {
+  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
+      heap_->page_watermark_invalidated_mark_;
+}
+
+
+void Page::InvalidateWatermark(bool value) {
+  if (value) {
+    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+             heap_->page_watermark_invalidated_mark_;
+  } else {
+    flags_ =
+        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
+        (heap_->page_watermark_invalidated_mark_ ^
+         (1 << WATERMARK_INVALIDATED));
+  }
+
+  ASSERT(IsWatermarkValid() == !value);
+}
+
+
+bool Page::GetPageFlag(PageFlag flag) {
+  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
+}
+
+
+void Page::SetPageFlag(PageFlag flag, bool value) {
+  if (value) {
+    flags_ |= static_cast<intptr_t>(1 << flag);
+  } else {
+    flags_ &= ~static_cast<intptr_t>(1 << flag);
+  }
+}
+
+
+void Page::ClearPageFlags() {
+  flags_ = 0;
+}
+
+
+void Page::ClearGCFields() {
+  InvalidateWatermark(true);
+  SetAllocationWatermark(ObjectAreaStart());
+  if (heap_->gc_state() == Heap::SCAVENGE) {
+    SetCachedAllocationWatermark(ObjectAreaStart());
+  }
+  SetRegionMarks(kAllRegionsCleanMarks);
+}
+
+
+bool Page::WasInUseBeforeMC() {
+  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
+}
+
+
+void Page::SetWasInUseBeforeMC(bool was_in_use) {
+  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
+}
+
+
+bool Page::IsLargeObjectPage() {
+  return !GetPageFlag(IS_NORMAL_PAGE);
+}
+
+
+void Page::SetIsLargeObjectPage(bool is_large_object_page) {
+  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
+}
+
+Executability Page::PageExecutability() {
+  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+}
+
+
+void Page::SetPageExecutability(Executability executable) {
+  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
-  OS::Protect(start, size);
+void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
+  address_ = a;
+  size_ = s;
+  owner_ = o;
+  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
 }
 
 
-void MemoryAllocator::Unprotect(Address start,
-                                size_t size,
-                                Executability executable) {
-  OS::Unprotect(start, size, executable);
+bool MemoryAllocator::IsValidChunk(int chunk_id) {
+  if (!IsValidChunkId(chunk_id)) return false;
+
+  ChunkInfo& c = chunks_[chunk_id];
+  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
 }
 
 
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Protect(chunks_[id].address(), chunks_[id].size());
+bool MemoryAllocator::IsValidChunkId(int chunk_id) {
+  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
 }
 
 
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
-                chunks_[id].owner()->executable() == EXECUTABLE);
+bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
+  ASSERT(p->is_valid());
+
+  int chunk_id = GetChunkId(p);
+  if (!IsValidChunkId(chunk_id)) return false;
+
+  ChunkInfo& c = chunks_[chunk_id];
+  return (c.address() <= p->address()) &&
+         (p->address() < c.address() + c.size()) &&
+         (space == c.owner());
 }
 
-#endif
+
+Page* MemoryAllocator::GetNextPage(Page* p) {
+  ASSERT(p->is_valid());
+  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
+  return Page::FromAddress(AddressFrom<Address>(raw_addr));
+}
+
+
+int MemoryAllocator::GetChunkId(Page* p) {
+  ASSERT(p->is_valid());
+  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
+}
+
+
+void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
+  ASSERT(prev->is_valid());
+  int chunk_id = GetChunkId(prev);
+  ASSERT_PAGE_ALIGNED(next->address());
+  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
+}
+
+
+PagedSpace* MemoryAllocator::PageOwner(Page* page) {
+  int chunk_id = GetChunkId(page);
+  ASSERT(IsValidChunk(chunk_id));
+  return chunks_[chunk_id].owner();
+}
+
+
+bool MemoryAllocator::InInitialChunk(Address address) {
+  if (initial_chunk_ == NULL) return false;
+
+  Address start = static_cast<Address>(initial_chunk_->address());
+  return (start <= address) && (address < start + initial_chunk_->size());
+}
 
 
 // --------------------------------------------------------------------------
 // PagedSpace
-Page* Page::Initialize(Heap* heap,
-                       MemoryChunk* chunk,
-                       Executability executable,
-                       PagedSpace* owner) {
-  Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
-  ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(page->area_size());
-  owner->Free(page->area_start(), page->area_size());
-
-  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
-  return page;
-}
-
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
   if (!p->is_valid()) return false;
-  return p->owner() == this;
-}
-
-
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
-  if (scan) {
-    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
-    SetFlag(SCAN_ON_SCAVENGE);
-  } else {
-    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
-    ClearFlag(SCAN_ON_SCAVENGE);
-  }
-  heap_->incremental_marking()->SetOldSpacePageFlags(this);
-}
-
-
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
-  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
-      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
-  if (maybe->owner() != NULL) return maybe;
-  LargeObjectIterator iterator(HEAP->lo_space());
-  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
-    // Fixed arrays are the only pointer-containing objects in large object
-    // space.
-    if (o->IsFixedArray()) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
-      if (chunk->Contains(addr)) {
-        return chunk;
-      }
-    }
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
-    : state_(kOldPointerState),
-      old_pointer_iterator_(heap->old_pointer_space()),
-      map_iterator_(heap->map_space()),
-      lo_iterator_(heap->lo_space()) { }
-
-
-Page* Page::next_page() {
-  ASSERT(next_chunk()->owner() == owner());
-  return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
-  ASSERT(prev_chunk()->owner() == owner());
-  return static_cast<Page*>(prev_chunk());
-}
-
-
-void Page::set_next_page(Page* page) {
-  ASSERT(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-
-void Page::set_prev_page(Page* page) {
-  ASSERT(page->owner() == owner());
-  set_prev_chunk(page);
+  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
 }
 
 
 // Try linear allocation in the page of alloc_info's allocation top.  Does
-// not contain slow case logic (e.g. move to the next page or try free list
+// not contain slow case logic (e.g., move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top;
+HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
+                                         int size_in_bytes) {
+  Address current_top = alloc_info->top;
   Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit) return NULL;
+  if (new_top > alloc_info->limit) return NULL;
 
-  allocation_info_.top = new_top;
+  alloc_info->top = new_top;
+  ASSERT(alloc_info->VerifyPagedAllocation());
+  accounting_stats_.AllocateBytes(size_in_bytes);
   return HeapObject::FromAddress(current_top);
 }
 
 
 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
-  HeapObject* object = AllocateLinearly(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  object = free_list_.Allocate(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
+  ASSERT(HasBeenSetup());
+  ASSERT_OBJECT_SIZE(size_in_bytes);
+  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+  if (object != NULL) return object;
 
   object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
+  if (object != NULL) return object;
+
+  return Failure::RetryAfterGC(identity());
+}
+
+
+// Reallocating (and promoting) objects during a compacting collection.
+MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
+  ASSERT(HasBeenSetup());
+  ASSERT_OBJECT_SIZE(size_in_bytes);
+  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+  if (object != NULL) return object;
+
+  object = SlowMCAllocateRaw(size_in_bytes);
+  if (object != NULL) return object;
 
   return Failure::RetryAfterGC(identity());
 }
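
The fast path above is classic bump-pointer allocation: advance top by the
request size and fail over to the slow path when the bump would cross
limit. A self-contained sketch with simplified types (char* in place of
Address, void* in place of HeapObject*):

#include <cstddef>

struct AllocationInfo {
  char* top;    // next free byte in the current page
  char* limit;  // end of the allocatable area
};

// Returns the allocated block, or NULL so the caller can fall back to the
// slow path (move to the next page or consult the free list).
void* AllocateLinearly(AllocationInfo* info, size_t size_in_bytes) {
  char* current_top = info->top;
  char* new_top = current_top + size_in_bytes;
  if (new_top > info->limit) return NULL;
  info->top = new_top;
  return current_top;
}
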
@@ -292,29 +437,27 @@
 // -----------------------------------------------------------------------------
 // NewSpace
 
+MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
+                                           AllocationInfo* alloc_info) {
+  Address new_top = alloc_info->top + size_in_bytes;
+  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
 
-MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
-  if (allocation_info_.limit - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes);
-  }
-
-  Object* obj = HeapObject::FromAddress(allocation_info_.top);
-  allocation_info_.top += size_in_bytes;
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
+  Object* obj = HeapObject::FromAddress(alloc_info->top);
+  alloc_info->top = new_top;
+#ifdef DEBUG
+  SemiSpace* space =
+      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
+  ASSERT(space->low() <= alloc_info->top
+         && alloc_info->top <= space->high()
+         && alloc_info->limit == space->high());
+#endif
   return obj;
 }
 
 
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
-  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-  return static_cast<LargePage*>(chunk);
-}
-
-
 intptr_t LargeObjectSpace::Available() {
-  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+  return LargeObjectChunk::ObjectSizeFor(
+      heap()->isolate()->memory_allocator()->Available());
 }
 
 
@@ -324,23 +467,16 @@
   ASSERT(string->IsSeqString());
   ASSERT(string->address() + StringType::SizeFor(string->length()) ==
          allocation_info_.top);
-  Address old_top = allocation_info_.top;
   allocation_info_.top =
       string->address() + StringType::SizeFor(length);
   string->set_length(length);
-  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
-    int delta = static_cast<int>(old_top - allocation_info_.top);
-    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
-  }
 }
 
 
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  Map* map = object->map();
-  Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map()
-      || map == heap->raw_unchecked_one_pointer_filler_map()
-      || map == heap->raw_unchecked_two_pointer_filler_map();
+  return object->map() == HEAP->raw_unchecked_byte_array_map()
+      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
+      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
 }
 
 } }  // namespace v8::internal
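
The region machinery restored above keeps the write barrier down to shifts
and masks: an 8K page split into 256-byte regions has exactly 32 regions,
so one uint32_t of dirty marks per page suffices. A minimal sketch of the
mapping, assuming the 8K page size and 256-byte region size stated in the
comments:

#include <cstdint>

const int kRegionSizeLog2 = 8;                       // 256-byte regions
const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // 8K-aligned pages

// Region index of an address within its (8K-aligned) page.
int RegionNumber(uintptr_t addr) {
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

// Single-bit mask for that region; marking an address dirty is then just
// marks |= RegionMask(addr).
uint32_t RegionMask(uintptr_t addr) {
  return static_cast<uint32_t>(1) << RegionNumber(addr);
}
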
diff --git a/src/spaces.cc b/src/spaces.cc
index defe352..97c6d2a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -35,87 +35,112 @@
 namespace v8 {
 namespace internal {
 
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+  ASSERT((space).low() <= (info).top                  \
+         && (info).top <= (space).high()              \
+         && (info).limit == (space).high())
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize as if we have
-  // reached the end of the anchor page, then the first iteration will move on
-  // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             NULL);
+  Initialize(space->bottom(), space->top(), NULL);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize the current
-  // address and end as NULL, then the first iteration will move on
-  // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             size_func);
+  Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
+  Initialize(start, space->top(), NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
+                                       HeapObjectCallback size_func) {
+  Initialize(start, space->top(), size_func);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(Page* page,
                                        HeapObjectCallback size_func) {
-  Space* owner = page->owner();
-  ASSERT(owner == HEAP->old_pointer_space() ||
-         owner == HEAP->old_data_space() ||
-         owner == HEAP->map_space() ||
-         owner == HEAP->cell_space() ||
-         owner == HEAP->code_space());
-  Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->area_start(),
-             page->area_end(),
-             kOnePageOnly,
-             size_func);
-  ASSERT(page->WasSweptPrecisely());
+  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
 }
 
 
-void HeapObjectIterator::Initialize(PagedSpace* space,
-                                    Address cur, Address end,
-                                    HeapObjectIterator::PageMode mode,
+void HeapObjectIterator::Initialize(Address cur, Address end,
                                     HeapObjectCallback size_f) {
-  // Check that we actually can iterate this space.
-  ASSERT(!space->was_swept_conservatively());
-
-  space_ = space;
   cur_addr_ = cur;
-  cur_end_ = end;
-  page_mode_ = mode;
+  end_addr_ = end;
+  end_page_ = Page::FromAllocationTop(end);
   size_func_ = size_f;
+  Page* p = Page::FromAllocationTop(cur_addr_);
+  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
+
+#ifdef DEBUG
+  Verify();
+#endif
 }
 
 
-// We have hit the end of the page and should advance to the next block of
-// objects.  This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
-  ASSERT(cur_addr_ == cur_end_);
-  if (page_mode_ == kOnePageOnly) return false;
-  Page* cur_page;
-  if (cur_addr_ == NULL) {
-    cur_page = space_->anchor();
-  } else {
-    cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->area_end());
-  }
+HeapObject* HeapObjectIterator::FromNextPage() {
+  if (cur_addr_ == end_addr_) return NULL;
+
+  Page* cur_page = Page::FromAllocationTop(cur_addr_);
   cur_page = cur_page->next_page();
-  if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->area_start();
-  cur_end_ = cur_page->area_end();
-  ASSERT(cur_page->WasSweptPrecisely());
-  return true;
+  ASSERT(cur_page->is_valid());
+
+  cur_addr_ = cur_page->ObjectAreaStart();
+  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+
+  if (cur_addr_ == end_addr_) return NULL;
+  ASSERT(cur_addr_ < cur_limit_);
+#ifdef DEBUG
+  Verify();
+#endif
+  return FromCurrentPage();
+}
+
+
+#ifdef DEBUG
+void HeapObjectIterator::Verify() {
+  Page* p = Page::FromAllocationTop(cur_addr_);
+  ASSERT(p == Page::FromAllocationTop(cur_limit_));
+  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+  prev_page_ = NULL;
+  switch (mode) {
+    case PAGES_IN_USE:
+      stop_page_ = space->AllocationTopPage();
+      break;
+    case PAGES_USED_BY_MC:
+      stop_page_ = space->MCRelocationTopPage();
+      break;
+    case ALL_PAGES:
+#ifdef DEBUG
+      // Verify that the cached last page in the space is actually the
+      // last page.
+      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
+        if (!p->next_page()->is_valid()) {
+          ASSERT(space->last_page_ == p);
+        }
+      }
+#endif
+      stop_page_ = space->last_page_;
+      break;
+  }
 }
 
 
@@ -132,7 +157,7 @@
 }
 
 
-bool CodeRange::SetUp(const size_t requested) {
+bool CodeRange::Setup(const size_t requested) {
   ASSERT(code_range_ == NULL);
 
   code_range_ = new VirtualMemory(requested);
@@ -146,12 +171,7 @@
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  Address base = reinterpret_cast<Address>(code_range_->address());
-  Address aligned_base =
-      RoundUp(reinterpret_cast<Address>(code_range_->address()),
-              MemoryChunk::kAlignment);
-  size_t size = code_range_->size() - (aligned_base - base);
-  allocation_list_.Add(FreeBlock(aligned_base, size));
+  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
   current_allocation_block_index_ = 0;
   return true;
 }
@@ -208,8 +228,7 @@
 
 
 
-Address CodeRange::AllocateRawMemory(const size_t requested,
-                                     size_t* allocated) {
+void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -217,19 +236,14 @@
     GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+  *allocated = RoundUp(requested, Page::kPageSize);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
+  if (*allocated >= current.size - Page::kPageSize) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
-  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitCodePage(code_range_,
-                                       current.start,
-                                       *allocated)) {
+  if (!code_range_->Commit(current.start, *allocated, true)) {
     *allocated = 0;
     return NULL;
   }
@@ -242,8 +256,7 @@
 }
 
 
-void CodeRange::FreeRawMemory(Address address, size_t length) {
-  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
+void CodeRange::FreeRawMemory(void* address, size_t length) {
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -261,379 +274,149 @@
 // MemoryAllocator
 //
 
+// 270 is an estimate based on the static default heap size of a pair of 256K
+// semispaces and a 64M old generation.
+const int kEstimatedNumberOfChunks = 270;
+
+
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0) {
+      size_executable_(0),
+      initial_chunk_(NULL),
+      chunks_(kEstimatedNumberOfChunks),
+      free_chunk_ids_(kEstimatedNumberOfChunks),
+      max_nof_chunks_(0),
+      top_(0) {
 }
 
 
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+void MemoryAllocator::Push(int free_chunk_id) {
+  ASSERT(max_nof_chunks_ > 0);
+  ASSERT(top_ < max_nof_chunks_);
+  free_chunk_ids_[top_++] = free_chunk_id;
+}
+
+
+int MemoryAllocator::Pop() {
+  ASSERT(top_ > 0);
+  return free_chunk_ids_[--top_];
+}
+
+
+bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
+  // Over-estimate the size of the chunks_ array.  It assumes that old space
+  // always expands in units of a chunk (kChunkSize), except for the last
+  // expansion.
+  //
+  // Due to alignment, the allocated space might be one page less than the
+  // required number (kPagesPerChunk) of pages for old spaces.
+  //
+  // Reserve two chunk ids for semispaces, one for map space, one for old
+  // space, and one for code space.
+  max_nof_chunks_ =
+      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
+  if (max_nof_chunks_ > kMaxNofChunks) return false;
+
   size_ = 0;
   size_executable_ = 0;
-
+  ChunkInfo info;  // uninitialized element.
+  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
+    chunks_.Add(info);
+    free_chunk_ids_.Add(i);
+  }
+  top_ = max_nof_chunks_;
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  // Check that spaces were torn down before MemoryAllocator.
-  ASSERT(size_ == 0);
-  // TODO(gc) this will be true again when we fix FreeMemory.
-  // ASSERT(size_executable_ == 0);
+  for (int i = 0; i < max_nof_chunks_; i++) {
+    if (chunks_[i].address() != NULL) DeleteChunk(i);
+  }
+  chunks_.Clear();
+  free_chunk_ids_.Clear();
+
+  if (initial_chunk_ != NULL) {
+    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
+    delete initial_chunk_;
+    initial_chunk_ = NULL;
+  }
+
+  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
+  top_ = 0;
   capacity_ = 0;
   capacity_executable_ = 0;
+  size_ = 0;
+  max_nof_chunks_ = 0;
 }
 
 
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
-                                 Executability executable) {
-  // TODO(gc) make code_range part of memory allocator?
-  ASSERT(reservation->IsReserved());
-  size_t size = reservation->size();
-  ASSERT(size_ >= size);
-  size_ -= size;
+void* MemoryAllocator::AllocateRawMemory(const size_t requested,
+                                         size_t* allocated,
+                                         Executability executable) {
+  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
+    return NULL;
+  }
 
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
+  void* mem;
   if (executable == EXECUTABLE) {
-    ASSERT(size_executable_ >= size);
-    size_executable_ -= size;
-  }
-  // Code which is part of the code-range does not have its own VirtualMemory.
-  ASSERT(!isolate_->code_range()->contains(
-      static_cast<Address>(reservation->address())));
-  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
-  reservation->Release();
-}
-
-
-void MemoryAllocator::FreeMemory(Address base,
-                                 size_t size,
-                                 Executability executable) {
-  // TODO(gc) make code_range part of memory allocator?
-  ASSERT(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    ASSERT(size_executable_ >= size);
-    size_executable_ -= size;
-  }
-  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
-    ASSERT(executable == EXECUTABLE);
-    isolate_->code_range()->FreeRawMemory(base, size);
-  } else {
-    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
-    bool result = VirtualMemory::ReleaseRegion(base, size);
-    USE(result);
-    ASSERT(result);
-  }
-}
-
-
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
-                                              size_t alignment,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation(size, alignment);
-
-  if (!reservation.IsReserved()) return NULL;
-  size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
-  controller->TakeControl(&reservation);
-  return base;
-}
-
-
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
-  VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(size, alignment, &reservation);
-  if (base == NULL) return NULL;
-
-  if (executable == EXECUTABLE) {
-    CommitCodePage(&reservation, base, size);
-  } else {
-    if (!reservation.Commit(base,
-                            size,
-                            executable == EXECUTABLE)) {
-      return NULL;
-    }
-  }
-
-  controller->TakeControl(&reservation);
-  return base;
-}
-
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
-                                       Address start,
-                                       SemiSpace* semi_space) {
-  Address area_start = start + NewSpacePage::kObjectStartOffset;
-  Address area_end = start + Page::kPageSize;
-
-  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
-                                               start,
-                                               Page::kPageSize,
-                                               area_start,
-                                               area_end,
-                                               NOT_EXECUTABLE,
-                                               semi_space);
-  chunk->set_next_chunk(NULL);
-  chunk->set_prev_chunk(NULL);
-  chunk->initialize_scan_on_scavenge(true);
-  bool in_to_space = (semi_space->id() != kFromSpace);
-  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
-                             : MemoryChunk::IN_FROM_SPACE);
-  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
-                                       : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
-  heap->incremental_marking()->SetNewSpacePageFlags(page);
-  return page;
-}
-
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
-  set_next_chunk(this);
-  set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
-  SetFlags(0, ~0);
-}
-
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
-                                     Address base,
-                                     size_t size,
-                                     Address area_start,
-                                     Address area_end,
-                                     Executability executable,
-                                     Space* owner) {
-  MemoryChunk* chunk = FromAddress(base);
-
-  ASSERT(base == chunk->address());
-
-  chunk->heap_ = heap;
-  chunk->size_ = size;
-  chunk->area_start_ = area_start;
-  chunk->area_end_ = area_end;
-  chunk->flags_ = 0;
-  chunk->set_owner(owner);
-  chunk->InitializeReservedMemory();
-  chunk->slots_buffer_ = NULL;
-  chunk->skip_list_ = NULL;
-  chunk->ResetLiveBytes();
-  Bitmap::Clear(chunk);
-  chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT_PRECISELY);
-
-  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
-  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
-
-  if (executable == EXECUTABLE) {
-    chunk->SetFlag(IS_EXECUTABLE);
-  }
-
-  if (owner == heap->old_data_space()) {
-    chunk->SetFlag(CONTAINS_ONLY_DATA);
-  }
-
-  return chunk;
-}
-
-
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
-  next_chunk_ = other->next_chunk_;
-  prev_chunk_ = other;
-  other->next_chunk_->prev_chunk_ = this;
-  other->next_chunk_ = this;
-}
-
-
-void MemoryChunk::Unlink() {
-  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
-    heap_->decrement_scan_on_scavenge_pages();
-    ClearFlag(SCAN_ON_SCAVENGE);
-  }
-  next_chunk_->prev_chunk_ = prev_chunk_;
-  prev_chunk_->next_chunk_ = next_chunk_;
-  prev_chunk_ = NULL;
-  next_chunk_ = NULL;
-}
-
-
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
-                                            Executability executable,
-                                            Space* owner) {
-  size_t chunk_size;
-  Heap* heap = isolate_->heap();
-  Address base = NULL;
-  VirtualMemory reservation;
-  Address area_start = NULL;
-  Address area_end = NULL;
-  if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
-                         OS::CommitPageSize()) + CodePageGuardSize();
-
     // Check executable memory limit.
-    if (size_executable_ + chunk_size > capacity_executable_) {
+    if (size_executable_ + requested >
+        static_cast<size_t>(capacity_executable_)) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
-
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
-      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
-                       MemoryChunk::kAlignment));
-      if (base == NULL) return NULL;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
+      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
     } else {
-      base = AllocateAlignedMemory(chunk_size,
-                                   MemoryChunk::kAlignment,
-                                   executable,
-                                   &reservation);
-      if (base == NULL) return NULL;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
+      mem = OS::Allocate(requested, allocated, true);
     }
-
-#ifdef DEBUG
-    ZapBlock(base, CodePageGuardStartOffset());
-    ZapBlock(base + CodePageAreaStartOffset(), body_size);
-#endif
-    area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + body_size;
+    // Update executable memory size.
+    size_executable_ += static_cast<int>(*allocated);
   } else {
-    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
-    base = AllocateAlignedMemory(chunk_size,
-                                 MemoryChunk::kAlignment,
-                                 executable,
-                                 &reservation);
-
-    if (base == NULL) return NULL;
+    mem = OS::Allocate(requested, allocated, false);
+  }
+  int alloced = static_cast<int>(*allocated);
+  size_ += alloced;
 
 #ifdef DEBUG
-    ZapBlock(base, chunk_size);
+  ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
-
-    area_start = base + Page::kObjectStartOffset;
-    area_end = base + chunk_size;
-  }
-
-  isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(chunk_size));
-
-  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
-
-  MemoryChunk* result = MemoryChunk::Initialize(heap,
-                                                base,
-                                                chunk_size,
-                                                area_start,
-                                                area_end,
-                                                executable,
-                                                owner);
-  result->set_reserved_memory(&reservation);
-  return result;
+  isolate_->counters()->memory_allocated()->Increment(alloced);
+  return mem;
 }
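
The restored AllocateRawMemory is a thin wrapper: it enforces the executable-memory cap up front, takes memory from the code range when one exists (otherwise from the OS), and bumps size_ and size_executable_ by the amount actually returned through *allocated. A minimal sketch of that flow, where malloc stands in for both allocation paths, the file-scope counters stand in for the MemoryAllocator fields, and the cap value is an assumption:

    #include <cstddef>
    #include <cstdlib>

    // Stand-ins for the MemoryAllocator state; the cap is an assumed value.
    static size_t size_ = 0;             // total memory handed out
    static size_t size_executable_ = 0;  // executable subset of size_
    static const size_t kCapacityExecutable = 8u * 1024 * 1024;

    void* AllocateRaw(size_t requested, size_t* allocated, bool executable) {
      if (executable && size_executable_ + requested > kCapacityExecutable) {
        return NULL;  // the LOG'd "capacity exceeded" case above
      }
      void* mem = malloc(requested);  // stand-in for code range / OS::Allocate
      if (mem == NULL) return NULL;
      *allocated = requested;  // real allocators may round the size up
      size_ += *allocated;
      if (executable) size_executable_ += *allocated;
      return mem;
    }
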
 
 
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
+void MemoryAllocator::FreeRawMemory(void* mem,
+                                    size_t length,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
-                                     executable,
-                                     owner);
-
-  if (chunk == NULL) return NULL;
-
-  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Executability executable,
-                                              Space* owner) {
-  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
-  if (chunk == NULL) return NULL;
-  return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
-
-void MemoryAllocator::Free(MemoryChunk* chunk) {
-  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->owner() != NULL) {
-    ObjectSpace space =
-        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
-    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
-  }
-
-  isolate_->heap()->RememberUnmappedPage(
-      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
-
-  delete chunk->slots_buffer();
-  delete chunk->skip_list();
-
-  VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
-  } else {
-    FreeMemory(chunk->address(),
-               chunk->size(),
-               chunk->executable());
-  }
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
-                                  Executability executable) {
-  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
 #ifdef DEBUG
-  ZapBlock(start, size);
+  // Do not try to zap the guard page.
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!VirtualMemory::UncommitRegion(start, size)) return false;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
-  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = kZapValue;
+  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
+    isolate_->code_range()->FreeRawMemory(mem, length);
+  } else {
+    OS::Free(mem, length);
   }
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
+  size_ -= static_cast<int>(length);
+  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+
+  ASSERT(size_ >= 0);
+  ASSERT(size_executable_ >= 0);
 }
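
FreeRawMemory mirrors the allocation path: zap the block in debug builds (skipping the leading guard page of executable chunks, which is protected and would fault), hand the memory back to the code range or the OS, and decrement the counters. The guard-page offset arithmetic in isolation, with an assumed 8 KB value standing in for Page::kPageSize:

    #include <cstddef>
    #include <cstring>

    const size_t kPageSize = 8192;  // assumed value of Page::kPageSize

    // Debug-only zap of a block being freed: executable chunks begin with a
    // protected guard page, so the fill must start one page in.
    void ZapFreedBlock(unsigned char* mem, size_t length, bool executable) {
      size_t guard_size = executable ? kPageSize : 0;
      memset(mem + guard_size, 0xde, length - guard_size);  // kZapValue stand-in
    }
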
 
 
@@ -682,6 +465,269 @@
   UNREACHABLE();
 }
 
+void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
+  ASSERT(initial_chunk_ == NULL);
+
+  initial_chunk_ = new VirtualMemory(requested);
+  CHECK(initial_chunk_ != NULL);
+  if (!initial_chunk_->IsReserved()) {
+    delete initial_chunk_;
+    initial_chunk_ = NULL;
+    return NULL;
+  }
+
+  // We are sure that we have mapped a block of requested addresses.
+  ASSERT(initial_chunk_->size() == requested);
+  LOG(isolate_,
+      NewEvent("InitialChunk", initial_chunk_->address(), requested));
+  size_ += static_cast<int>(requested);
+  return initial_chunk_->address();
+}
+
+
+static int PagesInChunk(Address start, size_t size) {
+  // The first page starts on the first page-aligned address from start onward
+  // and the last page ends on the last page-aligned address before
+  // start+size.  Page::kPageSize is a power of two so we can divide by
+  // shifting.
+  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
+      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
+}
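+
+A standalone version of the same arithmetic, with a worked example: PagesInChunk counts only pages lying entirely inside [start, start + size), by rounding start up and start + size down to page boundaries before dividing. kPageSize here is an assumed 8 KB stand-in for Page::kPageSize (1 << kPageSizeBits), and WholePagesIn is a hypothetical name:
+
+    #include <cassert>
+    #include <cstdint>
+
+    typedef uintptr_t Addr;
+    const Addr kPageSize = 8192;  // assumed 1 << kPageSizeBits
+
+    Addr RoundDownTo(Addr x, Addr m) { return x & ~(m - 1); }
+    Addr RoundUpTo(Addr x, Addr m) { return RoundDownTo(x + m - 1, m); }
+
+    // Whole pages fully contained in [start, start + size).
+    int WholePagesIn(Addr start, Addr size) {
+      return static_cast<int>(
+          (RoundDownTo(start + size, kPageSize) - RoundUpTo(start, kPageSize)) /
+          kPageSize);
+    }
+
+    int main() {
+      // A 40 KB chunk starting 1 KB past a page boundary: the usable page
+      // boundaries are 8 KB..40 KB, i.e. four whole pages survive.
+      assert(WholePagesIn(1024, 40 * 1024) == 4);
+      return 0;
+    }
+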
+
+
+Page* MemoryAllocator::AllocatePages(int requested_pages,
+                                     int* allocated_pages,
+                                     PagedSpace* owner) {
+  if (requested_pages <= 0) return Page::FromAddress(NULL);
+  size_t chunk_size = requested_pages * Page::kPageSize;
+
+  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
+  if (chunk == NULL) return Page::FromAddress(NULL);
+  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
+
+  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+
+  // We may 'lose' a page due to alignment.
+  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
+
+  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+
+  // Check that we got at least one page that we can use.
+  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+    FreeRawMemory(chunk,
+                  chunk_size,
+                  owner->executable());
+    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
+    return Page::FromAddress(NULL);
+  }
+
+  if (guard_size != 0) {
+    OS::Guard(chunk, guard_size);
+    chunk_size -= guard_size;
+    chunk = static_cast<Address>(chunk) + guard_size;
+    --*allocated_pages;
+  }
+
+  int chunk_id = Pop();
+  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
+
+  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
+
+  return new_pages;
+}
+
+
+Page* MemoryAllocator::CommitPages(Address start, size_t size,
+                                   PagedSpace* owner, int* num_pages) {
+  ASSERT(start != NULL);
+  *num_pages = PagesInChunk(start, size);
+  ASSERT(*num_pages > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
+    return Page::FromAddress(NULL);
+  }
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+
+  // So long as we correctly overestimated the number of chunks, we should
+  // not run out of chunk ids.
+  CHECK(!OutOfChunkIds());
+  int chunk_id = Pop();
+  chunks_[chunk_id].init(start, size, owner);
+  return InitializePagesInChunk(chunk_id, *num_pages, owner);
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start,
+                                  size_t size,
+                                  Executability executable) {
+  ASSERT(start != NULL);
+  ASSERT(size > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+
+  if (!initial_chunk_->Commit(start, size, executable)) return false;
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  ASSERT(start != NULL);
+  ASSERT(size > 0);
+  ASSERT(initial_chunk_ != NULL);
+  ASSERT(InInitialChunk(start));
+  ASSERT(InInitialChunk(start + size - 1));
+
+  if (!initial_chunk_->Uncommit(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
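+
+Both commit paths above only ever touch addresses inside the initial chunk: the address range is reserved once by ReserveInitialChunk, and CommitBlock/UncommitBlock then toggle the backing of page-aligned sub-ranges as spaces grow and shrink. A Linux-flavored POSIX sketch of that reserve-then-commit pattern, with mmap and mprotect standing in for V8's VirtualMemory:
+
+    #include <cstddef>
+    #include <sys/mman.h>
+
+    // Reserve address space without backing it, as ReserveInitialChunk does
+    // through VirtualMemory (MAP_NORESERVE is a Linux-specific flag).
+    void* ReserveRegion(size_t size) {
+      void* p = mmap(NULL, size, PROT_NONE,
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+      return p == MAP_FAILED ? NULL : p;
+    }
+
+    // Commit a sub-range so it is readable and writable, analogous to
+    // initial_chunk_->Commit(start, size, ...).
+    bool CommitRegion(void* start, size_t size) {
+      return mprotect(start, size, PROT_READ | PROT_WRITE) == 0;
+    }
+
+    // Make a sub-range inaccessible again; a real uncommit would also
+    // madvise(MADV_DONTNEED) so the backing pages are actually released.
+    bool UncommitRegion(void* start, size_t size) {
+      return mprotect(start, size, PROT_NONE) == 0;
+    }
+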
+
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
+}
+
+
+Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                                              PagedSpace* owner) {
+  ASSERT(IsValidChunk(chunk_id));
+  ASSERT(pages_in_chunk > 0);
+
+  Address chunk_start = chunks_[chunk_id].address();
+
+  Address low = RoundUp(chunk_start, Page::kPageSize);
+
+#ifdef DEBUG
+  size_t chunk_size = chunks_[chunk_id].size();
+  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+  ASSERT(pages_in_chunk <=
+        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
+#endif
+
+  Address page_addr = low;
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->heap_ = owner->heap();
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    p->InvalidateWatermark(true);
+    p->SetIsLargeObjectPage(false);
+    p->SetAllocationWatermark(p->ObjectAreaStart());
+    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
+    page_addr += Page::kPageSize;
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  return Page::FromAddress(low);
+}
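+
+InitializePagesInChunk threads a chunk's pages into a singly linked list without a separate pointer field: opaque_header stores the next page's address (page-aligned, so its low kPageSizeBits are free) OR'd with the chunk id, and a zero next-address marks the last page of the chunk. A sketch of the packing, assuming a 13-bit page shift:
+
+    #include <cassert>
+    #include <cstdint>
+
+    const int kPageSizeBits = 13;  // assumed; 8 KB pages
+    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;
+
+    // Pack the page-aligned next-page address and a small chunk id into one
+    // word, as Page::opaque_header does; a zero address means "last page".
+    uintptr_t PackHeader(uintptr_t next_page_addr, int chunk_id) {
+      assert((next_page_addr & kPageAlignmentMask) == 0);
+      assert(chunk_id >= 0 &&
+             static_cast<uintptr_t>(chunk_id) <= kPageAlignmentMask);
+      return next_page_addr | static_cast<uintptr_t>(chunk_id);
+    }
+
+    uintptr_t NextPageAddress(uintptr_t header) {
+      return header & ~kPageAlignmentMask;  // strip the chunk id bits
+    }
+
+    int ChunkId(uintptr_t header) {
+      return static_cast<int>(header & kPageAlignmentMask);
+    }
+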
+
+
+Page* MemoryAllocator::FreePages(Page* p) {
+  if (!p->is_valid()) return p;
+
+  // Find the first page in the same chunk as 'p'.
+  Page* first_page = FindFirstPageInSameChunk(p);
+  Page* page_to_return = Page::FromAddress(NULL);
+
+  if (p != first_page) {
+    // Find the last page in the same chunk as 'p'.
+    Page* last_page = FindLastPageInSameChunk(p);
+    first_page = GetNextPage(last_page);  // first page in next chunk
+
+    // Set the next_page of last_page to NULL.
+    SetNextPage(last_page, Page::FromAddress(NULL));
+    page_to_return = p;  // return 'p' when exiting
+  }
+
+  while (first_page->is_valid()) {
+    int chunk_id = GetChunkId(first_page);
+    ASSERT(IsValidChunk(chunk_id));
+
+    // Find the first page of the next chunk before deleting this chunk.
+    first_page = GetNextPage(FindLastPageInSameChunk(first_page));
+
+    // Free the current chunk.
+    DeleteChunk(chunk_id);
+  }
+
+  return page_to_return;
+}
+
+
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    if (chunks_[i].owner() == space) {
+      DeleteChunk(i);
+    }
+  }
+}
+
+
+void MemoryAllocator::DeleteChunk(int chunk_id) {
+  ASSERT(IsValidChunk(chunk_id));
+
+  ChunkInfo& c = chunks_[chunk_id];
+
+  // We cannot free a chunk contained in the initial chunk because it was not
+  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
+  // memory.
+  if (InInitialChunk(c.address())) {
+    // TODO(1240712): VirtualMemory::Uncommit has a return value which
+    // is ignored here.
+    initial_chunk_->Uncommit(c.address(), c.size());
+    Counters* counters = isolate_->counters();
+    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
+  } else {
+    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
+    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
+    size_t size = c.size();
+    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
+    PerformAllocationCallback(space, kAllocationActionFree, size);
+  }
+  c.init(NULL, 0, NULL);
+  Push(chunk_id);
+}
+
+
+Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
+  int chunk_id = GetChunkId(p);
+  ASSERT(IsValidChunk(chunk_id));
+
+  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
+  return Page::FromAddress(low);
+}
+
+
+Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
+  int chunk_id = GetChunkId(p);
+  ASSERT(IsValidChunk(chunk_id));
+
+  Address chunk_start = chunks_[chunk_id].address();
+  size_t chunk_size = chunks_[chunk_id].size();
+
+  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
+  ASSERT(chunk_start <= p->address() && p->address() < high);
+
+  return Page::FromAddress(high - Page::kPageSize);
+}
+
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
@@ -694,75 +740,74 @@
 #endif
 
 
-int MemoryAllocator::CodePageGuardStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
-}
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+                                                 Page** first_page,
+                                                 Page** last_page,
+                                                 Page** last_page_in_use) {
+  Page* first = NULL;
+  Page* last = NULL;
 
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    ChunkInfo& chunk = chunks_[i];
 
-int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(OS::CommitPageSize());
-}
-
-
-int MemoryAllocator::CodePageAreaStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-
-int MemoryAllocator::CodePageAreaEndOffset() {
-  // We are guarding code pages: the last OS page will be protected as
-  // non-writable.
-  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
-}
-
-
-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
-                                     Address start,
-                                     size_t size) {
-  // Commit page header (not executable).
-  if (!vm->Commit(start,
-                  CodePageGuardStartOffset(),
-                  false)) {
-    return false;
+    if (chunk.owner() == space) {
+      if (first == NULL) {
+        Address low = RoundUp(chunk.address(), Page::kPageSize);
+        first = Page::FromAddress(low);
+      }
+      last = RelinkPagesInChunk(i,
+                                chunk.address(),
+                                chunk.size(),
+                                last,
+                                last_page_in_use);
+    }
   }
 
-  // Create guard page after the header.
-  if (!vm->Guard(start + CodePageGuardStartOffset())) {
-    return false;
+  if (first_page != NULL) {
+    *first_page = first;
   }
 
-  // Commit page body (executable).
-  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
-  if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  area_size,
-                  true)) {
-    return false;
+  if (last_page != NULL) {
+    *last_page = last;
   }
-
-  // Create guard page after the allocatable area.
-  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
-    return false;
-  }
-
-  return true;
 }
 
 
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+                                          Address chunk_start,
+                                          size_t chunk_size,
+                                          Page* prev,
+                                          Page** last_page_in_use) {
+  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
 
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  if (prev->is_valid()) {
+    SetNextPage(prev, Page::FromAddress(page_addr));
   }
-  chunk->IncrementLiveBytes(by);
+
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    page_addr += Page::kPageSize;
+
+    p->InvalidateWatermark(true);
+    if (p->WasInUseBeforeMC()) {
+      *last_page_in_use = p;
+    }
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  if (last_page->WasInUseBeforeMC()) {
+    *last_page_in_use = last_page;
+  }
+
+  return last_page;
 }
 
+
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -770,171 +815,296 @@
                        intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
-    : Space(heap, id, executable),
-      free_list_(this),
-      was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)),
-      unswept_free_bytes_(0) {
-  if (id == CODE_SPACE) {
-    area_size_ = heap->isolate()->memory_allocator()->
-        CodePageAreaSize();
-  } else {
-    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
-  }
+    : Space(heap, id, executable) {
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-      * AreaSize();
+                  * Page::kObjectAreaSize;
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
-  anchor_.InitializeAsAnchor(this);
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
 }
 
 
-bool PagedSpace::SetUp() {
+bool PagedSpace::Setup(Address start, size_t size) {
+  if (HasBeenSetup()) return false;
+
+  int num_pages = 0;
+  // Try to use the virtual memory range passed to us.  If it is too small to
+  // contain at least one page, ignore it and allocate instead.
+  int pages_in_chunk = PagesInChunk(start, size);
+  if (pages_in_chunk > 0) {
+    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
+        RoundUp(start, Page::kPageSize),
+        Page::kPageSize * pages_in_chunk,
+        this, &num_pages);
+  } else {
+    int requested_pages =
+        Min(MemoryAllocator::kPagesPerChunk,
+            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
+    first_page_ =
+        Isolate::Current()->memory_allocator()->AllocatePages(
+            requested_pages, &num_pages, this);
+    if (!first_page_->is_valid()) return false;
+  }
+
+  // We are sure that the first page is valid and that we have at least one
+  // page.
+  ASSERT(first_page_->is_valid());
+  ASSERT(num_pages > 0);
+  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
+  ASSERT(Capacity() <= max_capacity_);
+
+  // Sequentially clear region marks in the newly allocated
+  // pages and cache the current last page in the space.
+  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    last_page_ = p;
+  }
+
+  // Use first_page_ for allocation.
+  SetAllocationInfo(&allocation_info_, first_page_);
+
+  page_list_is_chunk_ordered_ = true;
+
   return true;
 }
 
 
-bool PagedSpace::HasBeenSetUp() {
-  return true;
+bool PagedSpace::HasBeenSetup() {
+  return (Capacity() > 0);
 }
 
 
 void PagedSpace::TearDown() {
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    heap()->isolate()->memory_allocator()->Free(iterator.next());
-  }
-  anchor_.set_next_page(&anchor_);
-  anchor_.set_prev_page(&anchor_);
+  Isolate::Current()->memory_allocator()->FreeAllPages(this);
+  first_page_ = NULL;
   accounting_stats_.Clear();
 }
 
 
+void PagedSpace::MarkAllPagesClean() {
+  PageIterator it(this, PageIterator::ALL_PAGES);
+  while (it.has_next()) {
+    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+  }
+}
+
+
 MaybeObject* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on precisely swept spaces.
+  // Note: this function can only be called before or after mark-compact GC
+  // because it accesses map pointers.
   ASSERT(!heap()->mark_compact_collector()->in_use());
 
   if (!Contains(addr)) return Failure::Exception();
 
   Page* p = Page::FromAddress(addr);
-  HeapObjectIterator it(p, NULL);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    Address cur = obj->address();
+  ASSERT(IsUsed(p));
+  Address cur = p->ObjectAreaStart();
+  Address end = p->AllocationTop();
+  while (cur < end) {
+    HeapObject* obj = HeapObject::FromAddress(cur);
     Address next = cur + obj->Size();
     if ((cur <= addr) && (addr < next)) return obj;
+    cur = next;
   }
 
   UNREACHABLE();
   return Failure::Exception();
 }
 
-bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % AreaSize() == 0);
-  ASSERT(Capacity() % AreaSize() == 0);
+
+bool PagedSpace::IsUsed(Page* page) {
+  PageIterator it(this, PageIterator::PAGES_IN_USE);
+  while (it.has_next()) {
+    if (page == it.next()) return true;
+  }
+  return false;
+}
+
+
+void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
+  alloc_info->top = p->ObjectAreaStart();
+  alloc_info->limit = p->ObjectAreaEnd();
+  ASSERT(alloc_info->VerifyPagedAllocation());
+}
+
+
+void PagedSpace::MCResetRelocationInfo() {
+  // Set page indexes.
+  int i = 0;
+  PageIterator it(this, PageIterator::ALL_PAGES);
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->mc_page_index = i++;
+  }
+
+  // Set mc_forwarding_info_ to the first page in the space.
+  SetAllocationInfo(&mc_forwarding_info_, first_page_);
+  // All the bytes in the space are 'available'.  We will rediscover
+  // allocated and wasted bytes during GC.
+  accounting_stats_.Reset();
+}
+
+
+int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
+#ifdef DEBUG
+  // The Contains function considers an address at the beginning of a
+  // page to be in that page, while MCSpaceOffsetForAddress considers it
+  // to be in the previous page.
+  if (Page::IsAlignedToPageSize(addr)) {
+    ASSERT(Contains(addr - kPointerSize));
+  } else {
+    ASSERT(Contains(addr));
+  }
+#endif
+
+  // If addr is at the end of a page, it belongs to the previous page.
+  Page* p = Page::IsAlignedToPageSize(addr)
+            ? Page::FromAllocationTop(addr)
+            : Page::FromAddress(addr);
+  int index = p->mc_page_index;
+  return (index * Page::kPageSize) + p->Offset(addr);
+}
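+
+MCSpaceOffsetForAddress linearizes an address into a space-relative offset for encoding forwarding addresses: MCResetRelocationInfo numbers the pages consecutively in mc_page_index, so the offset is that index scaled by the page size plus the offset within the page, with a page-boundary address credited to the page that ends there. The core computation in isolation, with an assumed 8 KB page:
+
+    #include <cstdint>
+
+    const uintptr_t kPageSize = 8192;  // assumed value of Page::kPageSize
+
+    // Space-relative offset of an object: its page's mc_page_index scaled
+    // by the page size, plus its offset within the page.
+    uintptr_t SpaceOffset(int mc_page_index, uintptr_t offset_in_page) {
+      return static_cast<uintptr_t>(mc_page_index) * kPageSize +
+             offset_in_page;
+    }
+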
+
+
+// Slow case for reallocating and promoting objects during a compacting
+// collection.  This function is not space-specific.
+HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
+  Page* current_page = TopPageOf(mc_forwarding_info_);
+  if (!current_page->next_page()->is_valid()) {
+    if (!Expand(current_page)) {
+      return NULL;
+    }
+  }
+
+  // There are surely more pages in the space now.
+  ASSERT(current_page->next_page()->is_valid());
+  // We do not add the top of page block for current page to the space's
+  // free list---the block may contain live objects so we cannot write
+  // bookkeeping information to it.  Instead, we will recover top of page
+  // blocks when we move objects to their new locations.
+  //
+  // We do however write the allocation pointer to the page.  The encoding
+  // of forwarding addresses is as an offset in terms of live bytes, so we
+  // need quick access to the allocation top of each page to decode
+  // forwarding addresses.
+  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+  current_page->next_page()->InvalidateWatermark(true);
+  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
+  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
+}
+
+
+bool PagedSpace::Expand(Page* last_page) {
+  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
 
   if (Capacity() == max_capacity_) return false;
 
   ASSERT(Capacity() < max_capacity_);
+  // Last page must be valid and its next page is invalid.
+  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
 
-  // Are we going to exceed capacity for this space?
-  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+  int available_pages =
+      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
+  // We don't want to have to handle small chunks near the end, so if there
+  // are not kPagesPerChunk pages available without exceeding the max
+  // capacity, then act as if memory has run out.
+  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
 
-  return true;
-}
+  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
+      desired_pages, &desired_pages, this);
+  if (!p->is_valid()) return false;
 
-bool PagedSpace::Expand() {
-  if (!CanExpand()) return false;
-
-  Page* p = heap()->isolate()->memory_allocator()->
-      AllocatePage(this, executable());
-  if (p == NULL) return false;
-
+  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
-  p->InsertAfter(anchor_.prev_page());
+  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
+
+  // Sequentially clear region marks of new pages and cache the
+  // new last page in the space.
+  while (p->is_valid()) {
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    last_page_ = p;
+    p = p->next_page();
+  }
 
   return true;
 }
 
 
+#ifdef DEBUG
 int PagedSpace::CountTotalPages() {
-  PageIterator it(this);
   int count = 0;
-  while (it.has_next()) {
-    it.next();
+  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     count++;
   }
   return count;
 }
+#endif
 
 
-void PagedSpace::ReleasePage(Page* page) {
-  ASSERT(page->LiveBytes() == 0);
-  ASSERT(AreaSize() == page->area_size());
-
-  // Adjust list of unswept pages if the page is the head of the list.
-  if (first_unswept_page_ == page) {
-    first_unswept_page_ = page->next_page();
-    if (first_unswept_page_ == anchor()) {
-      first_unswept_page_ = Page::FromAddress(NULL);
-    }
+void PagedSpace::Shrink() {
+  if (!page_list_is_chunk_ordered_) {
+    // We can't shrink the space if the page list is not chunk-ordered
+    // (see comment for class MemoryAllocator for definition).
+    return;
   }
 
-  if (page->WasSwept()) {
-    intptr_t size = free_list_.EvictFreeListItems(page);
-    accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(AreaSize(), static_cast<int>(size));
-  } else {
-    DecreaseUnsweptFreeBytes(page);
+  // Release the free pages after the allocation top.
+  Page* top_page = AllocationTopPage();
+  ASSERT(top_page->is_valid());
+
+  // Count the number of pages we would like to free.
+  int pages_to_free = 0;
+  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+    pages_to_free++;
   }
 
-  if (Page::FromAllocationTop(allocation_info_.top) == page) {
-    allocation_info_.top = allocation_info_.limit = NULL;
+  // Free pages after top_page.
+  Page* p = heap()->isolate()->memory_allocator()->
+      FreePages(top_page->next_page());
+  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
+
+  // Find out how many pages we failed to free and update last_page_.
+  // Please note pages can only be freed in whole chunks.
+  last_page_ = top_page;
+  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
+    pages_to_free--;
+    last_page_ = p;
   }
 
-  page->Unlink();
-  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
-    heap()->isolate()->memory_allocator()->Free(page);
-  } else {
-    heap()->QueueMemoryChunkForFree(page);
-  }
-
-  ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % AreaSize() == 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
+  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
+  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
 }
 
 
-void PagedSpace::ReleaseAllUnusedPages() {
-  PageIterator it(this);
-  while (it.has_next()) {
-    Page* page = it.next();
-    if (!page->WasSwept()) {
-      if (page->LiveBytes() == 0) ReleasePage(page);
-    } else {
-      HeapObject* obj = HeapObject::FromAddress(page->area_start());
-      if (obj->IsFreeSpace() &&
-          FreeSpace::cast(obj)->size() == AreaSize()) {
-        // Sometimes we allocate memory from free list but don't
-        // immediately initialize it (e.g. see PagedSpace::ReserveSpace
-        // called from Heap::ReserveSpace that can cause GC before
-        // reserved space is actually initialized).
-        // Thus we can't simply assume that obj represents a valid
-        // node still owned by a free list
-        // Instead we should verify that the page is fully covered
-        // by free list items.
-        FreeList::SizeStats sizes;
-        free_list_.CountFreeListItems(page, &sizes);
-        if (sizes.Total() == AreaSize()) {
-          ReleasePage(page);
-        }
-      }
-    }
+bool PagedSpace::EnsureCapacity(int capacity) {
+  if (Capacity() >= capacity) return true;
+
+  // Start from the allocation top and loop to the last page in the space.
+  Page* last_page = AllocationTopPage();
+  Page* next_page = last_page->next_page();
+  while (next_page->is_valid()) {
+    last_page = heap()->isolate()->memory_allocator()->
+        FindLastPageInSameChunk(next_page);
+    next_page = last_page->next_page();
   }
-  heap()->FreeQueuedChunks();
+
+  // Expand the space until it has the required capacity or expansion fails.
+  do {
+    if (!Expand(last_page)) return false;
+    ASSERT(last_page->next_page()->is_valid());
+    last_page =
+        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
+            last_page->next_page());
+  } while (Capacity() < capacity);
+
+  return true;
 }
 
 
@@ -944,52 +1114,61 @@
 
 
 #ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // We can only iterate over the pages if they were swept precisely.
-  if (was_swept_conservatively_) return;
+  // The allocation pointer should be valid, and it should be in a page in the
+  // space.
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
 
-  bool allocation_pointer_found_in_space =
-      (allocation_info_.top == allocation_info_.limit);
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
-    ASSERT(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top)) {
-      allocation_pointer_found_in_space = true;
-    }
-    ASSERT(page->WasSweptPrecisely());
-    HeapObjectIterator it(page, NULL);
-    Address end_of_previous_object = page->area_start();
-    Address top = page->area_end();
-    int black_size = 0;
-    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-      ASSERT(end_of_previous_object <= object->address());
-
-      // The first word should be a map, and we expect all map pointers to
-      // be in map space.
-      Map* map = object->map();
-      ASSERT(map->IsMap());
-      ASSERT(heap()->map_space()->Contains(map));
-
-      // Perform space-specific object verification.
-      VerifyObject(object);
-
-      // The object itself should look OK.
-      object->Verify();
-
-      // All the interior pointers should be contained in the heap.
-      int size = object->Size();
-      object->IterateBody(map->instance_type(), size, visitor);
-      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
-        black_size += size;
+  // Loop over all the pages.
+  bool above_allocation_top = false;
+  Page* current_page = first_page_;
+  while (current_page->is_valid()) {
+    if (above_allocation_top) {
+      // We don't care what's above the allocation top.
+    } else {
+      Address top = current_page->AllocationTop();
+      if (current_page == top_page) {
+        ASSERT(top == allocation_info_.top);
+        // The next page will be above the allocation top.
+        above_allocation_top = true;
       }
 
-      ASSERT(object->address() + size <= top);
-      end_of_previous_object = object->address() + size;
+      // It should be packed with objects from the bottom to the top.
+      Address current = current_page->ObjectAreaStart();
+      while (current < top) {
+        HeapObject* object = HeapObject::FromAddress(current);
+
+        // The first word should be a map, and we expect all map pointers to
+        // be in map space.
+        Map* map = object->map();
+        ASSERT(map->IsMap());
+        ASSERT(heap()->map_space()->Contains(map));
+
+        // Perform space-specific object verification.
+        VerifyObject(object);
+
+        // The object itself should look OK.
+        object->Verify();
+
+        // All the interior pointers should be contained in the heap, and
+        // page regions covering intergenerational references should be
+        // marked dirty.
+        int size = object->Size();
+        object->IterateBody(map->instance_type(), size, visitor);
+
+        current += size;
+      }
+
+      // The allocation pointer should not be in the middle of an object.
+      ASSERT(current == top);
     }
-    ASSERT_LE(black_size, page->LiveBytes());
+
+    current_page = current_page->next_page();
   }
-  ASSERT(allocation_pointer_found_in_space);
 }
 #endif
 
@@ -998,28 +1177,18 @@
 // NewSpace implementation
 
 
-bool NewSpace::SetUp(int reserved_semispace_capacity,
-                     int maximum_semispace_capacity) {
-  // Set up new space based on the preallocated memory block defined by
+bool NewSpace::Setup(Address start, int size) {
+  // Set up new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
-  size_t size = 2 * reserved_semispace_capacity;
-  Address base =
-      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-          size, size, &reservation_);
-  if (base == NULL) return false;
-
-  chunk_base_ = base;
-  chunk_size_ = static_cast<uintptr_t>(size);
-  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
+  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
 
-  // Allocate and set up the histogram arrays if necessary.
+  // Allocate and set up the histogram arrays if necessary.
   allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
   promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
 
@@ -1028,28 +1197,31 @@
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
-  ASSERT(static_cast<intptr_t>(chunk_size_) >=
-         2 * heap()->ReservedSemiSpaceSize());
-  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
+  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
+  ASSERT(IsAddressAligned(start, size, 0));
 
-  to_space_.SetUp(chunk_base_,
-                  initial_semispace_capacity,
-                  maximum_semispace_capacity);
-  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity,
-                    maximum_semispace_capacity);
-  if (!to_space_.Commit()) {
+  if (!to_space_.Setup(start,
+                       initial_semispace_capacity,
+                       maximum_semispace_capacity)) {
+    return false;
+  }
+  if (!from_space_.Setup(start + maximum_semispace_capacity,
+                         initial_semispace_capacity,
+                         maximum_semispace_capacity)) {
     return false;
   }
 
-  start_ = chunk_base_;
-  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
+  start_ = start;
+  address_mask_ = ~(size - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
 
-  ResetAllocationInfo();
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
 
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   return true;
 }
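
Because the chunk handed to NewSpace::Setup has power-of-two size and is aligned to that size, containment can be tested with a single mask: clearing the low bits of a candidate address must give back the chunk's start, which is exactly what address_mask_ encodes. A sketch of the test, with hypothetical start and size values:

    #include <cassert>
    #include <cstdint>

    // Fast containment test for a block whose size is a power of two and
    // whose start is aligned to that size, as NewSpace does with
    // address_mask_.
    bool InBlock(uintptr_t addr, uintptr_t start, uintptr_t size) {
      uintptr_t address_mask = ~(size - 1);
      return (addr & address_mask) == start;
    }

    int main() {
      const uintptr_t start = 0x40000000, size = 0x800000;  // 8 MB, aligned
      assert(InBlock(start, start, size));
      assert(InBlock(start + size - 1, start, size));
      assert(!InBlock(start + size, start, size));
      assert(!InBlock(start - 1, start, size));
      return 0;
    }
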
 
@@ -1067,34 +1239,28 @@
   start_ = NULL;
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
+  mc_forwarding_info_.top = NULL;
+  mc_forwarding_info_.limit = NULL;
 
   to_space_.TearDown();
   from_space_.TearDown();
-
-  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
-  ASSERT(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
-  chunk_base_ = NULL;
-  chunk_size_ = 0;
 }
 
 
 void NewSpace::Flip() {
-  SemiSpace::Swap(&from_space_, &to_space_);
+  SemiSpace tmp = from_space_;
+  from_space_ = to_space_;
+  to_space_ = tmp;
 }
 
 
 void NewSpace::Grow() {
-  // Double the semispace size but only up to maximum capacity.
   ASSERT(Capacity() < MaximumCapacity());
-  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
-  if (to_space_.GrowTo(new_capacity)) {
-    // Only grow from space if we managed to grow to-space.
-    if (!from_space_.GrowTo(new_capacity)) {
-      // If we managed to grow to-space but couldn't grow from-space,
-      // attempt to shrink to-space.
+  if (to_space_.Grow()) {
+    // Only grow from-space if we managed to grow to-space.
+    if (!from_space_.Grow()) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1102,20 +1268,21 @@
       }
     }
   }
+  allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
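
Grow and Shrink both follow a resize-then-mirror-then-roll-back pattern so the two semispaces stay at equal capacity: to-space is resized first, from-space must follow, and if it cannot, the to-space change is undone; only when the rollback also fails is the state left inconsistent. The pattern in isolation, with a hypothetical try_resize hook standing in for SemiSpace::Grow / GrowTo / ShrinkTo:

    // Resize two mirrored semispaces in lockstep; which == 0 is to-space,
    // which == 1 is from-space.
    bool ResizeBoth(bool (*try_resize)(int which, int capacity),
                    int old_capacity, int new_capacity) {
      if (!try_resize(0, new_capacity)) return false;  // to-space first
      if (try_resize(1, new_capacity)) return true;    // from-space follows
      // From-space failed: roll to-space back so the pair stays equal. If
      // even the rollback fails we reach the inconsistent state the
      // comments above warn about.
      try_resize(0, old_capacity);
      return false;
    }
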
 
 
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+  int rounded_new_capacity =
+      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
   if (rounded_new_capacity < Capacity() &&
       to_space_.ShrinkTo(rounded_new_capacity))  {
-    // Only shrink from-space if we managed to shrink to-space.
-    from_space_.Reset();
+    // Only shrink from-space if we managed to shrink to-space.
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-      // If we managed to shrink to-space but couldn't shrink from
-      // space, attempt to grow to-space again.
+      // If we managed to shrink to-space but couldn't shrink from-space,
+      // attempt to grow to-space again.
       if (!to_space_.GrowTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1123,98 +1290,36 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.page_high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::UpdateAllocationInfo() {
-  allocation_info_.top = to_space_.page_low();
-  allocation_info_.limit = to_space_.page_high();
-
-  // Lower limit during incremental marking.
-  if (heap()->incremental_marking()->IsMarking() &&
-      inline_allocation_limit_step() != 0) {
-    Address new_limit =
-        allocation_info_.top + inline_allocation_limit_step();
-    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
-  }
+  allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::ResetAllocationInfo() {
-  to_space_.Reset();
-  UpdateAllocationInfo();
-  pages_used_ = 0;
-  // Clear all mark-bits in the to-space.
-  NewSpacePageIterator it(&to_space_);
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
+  allocation_info_.top = to_space_.low();
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
-bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top;
-  if (NewSpacePage::IsAtStart(top)) {
-    // The current page is already empty. Don't try to make another.
-
-    // We should only get here if someone asks to allocate more
-    // than what can be stored in a single page.
-    // TODO(gc): Change the limit on new-space allocation to prevent this
-    // from happening (all such allocations should go directly to LOSpace).
-    return false;
-  }
-  if (!to_space_.AdvancePage()) {
-    // Failed to get a new page in to-space.
-    return false;
-  }
-
-  // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
-  if (heap()->gc_state() == Heap::SCAVENGE) {
-    heap()->promotion_queue()->SetNewLimit(limit);
-    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
-  }
-
-  int remaining_in_page = static_cast<int>(limit - top);
-  heap()->CreateFillerObjectAt(top, remaining_in_page);
-  pages_used_++;
-  UpdateAllocationInfo();
-
-  return true;
+void NewSpace::MCResetRelocationInfo() {
+  mc_forwarding_info_.top = from_space_.low();
+  mc_forwarding_info_.limit = from_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
 }
 
 
-MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
-  Address new_top = old_top + size_in_bytes;
-  Address high = to_space_.page_high();
-  if (allocation_info_.limit < high) {
-    // Incremental marking has lowered the limit to get a
-    // chance to do a step.
-    allocation_info_.limit = Min(
-        allocation_info_.limit + inline_allocation_limit_step_,
-        high);
-    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
-    top_on_previous_step_ = new_top;
-    return AllocateRaw(size_in_bytes);
-  } else if (AddFreshPage()) {
-    // Switched to new page. Try allocating again.
-    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
-    top_on_previous_step_ = to_space_.page_low();
-    return AllocateRaw(size_in_bytes);
-  } else {
-    return Failure::RetryAfterGC();
-  }
+void NewSpace::MCCommitRelocationInfo() {
+  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
+  // valid allocation info for the to space.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 #ifdef DEBUG
-// We do not use the SemiSpaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
 void NewSpace::Verify() {
   // The allocation pointer should be in the space or at the very end.
@@ -1222,57 +1327,63 @@
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.first_page()->area_start();
-  CHECK_EQ(current, to_space_.space_start());
+  Address current = to_space_.low();
+  while (current < top()) {
+    HeapObject* object = HeapObject::FromAddress(current);
 
-  while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
-      // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
-            current < top());
+    // The first word should be a map, and we expect all map pointers to
+    // be in map space.
+    Map* map = object->map();
+    ASSERT(map->IsMap());
+    ASSERT(heap()->map_space()->Contains(map));
 
-      HeapObject* object = HeapObject::FromAddress(current);
+    // The object should not be code or a map.
+    ASSERT(!object->IsMap());
+    ASSERT(!object->IsCode());
 
-      // The first word should be a map, and we expect all map pointers to
-      // be in map space.
-      Map* map = object->map();
-      CHECK(map->IsMap());
-      CHECK(heap()->map_space()->Contains(map));
+    // The object itself should look OK.
+    object->Verify();
 
-      // The object should not be code or a map.
-      CHECK(!object->IsMap());
-      CHECK(!object->IsCode());
+    // All the interior pointers should be contained in the heap.
+    VerifyPointersVisitor visitor;
+    int size = object->Size();
+    object->IterateBody(map->instance_type(), size, &visitor);
 
-      // The object itself should look OK.
-      object->Verify();
-
-      // All the interior pointers should be contained in the heap.
-      VerifyPointersVisitor visitor;
-      int size = object->Size();
-      object->IterateBody(map->instance_type(), size, &visitor);
-
-      current += size;
-    } else {
-      // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
-      // Next page should be valid.
-      CHECK(!page->is_anchor());
-      current = page->area_start();
-    }
+    current += size;
   }
 
-  // Check semi-spaces.
-  ASSERT_EQ(from_space_.id(), kFromSpace);
-  ASSERT_EQ(to_space_.id(), kToSpace);
-  from_space_.Verify();
-  to_space_.Verify();
+  // The allocation pointer should not be in the middle of an object.
+  ASSERT(current == top());
 }
 #endif
 
+
+bool SemiSpace::Commit() {
+  ASSERT(!is_committed());
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+      start_, capacity_, executable())) {
+    return false;
+  }
+  committed_ = true;
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  ASSERT(is_committed());
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+      start_, capacity_)) {
+    return false;
+  }
+  committed_ = false;
+  return true;
+}
+
+
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
-void SemiSpace::SetUp(Address start,
+bool SemiSpace::Setup(Address start,
                       int initial_capacity,
                       int maximum_capacity) {
   // Creates a space in the young generation. The constructor does not
@@ -1281,16 +1392,18 @@
   // otherwise.  In the mark-compact collector, the memory region of the from
   // space is used as the marking stack. It requires contiguous memory
   // addresses.
-  ASSERT(maximum_capacity >= Page::kPageSize);
-  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+  initial_capacity_ = initial_capacity;
   capacity_ = initial_capacity;
-  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+  maximum_capacity_ = maximum_capacity;
   committed_ = false;
+
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
   object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
   age_mark_ = start_;
+
+  return Commit();
 }
 
 
@@ -1300,266 +1413,81 @@
 }
 
 
-bool SemiSpace::Commit() {
-  ASSERT(!is_committed());
-  int pages = capacity_ / Page::kPageSize;
-  Address end = start_ + maximum_capacity_;
-  Address start = end - pages * Page::kPageSize;
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
-                                                          capacity_,
-                                                          executable())) {
+bool SemiSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
+  int maximum_extra = maximum_capacity_ - capacity_;
+  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
+                  maximum_extra);
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+      high(), extra, executable())) {
     return false;
   }
-
-  NewSpacePage* page = anchor();
-  for (int i = 1; i <= pages; i++) {
-    NewSpacePage* new_page =
-      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
-    new_page->InsertAfter(page);
-    page = new_page;
-  }
-
-  committed_ = true;
-  Reset();
-  return true;
-}
-
-
-bool SemiSpace::Uncommit() {
-  ASSERT(is_committed());
-  Address start = start_ + maximum_capacity_ - capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
-    return false;
-  }
-  anchor()->set_next_page(anchor());
-  anchor()->set_prev_page(anchor());
-
-  committed_ = false;
+  capacity_ += extra;
   return true;
 }
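
SemiSpace::Grow roughly doubles the committed capacity: the increment is the current capacity rounded up to the OS allocation alignment, clamped so capacity_ never exceeds maximum_capacity_, and the new range is committed directly above high(). The sizing arithmetic on its own; 'alignment' stands in for OS::AllocateAlignment() and GrowthAmount is a hypothetical name:

    #include <algorithm>

    // How much extra to commit when doubling a semispace, capped by the
    // remaining headroom; alignment is assumed to be a positive divisor.
    int GrowthAmount(int capacity, int maximum_capacity, int alignment) {
      int maximum_extra = maximum_capacity - capacity;
      int rounded_capacity =
          ((capacity + alignment - 1) / alignment) * alignment;
      return std::min(rounded_capacity, maximum_extra);
    }
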
 
 
 bool SemiSpace::GrowTo(int new_capacity) {
-  if (!is_committed()) {
-    if (!Commit()) return false;
-  }
-  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
-  int pages_before = capacity_ / Page::kPageSize;
-  int pages_after = new_capacity / Page::kPageSize;
-
-  Address end = start_ + maximum_capacity_;
-  Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
-
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start, delta, executable())) {
+      high(), delta, executable())) {
     return false;
   }
   capacity_ = new_capacity;
-  NewSpacePage* last_page = anchor()->prev_page();
-  ASSERT(last_page != anchor());
-  for (int i = pages_before + 1; i <= pages_after; i++) {
-    Address page_address = end - i * Page::kPageSize;
-    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
-                                                      page_address,
-                                                      this);
-    new_page->InsertAfter(last_page);
-    Bitmap::Clear(new_page);
-    // Duplicate the flags that was set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
-    last_page = new_page;
-  }
   return true;
 }
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
-  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
-  if (is_committed()) {
-    // Semispaces grow backwards from the end of their allocated capacity,
-    // so we find the before and after start addresses relative to the
-    // end of the space.
-    Address space_end = start_ + maximum_capacity_;
-    Address old_start = space_end - capacity_;
-    size_t delta = capacity_ - new_capacity;
-    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-
-    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
-    if (!allocator->UncommitBlock(old_start, delta)) {
-      return false;
-    }
-
-    int pages_after = new_capacity / Page::kPageSize;
-    NewSpacePage* new_last_page =
-        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
-    new_last_page->set_next_page(anchor());
-    anchor()->set_prev_page(new_last_page);
-    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+  size_t delta = capacity_ - new_capacity;
+  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+      high() - delta, delta)) {
+    return false;
   }
-
   capacity_ = new_capacity;
-
   return true;
 }
 
 
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
-  anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes
-  // when we swap.
-  anchor_.prev_page()->set_next_page(&anchor_);
-  anchor_.next_page()->set_prev_page(&anchor_);
-
-  bool becomes_to_space = (id_ == kFromSpace);
-  id_ = becomes_to_space ? kToSpace : kFromSpace;
-  NewSpacePage* page = anchor_.next_page();
-  while (page != &anchor_) {
-    page->set_owner(this);
-    page->SetFlags(flags, mask);
-    if (becomes_to_space) {
-      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
-      page->SetFlag(MemoryChunk::IN_TO_SPACE);
-      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
-      page->ResetLiveBytes();
-    } else {
-      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
-      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
-    }
-    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
-           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
-    page = page->next_page();
-  }
-}
-
-
-void SemiSpace::Reset() {
-  ASSERT(anchor_.next_page() != &anchor_);
-  current_page_ = anchor_.next_page();
-}
-
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
-  // We won't be swapping semispaces without data in them.
-  ASSERT(from->anchor_.next_page() != &from->anchor_);
-  ASSERT(to->anchor_.next_page() != &to->anchor_);
-
-  // Swap bits.
-  SemiSpace tmp = *from;
-  *from = *to;
-  *to = tmp;
-
-  // Fixup back-pointers to the page list anchor now that its address
-  // has changed.
-  // Swap to/from-space bits on pages.
-  // Copy GC flags from old active space (from-space) to new (to-space).
-  intptr_t flags = from->current_page()->GetFlags();
-  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
-
-  from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::set_age_mark(Address mark) {
-  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
-  age_mark_ = mark;
-  // Mark all pages up to the one containing mark.
-  NewSpacePageIterator it(space_start(), mark);
-  while (it.has_next()) {
-    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
-  }
-}
-
-
 #ifdef DEBUG
 void SemiSpace::Print() { }
 
 
-void SemiSpace::Verify() {
-  bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
-  while (page != &anchor_) {
-    CHECK(page->semi_space() == this);
-    CHECK(page->InNewSpace());
-    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
-                                        : MemoryChunk::IN_TO_SPACE));
-    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
-                                         : MemoryChunk::IN_FROM_SPACE));
-    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
-    if (!is_from_space) {
-      // The pointers-from-here-are-interesting flag isn't updated dynamically
-      // on from-space pages, so it might be out of sync with the marking state.
-      if (page->heap()->incremental_marking()->IsMarking()) {
-        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
-      } else {
-        CHECK(!page->IsFlagSet(
-            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
-      }
-      // TODO(gc): Check that the live_bytes_count_ field matches the
-      // black marking on the page (if we make it match in new-space).
-    }
-    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    CHECK(page->prev_page()->next_page() == page);
-    page = page->next_page();
-  }
-}
-
-
-void SemiSpace::AssertValidRange(Address start, Address end) {
-  // Addresses belong to same semi-space
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
-  // Start address is before end address, either on same page,
-  // or end address is on a later page in the linked list of
-  // semi-space pages.
-  if (page == end_page) {
-    CHECK(start <= end);
-  } else {
-    while (page != end_page) {
-      page = page->next_page();
-      CHECK_NE(page, space->anchor());
-    }
-  }
-}
+void SemiSpace::Verify() { }
 #endif
 
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator implementation.
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
+  Initialize(space, space->bottom(), space->top(), NULL);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                      HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
+  Initialize(space, space->bottom(), space->top(), size_func);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
+  Initialize(space, start, space->top(), NULL);
 }
 
 
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
-  Initialize(from, to, NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(Address start,
+void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
                                    Address end,
                                    HeapObjectCallback size_func) {
-  SemiSpace::AssertValidRange(start, end);
+  ASSERT(space->ToSpaceContains(start));
+  ASSERT(space->ToSpaceLow() <= end
+         && end <= space->ToSpaceHigh());
+  space_ = &space->to_space_;
   current_ = start;
   limit_ = end;
   size_func_ = size_func;
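
A note on the shape of the code being restored: the 3.6 iterator walks a
single contiguous range by bump-pointer advancement.  A minimal standalone
sketch of that pattern (toy code with an invented object layout, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    typedef uint8_t* Address;

    // Toy layout: the first four bytes of each object hold its total size.
    static int32_t ObjectSizeAt(Address addr) {
      int32_t size;
      memcpy(&size, addr, sizeof(size));
      return size;
    }

    static void WalkRange(Address current, Address limit) {
      // Objects are contiguous, so advancing by each size visits them all.
      while (current < limit) {
        int32_t size = ObjectSizeAt(current);
        printf("visited a %d-byte object\n", size);
        current += size;
      }
    }

    int main() {
      uint8_t space[24];
      int32_t first = 8, second = 16;
      memcpy(space, &first, sizeof(first));        // object 1: 8 bytes
      memcpy(space + 8, &second, sizeof(second));  // object 2: 16 bytes
      WalkRange(space, space + sizeof(space));
      return 0;
    }
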
@@ -1695,7 +1623,7 @@
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
     RecordAllocation(obj);
 }
 
@@ -1771,6 +1699,7 @@
   promoted_histogram_[type].increment_bytes(obj->Size());
 }
 
+
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
@@ -1779,440 +1708,493 @@
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
 
   // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a FreeSpace with at least one extra word (the next
-  // pointer), we set its map to be the free space map and its size to an
+  // is big enough to be a ByteArray with at least one extra word (the next
+  // pointer), we set its map to be the byte array map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (e.g., one or two words) to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
-    // Can't use FreeSpace::cast because it fails during deserialization.
-    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->set_size(size_in_bytes);
+  if (size_in_bytes > ByteArray::kHeaderSize) {
+    set_map(heap->raw_unchecked_byte_array_map());
+    // Can't use ByteArray::cast because it fails during deserialization.
+    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
+    set_map(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
+    set_map(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-  // deserialization because the free space map is not done yet.
+  // deserialization because the byte array map is not done yet.
 }
 
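
The tagging rule in the comment above is compact enough to spell out.  A
hedged toy sketch (invented tag names and header size, not V8's actual
maps) covering the three cases:

    #include <cassert>
    #include <cstdio>

    // Illustrative stand-ins for the three maps a free block can receive.
    enum FreeBlockTag {
      kByteArrayTag, kOnePointerFillerTag, kTwoPointerFillerTag
    };

    const int kPointerSize = sizeof(void*);
    const int kHeaderSize = 2 * kPointerSize;  // toy: map word + length word

    // Big blocks carry an explicit length; one- and two-word blocks get
    // filler tags whose identity alone implies their size.
    FreeBlockTag TagForFreeBlock(int size_in_bytes) {
      assert(size_in_bytes % kPointerSize == 0);
      if (size_in_bytes > kHeaderSize) return kByteArrayTag;
      if (size_in_bytes == kPointerSize) return kOnePointerFillerTag;
      assert(size_in_bytes == 2 * kPointerSize);  // only remaining legal size
      return kTwoPointerFillerTag;
    }

    int main() {
      printf("%d %d %d\n",
             TagForFreeBlock(kPointerSize),
             TagForFreeBlock(2 * kPointerSize),
             TagForFreeBlock(8 * kPointerSize));
      return 0;
    }
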
 
-FreeListNode* FreeListNode::next() {
+Address FreeListNode::next(Heap* heap) {
   ASSERT(IsFreeListNode(this));
-  if (map() == HEAP->raw_unchecked_free_space_map()) {
-    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kNextOffset));
-  } else {
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kPointerSize));
-  }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
-  ASSERT(IsFreeListNode(this));
-  if (map() == HEAP->raw_unchecked_free_space_map()) {
+  if (map() == heap->raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
+    return Memory::Address_at(address() + kNextOffset);
   } else {
-    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
+    return Memory::Address_at(address() + kPointerSize);
   }
 }
 
 
-void FreeListNode::set_next(FreeListNode* next) {
+void FreeListNode::set_next(Heap* heap, Address next) {
   ASSERT(IsFreeListNode(this));
-  // While we are booting the VM the free space map will actually be null.  So
-  // we have to make sure that we don't try to use it for anything at that
-  // stage.
-  if (map() == HEAP->raw_unchecked_free_space_map()) {
-    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) =
-        reinterpret_cast<Address>(next);
+  if (map() == heap->raw_unchecked_byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) = next;
   } else {
-    Memory::Address_at(address() + kPointerSize) =
-        reinterpret_cast<Address>(next);
+    Memory::Address_at(address() + kPointerSize) = next;
   }
 }
 
 
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner), heap_(owner->heap()) {
+OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
+  : heap_(heap),
+    owner_(owner) {
   Reset();
 }
 
 
-void FreeList::Reset() {
+void OldSpaceFreeList::Reset() {
   available_ = 0;
-  small_list_ = NULL;
-  medium_list_ = NULL;
-  large_list_ = NULL;
-  huge_list_ = NULL;
+  for (int i = 0; i < kFreeListsLength; i++) {
+    free_[i].head_node_ = NULL;
+  }
+  needs_rebuild_ = false;
+  finger_ = kHead;
+  free_[kHead].next_size_ = kEnd;
 }
 
 
-int FreeList::Free(Address start, int size_in_bytes) {
-  if (size_in_bytes == 0) return 0;
+void OldSpaceFreeList::RebuildSizeList() {
+  ASSERT(needs_rebuild_);
+  int cur = kHead;
+  for (int i = cur + 1; i < kFreeListsLength; i++) {
+    if (free_[i].head_node_ != NULL) {
+      free_[cur].next_size_ = i;
+      cur = i;
+    }
+  }
+  free_[cur].next_size_ = kEnd;
+  needs_rebuild_ = false;
+}
+
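
The next_size_ links rebuilt above form an ascending index over the
non-empty exact-size buckets, so allocation can skip empty sizes while
Free stays O(1).  A self-contained sketch of the idea with illustrative
constants:

    #include <cstdio>

    const int kHead = 0;  // sentinel bucket, never holds blocks
    const int kEnd = -1;
    const int kNumBuckets = 8;

    struct Bucket {
      void* head_node;  // first free block of this exact size, or null
      int next_size;    // index of the next non-empty bucket, or kEnd
    };

    void RebuildSizeLinks(Bucket* buckets) {
      int cur = kHead;
      for (int i = cur + 1; i < kNumBuckets; i++) {
        if (buckets[i].head_node != nullptr) {
          buckets[cur].next_size = i;  // chain this non-empty bucket in
          cur = i;
        }
      }
      buckets[cur].next_size = kEnd;  // terminate the chain
    }

    int main() {
      Bucket buckets[kNumBuckets] = {};
      int dummy;
      buckets[3].head_node = &dummy;  // pretend sizes 3 and 6 have blocks
      buckets[6].head_node = &dummy;
      RebuildSizeLinks(buckets);
      for (int i = buckets[kHead].next_size; i != kEnd;
           i = buckets[i].next_size) {
        printf("non-empty bucket: %d\n", i);  // prints 3, then 6
      }
      return 0;
    }
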
+
+int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
+#ifdef DEBUG
+  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
+#endif
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(heap_, size_in_bytes);
 
-  // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes < kSmallListMin) return size_in_bytes;
-
-  // Insert other blocks at the head of a free list of the appropriate
-  // magnitude.
-  if (size_in_bytes <= kSmallListMax) {
-    node->set_next(small_list_);
-    small_list_ = node;
-  } else if (size_in_bytes <= kMediumListMax) {
-    node->set_next(medium_list_);
-    medium_list_ = node;
-  } else if (size_in_bytes <= kLargeListMax) {
-    node->set_next(large_list_);
-    large_list_ = node;
-  } else {
-    node->set_next(huge_list_);
-    huge_list_ = node;
+  // We don't use the freelists in compacting mode.  This makes it more like a
+  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
+  // collector.
+  if (FLAG_always_compact) {
+    return size_in_bytes;
   }
+
+  // Early return to drop too-small blocks on the floor (one or two word
+  // blocks cannot hold a map pointer, a size field, and a pointer to the
+  // next block in the free list).
+  if (size_in_bytes < kMinBlockSize) {
+    return size_in_bytes;
+  }
+
+  // Insert other blocks at the head of an exact free list.
+  int index = size_in_bytes >> kPointerSizeLog2;
+  node->set_next(heap_, free_[index].head_node_);
+  free_[index].head_node_ = node->address();
   available_ += size_in_bytes;
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+  needs_rebuild_ = true;
   return 0;
 }
 
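
Because every block size is pointer-aligned, the exact-size bucket index
computed above is simply the size in words.  A trivial sketch (8-byte
pointers assumed for illustration):

    #include <cstdio>

    const int kPointerSizeLog2 = 3;  // illustrative: 8-byte pointers

    // Pointer-aligned sizes map densely onto buckets: size N lands in
    // bucket N / sizeof(void*).
    int BucketIndexFor(int size_in_bytes) {
      return size_in_bytes >> kPointerSizeLog2;
    }

    int main() {
      printf("%d\n", BucketIndexFor(64));  // prints 8
      return 0;
    }
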
 
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
-  FreeListNode* node = *list;
-
-  if (node == NULL) return NULL;
-
-  while (node != NULL &&
-         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= node->Size();
-    node = node->next();
-  }
-
-  if (node != NULL) {
-    *node_size = node->Size();
-    *list = node->next();
-  } else {
-    *list = NULL;
-  }
-
-  return node;
-}
-
-
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeListNode* node = NULL;
-
-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = PickNodeFromList(&small_list_, node_size);
-    if (node != NULL) return node;
-  }
-
-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = PickNodeFromList(&medium_list_, node_size);
-    if (node != NULL) return node;
-  }
-
-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = PickNodeFromList(&large_list_, node_size);
-    if (node != NULL) return node;
-  }
-
-  for (FreeListNode** cur = &huge_list_;
-       *cur != NULL;
-       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
-    while (cur_node != NULL &&
-           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
-      cur_node = cur_node->next();
-    }
-
-    *cur = cur_node;
-    if (cur_node == NULL) break;
-
-    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
-    if (size >= size_in_bytes) {
-      // Large enough node found.  Unlink it from the list.
-      node = *cur;
-      *node_size = size;
-      *cur = node->next();
-      break;
-    }
-  }
-
-  return node;
-}
-
-
-// Allocation on the old space free list.  If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space.  If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
+MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
   ASSERT(0 < size_in_bytes);
   ASSERT(size_in_bytes <= kMaxBlockSize);
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
-  // Don't free list allocate if there is linear space available.
-  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
 
-  int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
-  if (new_node == NULL) return NULL;
-
-  available_ -= new_node_size;
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
-
-  int bytes_left = new_node_size - size_in_bytes;
-  ASSERT(bytes_left >= 0);
-
-  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
-  // Mark the old linear allocation area with a free space map so it can be
-  // skipped when scanning the heap.  This also puts it back in the free list
-  // if it is big enough.
-  owner_->Free(owner_->top(), old_linear_size);
-
-#ifdef DEBUG
-  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
-    reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+  if (needs_rebuild_) RebuildSizeList();
+  int index = size_in_bytes >> kPointerSizeLog2;
+  // Check for a perfect fit.
+  if (free_[index].head_node_ != NULL) {
+    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
+    // If this was the last block of its size, remove the size.
+    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
+      RemoveSize(index);
+    available_ -= size_in_bytes;
+    *wasted_bytes = 0;
+    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+    return node;
   }
-#endif
-
-  owner_->heap()->incremental_marking()->OldSpaceStep(
-      size_in_bytes - old_linear_size);
-
-  // The old-space-step might have finished sweeping and restarted marking.
-  // Verify that it did not turn the page of the new node into an evacuation
-  // candidate.
-  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
-
-  // Memory in the linear allocation area is counted as allocated.  We may free
-  // a little of this again immediately - see below.
-  owner_->Allocate(new_node_size);
-
-  if (bytes_left > kThreshold &&
-      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-      FLAG_incremental_marking_steps) {
-    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
-    // We don't want to give too large linear areas to the allocator while
-    // incremental marking is going on, because we won't check again whether
-    // we want to do another increment until the linear area is used up.
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetTop(new_node->address() + size_in_bytes,
-                   new_node->address() + new_node_size);
-  } else {
-    // TODO(gc) Try not freeing linear allocation region when bytes_left
-    // are zero.
-    owner_->SetTop(NULL, NULL);
+  // Search the size list for the best fit.
+  int prev = finger_ < index ? finger_ : kHead;
+  int cur = FindSize(index, &prev);
+  ASSERT(index < cur);
+  if (cur == kEnd) {
+    // No large enough size in list.
+    *wasted_bytes = 0;
+    return Failure::RetryAfterGC(owner_);
   }
-
-  return new_node;
-}
-
-
-static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
-  intptr_t sum = 0;
-  while (n != NULL) {
-    if (Page::FromAddress(n->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
-      sum += free_space->Size();
-    }
-    n = n->next();
-  }
-  return sum;
-}
-
-
-void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
-  sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
-  if (sizes->huge_size_ < p->area_size()) {
-    sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
-    sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
-    sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
-  } else {
-    sizes->small_size_ = 0;
-    sizes->medium_size_ = 0;
-    sizes->large_size_ = 0;
-  }
-}
-
-
-static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
-  intptr_t sum = 0;
-  while (*n != NULL) {
-    if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
-      sum += free_space->Size();
-      *n = (*n)->next();
+  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  int rem = cur - index;
+  int rem_bytes = rem << kPointerSizeLog2;
+  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
+  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
+  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
+                                                     size_in_bytes);
+  // Distinguish the cases prev < rem < cur and rem <= prev < cur
+  // to avoid many redundant tests and calls to Insert/RemoveSize.
+  if (prev < rem) {
+    // Simple case: insert rem between prev and cur.
+    finger_ = prev;
+    free_[prev].next_size_ = rem;
+    // If this was the last block of size cur, remove the size.
+    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
+      free_[rem].next_size_ = free_[cur].next_size_;
     } else {
-      n = (*n)->next_address();
+      free_[rem].next_size_ = cur;
     }
+    // Add the remainder block.
+    rem_node->set_size(heap_, rem_bytes);
+    rem_node->set_next(heap_, free_[rem].head_node_);
+    free_[rem].head_node_ = rem_node->address();
+  } else {
+    // If this was the last block of size cur, remove the size.
+    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
+      finger_ = prev;
+      free_[prev].next_size_ = free_[cur].next_size_;
+    }
+    if (rem_bytes < kMinBlockSize) {
+      // Too-small remainder is wasted.
+      rem_node->set_size(heap_, rem_bytes);
+      available_ -= size_in_bytes + rem_bytes;
+      *wasted_bytes = rem_bytes;
+      return cur_node;
+    }
+    // Add the remainder block and, if needed, insert its size.
+    rem_node->set_size(heap_, rem_bytes);
+    rem_node->set_next(heap_, free_[rem].head_node_);
+    free_[rem].head_node_ = rem_node->address();
+    if (rem_node->next(heap_) == NULL) InsertSize(rem);
   }
-  return sum;
+  available_ -= size_in_bytes;
+  *wasted_bytes = 0;
+  return cur_node;
 }
 
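
The allocation path above is best fit with remainder splitting: the tail
of an oversized block goes back on a free list when it can still carry a
header, and is written off as waste otherwise.  A simplified standalone
sketch (kMinBlockSize is illustrative):

    #include <cstdio>

    const int kMinBlockSize = 16;  // smallest block that can hold a header

    struct FitResult {
      int used;      // bytes handed to the caller
      int recycled;  // remainder returned to the free list
      int wasted;    // remainder too small to reuse
    };

    // Satisfy requested_bytes from a block of block_bytes and classify the
    // tail, as the allocator above does.
    FitResult SplitBlock(int block_bytes, int requested_bytes) {
      FitResult r = {requested_bytes, 0, 0};
      int rem = block_bytes - requested_bytes;
      if (rem >= kMinBlockSize) {
        r.recycled = rem;  // tail becomes a new, smaller free block
      } else {
        r.wasted = rem;  // tail keeps a size header but is never reused
      }
      return r;
    }

    int main() {
      FitResult a = SplitBlock(64, 40);  // 24-byte tail is recycled
      FitResult b = SplitBlock(48, 40);  // 8-byte tail is wasted
      printf("a: recycled=%d wasted=%d\n", a.recycled, a.wasted);
      printf("b: recycled=%d wasted=%d\n", b.recycled, b.wasted);
      return 0;
    }
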
 
-intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
-
-  if (sum < p->area_size()) {
-    sum += EvictFreeListItemsInList(&small_list_, p) +
-        EvictFreeListItemsInList(&medium_list_, p) +
-        EvictFreeListItemsInList(&large_list_, p);
+void OldSpaceFreeList::MarkNodes() {
+  for (int i = 0; i < kFreeListsLength; i++) {
+    Address cur_addr = free_[i].head_node_;
+    while (cur_addr != NULL) {
+      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+      cur_addr = cur_node->next(heap_);
+      cur_node->SetMark();
+    }
   }
-
-  available_ -= static_cast<int>(sum);
-
-  return sum;
 }
 
 
 #ifdef DEBUG
-intptr_t FreeList::SumFreeList(FreeListNode* cur) {
-  intptr_t sum = 0;
-  while (cur != NULL) {
-    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->Size();
-    cur = cur->next();
+bool OldSpaceFreeList::Contains(FreeListNode* node) {
+  for (int i = 0; i < kFreeListsLength; i++) {
+    Address cur_addr = free_[i].head_node_;
+    while (cur_addr != NULL) {
+      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+      if (cur_node == node) return true;
+      cur_addr = cur_node->next(heap_);
+    }
   }
-  return sum;
-}
-
-
-static const int kVeryLongFreeList = 500;
-
-
-int FreeList::FreeListLength(FreeListNode* cur) {
-  int length = 0;
-  while (cur != NULL) {
-    length++;
-    cur = cur->next();
-    if (length == kVeryLongFreeList) return length;
-  }
-  return length;
-}
-
-
-bool FreeList::IsVeryLong() {
-  if (FreeListLength(small_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(medium_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(large_list_) == kVeryLongFreeList) return  true;
-  if (FreeListLength(huge_list_) == kVeryLongFreeList) return  true;
   return false;
 }
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = SumFreeList(small_list_);
-  sum += SumFreeList(medium_list_);
-  sum += SumFreeList(large_list_);
-  sum += SumFreeList(huge_list_);
-  return sum;
-}
 #endif
 
 
+FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
+                                     AllocationSpace owner,
+                                     int object_size)
+    : heap_(heap), owner_(owner), object_size_(object_size) {
+  Reset();
+}
+
+
+void FixedSizeFreeList::Reset() {
+  available_ = 0;
+  head_ = tail_ = NULL;
+}
+
+
+void FixedSizeFreeList::Free(Address start) {
+#ifdef DEBUG
+  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
+#endif
+  // We only use the freelists with mark-sweep.
+  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(heap_, object_size_);
+  node->set_next(heap_, NULL);
+  if (head_ == NULL) {
+    tail_ = head_ = node->address();
+  } else {
+    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
+    tail_ = node->address();
+  }
+  available_ += object_size_;
+}
+
+
+MaybeObject* FixedSizeFreeList::Allocate() {
+  if (head_ == NULL) {
+    return Failure::RetryAfterGC(owner_);
+  }
+
+  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
+  FreeListNode* node = FreeListNode::FromAddress(head_);
+  head_ = node->next(heap_);
+  available_ -= object_size_;
+  return node;
+}
+
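
The fixed-size free list above is a plain FIFO queue: Free appends at
tail_, Allocate pops from head_, so cells are reused in the order they
were freed and both operations stay O(1).  A minimal sketch of that
discipline (toy types, not V8's):

    #include <cstdio>

    struct Cell { Cell* next; };

    struct FifoFreeList {
      Cell* head = nullptr;
      Cell* tail = nullptr;

      void Free(Cell* cell) {
        cell->next = nullptr;
        if (head == nullptr) {
          head = tail = cell;  // first cell in an empty list
        } else {
          tail->next = cell;   // append at the tail
          tail = cell;
        }
      }

      Cell* Allocate() {
        if (head == nullptr) return nullptr;  // caller must expand or GC
        Cell* cell = head;
        head = cell->next;  // pop from the head
        if (head == nullptr) tail = nullptr;
        return cell;
      }
    };

    int main() {
      Cell a, b;
      FifoFreeList list;
      list.Free(&a);
      list.Free(&b);
      printf("%s\n", list.Allocate() == &a ? "a first" : "b first");
      return 0;
    }
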
+
+void FixedSizeFreeList::MarkNodes() {
+  Address cur_addr = head_;
+  while (cur_addr != NULL && cur_addr != tail_) {
+    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
+    cur_addr = cur_node->next(heap_);
+    cur_node->SetMark();
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
-bool NewSpace::ReserveSpace(int bytes) {
-  // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.  The limit can be set lower than
-  // the end of new space either because there is more space on the next page
-  // or because we have lowered the limit in order to get periodic incremental
-  // marking.  The most reliable way to ensure that there is linear space is
-  // to do the allocation, then rewind the limit.
-  ASSERT(bytes <= InitialCapacity());
-  MaybeObject* maybe = AllocateRaw(bytes);
-  Object* object = NULL;
-  if (!maybe->ToObject(&object)) return false;
-  HeapObject* allocation = HeapObject::cast(object);
-  Address top = allocation_info_.top;
-  if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
-    return true;
+void OldSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Reset relocation info.  During a compacting collection, everything in
+    // the space is considered 'available' and we will rediscover live data
+    // and waste during the collection.
+    MCResetRelocationInfo();
+    ASSERT(Available() == Capacity());
+  } else {
+    // During a non-compacting collection, everything below the linear
+    // allocation pointer is considered allocated (everything above is
+    // available) and we will rediscover available and wasted bytes during
+    // the collection.
+    accounting_stats_.AllocateBytes(free_list_.available());
+    accounting_stats_.FillWastedBytes(Waste());
   }
-  // There may be a borderline case here where the allocation succeeded, but
-  // the limit and top have moved on to a new page.  In that case we try again.
-  return ReserveSpace(bytes);
-}
-
-
-void PagedSpace::PrepareForMarkCompact() {
-  // We don't have a linear allocation area while sweeping.  It will be restored
-  // on the first allocation after the sweep.
-  // Mark the old linear allocation area with a free space map so it can be
-  // skipped when scanning the heap.
-  int old_linear_size = static_cast<int>(limit() - top());
-  Free(top(), old_linear_size);
-  SetTop(NULL, NULL);
-
-  // Stop lazy sweeping and clear marking bits for unswept pages.
-  if (first_unswept_page_ != NULL) {
-    Page* p = first_unswept_page_;
-    do {
-      // Do not use ShouldBeSweptLazily predicate here.
-      // New evacuation candidates were selected but they still have
-      // to be swept before collection starts.
-      if (!p->WasSwept()) {
-        Bitmap::Clear(p);
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-      }
-      p = p->next_page();
-    } while (p != anchor());
-  }
-  first_unswept_page_ = Page::FromAddress(NULL);
-  unswept_free_bytes_ = 0;
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= AreaSize());
-  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
-  Address current_top = allocation_info_.top;
-  Address new_top = current_top + size_in_bytes;
-  if (new_top <= allocation_info_.limit) return true;
+void OldSpace::MCCommitRelocationInfo() {
+  // Update fast allocation info.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = mc_forwarding_info_.limit;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
 
-  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
-  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
-  if (new_area == NULL) return false;
+  // The space is compacted and we haven't yet built free lists or
+  // wasted any space.
+  ASSERT(Waste() == 0);
+  ASSERT(AvailableFree() == 0);
 
-  int old_linear_size = static_cast<int>(limit() - top());
-  // Mark the old linear allocation area with a free space so it can be
-  // skipped when scanning the heap.  This also puts it back in the free list
-  // if it is big enough.
-  Free(top(), old_linear_size);
+  // Build the free list for the space.
+  int computed_size = 0;
+  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+  while (it.has_next()) {
+    Page* p = it.next();
+    // Space below the relocation pointer is allocated.
+    computed_size +=
+        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
+    if (it.has_next()) {
+      // Free the space at the top of the page.
+      int extra_size =
+          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
+      if (extra_size > 0) {
+        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+                                           extra_size);
+        // The bytes we have just "freed" to add to the free list were
+        // already accounted as available.
+        accounting_stats_.WasteBytes(wasted_bytes);
+      }
+    }
+  }
 
-  SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  Allocate(size_in_bytes);
+  // Make sure the computed size - based on the used portion of the pages in
+  // use - matches the size obtained while computing forwarding addresses.
+  ASSERT(computed_size == Size());
+}
+
+
+bool NewSpace::ReserveSpace(int bytes) {
+  // We can't reliably unpack a partial snapshot that needs more new space
+  // than the minimum NewSpace size.
+  ASSERT(bytes <= InitialCapacity());
+  Address limit = allocation_info_.limit;
+  Address top = allocation_info_.top;
+  return limit - top >= bytes;
+}
+
+
+void PagedSpace::FreePages(Page* prev, Page* last) {
+  if (last == AllocationTopPage()) {
+    // Pages are already at the end of used pages.
+    return;
+  }
+
+  Page* first = NULL;
+
+  // Remove pages from the list.
+  if (prev == NULL) {
+    first = first_page_;
+    first_page_ = last->next_page();
+  } else {
+    first = prev->next_page();
+    heap()->isolate()->memory_allocator()->SetNextPage(
+        prev, last->next_page());
+  }
+
+  // Attach it after the last page.
+  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
+  last_page_ = last;
+  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
+
+  // Clean them up.
+  do {
+    first->InvalidateWatermark(true);
+    first->SetAllocationWatermark(first->ObjectAreaStart());
+    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    first = first->next_page();
+  } while (first != NULL);
+
+  // The order of pages in this space might no longer be consistent with
+  // the order of pages in chunks.
+  page_list_is_chunk_ordered_ = false;
+}
+
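
FreePages is classic singly-linked-list surgery: detach the sublist
(prev, last] and splice it onto the tail.  A standalone sketch of just
that operation (toy Page type):

    struct Page { Page* next; };

    // Move the pages after `prev` up to and including `last` to the end
    // of the list tracked by *first_page/*last_page.
    void MoveSublistToEnd(Page** first_page, Page** last_page,
                          Page* prev, Page* last) {
      if (last == *last_page) return;  // sublist is already at the end
      Page* first;
      if (prev == nullptr) {           // sublist starts at the head
        first = *first_page;
        *first_page = last->next;
      } else {
        first = prev->next;
        prev->next = last->next;       // unlink the sublist
      }
      (*last_page)->next = first;      // splice it after the old tail
      *last_page = last;
      last->next = nullptr;
    }

    int main() {
      Page a{nullptr}, b{nullptr}, c{nullptr};
      a.next = &b;
      b.next = &c;                     // list: a -> b -> c
      Page* head = &a;
      Page* tail = &c;
      MoveSublistToEnd(&head, &tail, nullptr, &a);  // now b -> c -> a
      return (head == &b && tail == &a) ? 0 : 1;
    }
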
+
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+  const bool add_to_freelist = true;
+
+  // Mark used and unused pages to properly fill unused pages
+  // after reordering.
+  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+  Page* last_in_use = AllocationTopPage();
+  bool in_use = true;
+
+  while (all_pages_iterator.has_next()) {
+    Page* p = all_pages_iterator.next();
+    p->SetWasInUseBeforeMC(in_use);
+    if (p == last_in_use) {
+      // We passed the page containing the allocation top.  All subsequent
+      // pages are not used.
+      in_use = false;
+    }
+  }
+
+  if (page_list_is_chunk_ordered_) return;
+
+  Page* new_last_in_use = Page::FromAddress(NULL);
+  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
+      this, &first_page_, &last_page_, &new_last_in_use);
+  ASSERT(new_last_in_use->is_valid());
+
+  if (new_last_in_use != last_in_use) {
+    // The current allocation top points to a page which is now in the
+    // middle of the page list.  We should move the allocation top forward
+    // to the new last used page so various object iterators will continue
+    // to work properly.
+    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+                                         last_in_use->AllocationTop());
+
+    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+    if (size_in_bytes > 0) {
+      Address start = last_in_use->AllocationTop();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        heap()->CreateFillerObjectAt(start, size_in_bytes);
+      }
+    }
+
+    // The new last-in-use page was in the middle of the list before
+    // sorting, so it is full.
+    SetTop(new_last_in_use->AllocationTop());
+
+    ASSERT(AllocationTopPage() == new_last_in_use);
+    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+  }
+
+  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+  while (pages_in_use_iterator.has_next()) {
+    Page* p = pages_in_use_iterator.next();
+    if (!p->WasInUseBeforeMC()) {
+      // Empty page is in the middle of a sequence of used pages.
+      // Allocate it as a whole and deallocate immediately.
+      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+                                           p->ObjectAreaStart());
+
+      p->SetAllocationWatermark(p->ObjectAreaStart());
+      Address start = p->ObjectAreaStart();
+      if (deallocate_blocks) {
+        accounting_stats_.AllocateBytes(size_in_bytes);
+        DeallocateBlock(start, size_in_bytes, add_to_freelist);
+      } else {
+        heap()->CreateFillerObjectAt(start, size_in_bytes);
+      }
+    }
+  }
+
+  page_list_is_chunk_ordered_ = true;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    RelinkPageListInChunkOrder(false);
+  }
+}
+
+
+bool PagedSpace::ReserveSpace(int bytes) {
+  Address limit = allocation_info_.limit;
+  Address top = allocation_info_.top;
+  if (limit - top >= bytes) return true;
+
+  // There wasn't enough space in the current page.  Let's put the rest
+  // of the page on the free list and start a fresh page.
+  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
+
+  Page* reserved_page = TopPageOf(allocation_info_);
+  int bytes_left_to_reserve = bytes;
+  while (bytes_left_to_reserve > 0) {
+    if (!reserved_page->next_page()->is_valid()) {
+      if (heap()->OldGenerationAllocationLimitReached()) return false;
+      Expand(reserved_page);
+    }
+    bytes_left_to_reserve -= Page::kPageSize;
+    reserved_page = reserved_page->next_page();
+    if (!reserved_page->is_valid()) return false;
+  }
+  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
+  SetAllocationInfo(&allocation_info_,
+                    TopPageOf(allocation_info_)->next_page());
   return true;
 }
 
@@ -2220,70 +2202,47 @@
 // You have to call this last, since the implementation from PagedSpace
 // doesn't know that memory was 'promised' to large object space.
 bool LargeObjectSpace::ReserveSpace(int bytes) {
-  return heap()->OldGenerationCapacityAvailable() >= bytes &&
-         (!heap()->incremental_marking()->IsStopped() ||
-           heap()->OldGenerationSpaceAvailable() >= bytes);
+  return heap()->OldGenerationSpaceAvailable() >= bytes;
 }
 
 
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsSweepingComplete()) return true;
+// Slow case for normal allocation.  Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Linear allocation in this space has failed.  If there is another page
+  // in the space, move to that page and allocate there.  This allocation
+  // should succeed (size_in_bytes should not be greater than a page's
+  // object area size).
+  Page* current_page = TopPageOf(allocation_info_);
+  if (current_page->next_page()->is_valid()) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
 
-  intptr_t freed_bytes = 0;
-  Page* p = first_unswept_page_;
-  do {
-    Page* next_page = p->next_page();
-    if (ShouldBeSweptLazily(p)) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
-               reinterpret_cast<intptr_t>(p));
+  // There is no next page in this space.  Try free list allocation unless that
+  // is currently forbidden.
+  if (!heap()->linear_allocation()) {
+    int wasted_bytes;
+    Object* result;
+    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+    accounting_stats_.WasteBytes(wasted_bytes);
+    if (maybe->ToObject(&result)) {
+      accounting_stats_.AllocateBytes(size_in_bytes);
+
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        // There should be no hole between the allocation watermark
+        // and allocated object address.
+        // Memory above the allocation watermark was not swept and
+        // might contain garbage pointers to new space.
+        ASSERT(obj->address() == p->AllocationWatermark());
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
       }
-      DecreaseUnsweptFreeBytes(p);
-      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+
+      return obj;
     }
-    p = next_page;
-  } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
-  if (p == anchor()) {
-    first_unswept_page_ = Page::FromAddress(NULL);
-  } else {
-    first_unswept_page_ = p;
-  }
-
-  heap()->LowerOldGenLimits(freed_bytes);
-
-  heap()->FreeQueuedChunks();
-
-  return IsSweepingComplete();
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top >= allocation_info_.limit) return;
-
-  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
-    // Create filler object to keep page iterable if it was iterable.
-    int remaining =
-        static_cast<int>(allocation_info_.limit - allocation_info_.top);
-    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
-
-    allocation_info_.top = NULL;
-    allocation_info_.limit = NULL;
-  }
-}
-
-
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
-  // Allocation in this space has failed.
-
-  // If there are unswept pages advance lazy sweeper then sweep one page before
-  // allocating a new page.
-  if (first_unswept_page_->is_valid()) {
-    AdvanceSweeper(size_in_bytes);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -2295,18 +2254,9 @@
   }
 
   // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
-    return free_list_.Allocate(size_in_bytes);
-  }
-
-  // Last ditch, sweep all the remaining pages to try to find space.  This may
-  // cause a pause.
-  if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
+  ASSERT(!current_page->next_page()->is_valid());
+  if (Expand(current_page)) {
+    return AllocateInNextPage(current_page, size_in_bytes);
   }
 
   // Finally, fail.
@@ -2314,6 +2264,53 @@
 }
 
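
The slow-path order documented above (next page, then free list, then
expansion, then failure, with the old-generation limit checked before
expanding) can be modeled in a few lines.  A toy sketch in which plain
flags stand in for the real space queries:

    #include <cstdio>

    struct ToySpace {
      bool has_next_page;
      bool free_list_hit;
      bool at_gc_limit;
      bool can_expand;
    };

    const char* SlowAllocate(const ToySpace* s) {
      if (s->has_next_page) return "allocated in next page";      // (1)
      if (s->free_list_hit) return "allocated from free list";    // (2)
      if (s->at_gc_limit) return "failed: caller should GC";      // (4)
      if (s->can_expand) return "allocated in newly added page";  // (3)
      return "failed: could not expand";                          // (4)
    }

    int main() {
      ToySpace s = {false, false, false, true};
      printf("%s\n", SlowAllocate(&s));  // "allocated in newly added page"
      return 0;
    }
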
 
+void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
+  int free_size =
+      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+  if (free_size > 0) {
+    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
+    accounting_stats_.WasteBytes(wasted_bytes);
+  }
+}
+
+
+void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
+  int free_size =
+      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+  // In the fixed space free list all the free list items have the right size.
+  // We use up the rest of the page while preserving this invariant.
+  while (free_size >= object_size_in_bytes_) {
+    free_list_.Free(allocation_info_.top);
+    allocation_info_.top += object_size_in_bytes_;
+    free_size -= object_size_in_bytes_;
+    accounting_stats_.WasteBytes(object_size_in_bytes_);
+  }
+}
+
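
The loop above preserves the fixed-space invariant that every free-list
entry has exactly the object size: only whole cells are reclaimed, and any
sub-cell remainder at the page top stays unusable.  A tiny sketch:

    #include <cstdio>

    // Carve whole cells off a free region; the remainder is left alone.
    int CellsReclaimed(int free_bytes, int cell_size) {
      int cells = 0;
      while (free_bytes >= cell_size) {
        free_bytes -= cell_size;
        cells++;
      }
      return cells;  // free_bytes % cell_size bytes remain unused
    }

    int main() {
      printf("%d\n", CellsReclaimed(100, 24));  // prints 4; 4 bytes left
      return 0;
    }
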
+
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (there is assumed to be one), and
+// allocate linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+                                         int size_in_bytes) {
+  ASSERT(current_page->next_page()->is_valid());
+  Page* next_page = current_page->next_page();
+  next_page->ClearGCFields();
+  PutRestOfCurrentPageOnFreeList(current_page);
+  SetAllocationInfo(&allocation_info_, next_page);
+  return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+void OldSpace::DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist) {
+  Free(start, size_in_bytes, add_to_freelist);
+}
+
+
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics() {
   Isolate* isolate = Isolate::Current();
@@ -2416,7 +2413,7 @@
 void PagedSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2441,17 +2438,16 @@
 }
 
 
-void PagedSpace::ReportStatistics() {
+void OldSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", waste: %" V8_PTR_PREFIX "d"
              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  if (was_swept_conservatively_) return;
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
@@ -2460,28 +2456,192 @@
 // -----------------------------------------------------------------------------
 // FixedSpace implementation
 
-void FixedSpace::PrepareForMarkCompact() {
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
   // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact();
+  PagedSpace::PrepareForMarkCompact(will_compact);
 
-  // During a non-compacting collection, everything below the linear
-  // allocation pointer except wasted top-of-page blocks is considered
-  // allocated and we will rediscover available bytes during the
-  // collection.
-  accounting_stats_.AllocateBytes(free_list_.available());
+  if (will_compact) {
+    // Reset relocation info.
+    MCResetRelocationInfo();
+
+    // During a compacting collection, everything in the space is considered
+    // 'available' (set by the call to MCResetRelocationInfo) and we will
+    // rediscover live and wasted bytes during the collection.
+    ASSERT(Available() == Capacity());
+  } else {
+    // During a non-compacting collection, everything below the linear
+    // allocation pointer except wasted top-of-page blocks is considered
+    // allocated and we will rediscover available bytes during the
+    // collection.
+    accounting_stats_.AllocateBytes(free_list_.available());
+  }
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
+void FixedSpace::MCCommitRelocationInfo() {
+  // Update fast allocation info.
+  allocation_info_.top = mc_forwarding_info_.top;
+  allocation_info_.limit = mc_forwarding_info_.limit;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+
+  // The space is compacted and we haven't yet wasted any space.
+  ASSERT(Waste() == 0);
+
+  // Update allocation_top of each page in use and compute waste.
+  int computed_size = 0;
+  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
+  while (it.has_next()) {
+    Page* page = it.next();
+    Address page_top = page->AllocationTop();
+    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
+    if (it.has_next()) {
+      accounting_stats_.WasteBytes(
+          static_cast<int>(page->ObjectAreaEnd() - page_top));
+      page->SetAllocationWatermark(page_top);
+    }
+  }
+
+  // Make sure the computed size - based on the used portion of the
+  // pages in use - matches the size we adjust during allocation.
+  ASSERT(computed_size == Size());
+}
+
+
+// Slow case for normal allocation. Try in order: (1) allocate in the next
+// page in the space, (2) allocate off the space's free list, (3) expand the
+// space, (4) fail.
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  // Linear allocation in this space has failed.  If there is another page
+  // in the space, move to that page and allocate there.  This allocation
+  // should succeed.
+  Page* current_page = TopPageOf(allocation_info_);
+  if (current_page->next_page()->is_valid()) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // There is no next page in this space.  Try free list allocation unless
+  // that is currently forbidden.  The fixed space free list implicitly assumes
+  // that all free blocks are of the fixed size.
+  if (!heap()->linear_allocation()) {
+    Object* result;
+    MaybeObject* maybe = free_list_.Allocate();
+    if (maybe->ToObject(&result)) {
+      accounting_stats_.AllocateBytes(size_in_bytes);
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        // There should be no hole between the allocation watermark
+        // and allocated object address.
+        // Memory above the allocation watermark was not swept and
+        // might contain garbage pointers to new space.
+        ASSERT(obj->address() == p->AllocationWatermark());
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+      }
+
+      return obj;
+    }
+  }
+
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    return NULL;
+  }
+
+  // Try to expand the space and allocate in the new next page.
+  ASSERT(!current_page->next_page()->is_valid());
+  if (Expand(current_page)) {
+    return AllocateInNextPage(current_page, size_in_bytes);
+  }
+
+  // Finally, fail.
+  return NULL;
+}
+
+
+// Move to the next page (there is assumed to be one) and allocate there.
+// The top-of-page block is always wasted, because it is too small to hold a
+// map.
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+                                           int size_in_bytes) {
+  ASSERT(current_page->next_page()->is_valid());
+  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  Page* next_page = current_page->next_page();
+  next_page->ClearGCFields();
+  current_page->SetAllocationWatermark(allocation_info_.top);
+  accounting_stats_.WasteBytes(page_extra_);
+  SetAllocationInfo(&allocation_info_, next_page);
+  return AllocateLinearly(&allocation_info_, size_in_bytes);
+}
+
+
+void FixedSpace::DeallocateBlock(Address start,
+                                 int size_in_bytes,
+                                 bool add_to_freelist) {
+  // Free-list elements in fixed space are assumed to have a fixed size.
+  // We break the free block into chunks and add them to the free list
+  // individually.
+  int size = object_size_in_bytes();
+  ASSERT(size_in_bytes % size == 0);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += size) {
+    Free(a, add_to_freelist);
+  }
+}
+
+
+#ifdef DEBUG
+void FixedSpace::ReportStatistics() {
+  int pct = static_cast<int>(Available() * 100 / Capacity());
+  PrintF("  capacity: %" V8_PTR_PREFIX "d"
+             ", waste: %" V8_PTR_PREFIX "d"
+             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  ClearHistograms();
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+    CollectHistogramInfo(obj);
+  ReportHistogram(false);
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // MapSpace implementation
 
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  FixedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
 #ifdef DEBUG
 void MapSpace::VerifyObject(HeapObject* object) {
   // The object should be a map or a free-list node.
-  ASSERT(object->IsMap() || object->IsFreeSpace());
+  ASSERT(object->IsMap() || object->IsByteArray());
 }
 #endif
 
@@ -2502,73 +2662,129 @@
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_page_;
+  current_ = space->first_chunk_;
   size_func_ = NULL;
 }
 
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                          HeapObjectCallback size_func) {
-  current_ = space->first_page_;
+  current_ = space->first_chunk_;
   size_func_ = size_func;
 }
 
 
-HeapObject* LargeObjectIterator::Next() {
+HeapObject* LargeObjectIterator::next() {
   if (current_ == NULL) return NULL;
 
   HeapObject* object = current_->GetObject();
-  current_ = current_->next_page();
+  current_ = current_->next();
   return object;
 }
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) {
-    return key1 == key2;
+// LargeObjectChunk
+
+LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
+                                        Executability executable) {
+  size_t requested = ChunkSizeFor(size_in_bytes);
+  size_t size;
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  Isolate* isolate = Isolate::Current();
+  void* mem = isolate->memory_allocator()->AllocateRawMemory(
+      requested + guard_size, &size, executable);
+  if (mem == NULL) return NULL;
+
+  // The start of the chunk may be overlaid with a page, so we have to
+  // make sure that the page flags fit in the size field.
+  ASSERT((size & Page::kPageFlagMask) == 0);
+
+  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
+  if (size < requested + guard_size) {
+    isolate->memory_allocator()->FreeRawMemory(
+        mem, size, executable);
+    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
+    return NULL;
+  }
+
+  if (guard_size != 0) {
+    OS::Guard(mem, guard_size);
+    size -= guard_size;
+    mem = static_cast<Address>(mem) + guard_size;
+  }
+
+  ObjectSpace space = (executable == EXECUTABLE)
+      ? kObjectSpaceCodeSpace
+      : kObjectSpaceLoSpace;
+  isolate->memory_allocator()->PerformAllocationCallback(
+      space, kAllocationActionAllocate, size);
+
+  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
+  chunk->size_ = size;
+  chunk->GetPage()->heap_ = isolate->heap();
+  return chunk;
 }
 
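
For executable chunks, New reserves one extra page in front, protects it,
and hides it from the usable range; Free later subtracts guard_size back
out before releasing the mapping.  A hedged sketch of just the pointer
arithmetic (page size illustrative; the protection call itself is elided):

    #include <cstddef>
    #include <cstdint>

    const size_t kGuardSize = 4096;  // illustrative: one page

    // Given a raw mapping [*mem, *mem + *size), dedicate the leading page
    // as a guard and shrink the usable range accordingly.
    void CarveGuardPage(uint8_t** mem, size_t* size) {
      // ... protect [*mem, *mem + kGuardSize) here (e.g. via mprotect) ...
      *mem += kGuardSize;
      *size -= kGuardSize;
    }

    int main() {
      static uint8_t arena[2 * kGuardSize];
      uint8_t* mem = arena;
      size_t size = sizeof(arena);
      CarveGuardPage(&mem, &size);
      return (mem == arena + kGuardSize && size == kGuardSize) ? 0 : 1;
    }
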
 
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
-                                   intptr_t max_capacity,
-                                   AllocationSpace id)
+void LargeObjectChunk::Free(Executability executable) {
+  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+  ObjectSpace space =
+      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+  // Do not access instance fields after FreeRawMemory!
+  Address my_address = address();
+  size_t my_size = size();
+  Isolate* isolate = GetPage()->heap_->isolate();
+  MemoryAllocator* a = isolate->memory_allocator();
+  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
+  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
+  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
+}
+
+
+int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
+  int os_alignment = static_cast<int>(OS::AllocateAlignment());
+  if (os_alignment < Page::kPageSize) {
+    size_in_bytes += (Page::kPageSize - os_alignment);
+  }
+  return size_in_bytes + Page::kObjectStartOffset;
+}
+
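
ChunkSizeFor pads the request so that a page-aligned object start still
fits no matter where the OS places the mapping.  A standalone sketch with
illustrative constants:

    #include <cstdio>

    const int kPageSize = 4096;
    const int kObjectStartOffset = 256;  // illustrative header reservation

    int ChunkSizeForSketch(int size_in_bytes, int os_alignment) {
      if (os_alignment < kPageSize) {
        // The mapping may start at any os_alignment boundary, so add slack
        // to re-align the object area to a page boundary.
        size_in_bytes += (kPageSize - os_alignment);
      }
      return size_in_bytes + kObjectStartOffset;
    }

    int main() {
      printf("%d\n", ChunkSizeForSketch(10000, 4096));  // no slack needed
      printf("%d\n", ChunkSizeForSketch(10000, 64));    // slack added
      return 0;
    }
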
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      max_capacity_(max_capacity),
-      first_page_(NULL),
+      first_chunk_(NULL),
       size_(0),
       page_count_(0),
-      objects_size_(0),
-      chunk_map_(ComparePointers, 1024) {}
+      objects_size_(0) {}
 
 
-bool LargeObjectSpace::SetUp() {
-  first_page_ = NULL;
+bool LargeObjectSpace::Setup() {
+  first_chunk_ = NULL;
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
-  chunk_map_.Clear();
   return true;
 }
 
 
 void LargeObjectSpace::TearDown() {
-  while (first_page_ != NULL) {
-    LargePage* page = first_page_;
-    first_page_ = first_page_->next_page();
-    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
-    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
+  while (first_chunk_ != NULL) {
+    LargeObjectChunk* chunk = first_chunk_;
+    first_chunk_ = first_chunk_->next();
+    chunk->Free(chunk->GetPage()->PageExecutability());
   }
-  SetUp();
+  Setup();
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
-                                           Executability executable) {
+MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
+                                                   int object_size,
+                                                   Executability executable) {
+  ASSERT(0 < object_size && object_size <= requested_size);
+
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
@@ -2576,136 +2792,190 @@
     return Failure::RetryAfterGC(identity());
   }
 
-  if (Size() + object_size > max_capacity_) {
+  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
+  if (chunk == NULL) {
     return Failure::RetryAfterGC(identity());
   }
 
-  LargePage* page = heap()->isolate()->memory_allocator()->
-      AllocateLargePage(object_size, executable, this);
-  if (page == NULL) return Failure::RetryAfterGC(identity());
-  ASSERT(page->area_size() >= object_size);
-
-  size_ += static_cast<int>(page->size());
-  objects_size_ += object_size;
+  size_ += static_cast<int>(chunk->size());
+  objects_size_ += requested_size;
   page_count_++;
-  page->set_next_page(first_page_);
-  first_page_ = page;
+  chunk->set_next(first_chunk_);
+  first_chunk_ = chunk;
 
-  // Register all MemoryChunk::kAlignment-aligned chunks covered by
-  // this large page in the chunk map.
-  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
-  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
-  for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                              static_cast<uint32_t>(key),
-                                              true);
-    ASSERT(entry != NULL);
-    entry->value = page;
-  }
+  // Initialize page header.
+  Page* page = chunk->GetPage();
+  Address object_address = page->ObjectAreaStart();
 
-  HeapObject* object = page->GetObject();
+  // Clear the low order bit of the second word in the page to flag it as a
+  // large object page.  If the chunk_size happened to be written there, its
+  // low order bit should already be clear.
+  page->SetIsLargeObjectPage(true);
+  page->SetPageExecutability(executable);
+  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+  return HeapObject::FromAddress(object_address);
+}
 
-#ifdef DEBUG
-  // Make the object consistent so the heap can be verified in OldSpaceStep.
-  reinterpret_cast<Object**>(object->address())[0] =
-      heap()->fixed_array_map();
-  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-#endif
 
-  heap()->incremental_marking()->OldSpaceStep(object_size);
-  return object;
+MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  return AllocateRawInternal(size_in_bytes,
+                             size_in_bytes,
+                             EXECUTABLE);
+}
+
+
+MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  return AllocateRawInternal(size_in_bytes,
+                             size_in_bytes,
+                             NOT_EXECUTABLE);
+}
+
+
+MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  return AllocateRawInternal(size_in_bytes,
+                             size_in_bytes,
+                             NOT_EXECUTABLE);
 }
 
 
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  LargePage* page = FindPage(a);
-  if (page != NULL) {
-    return page->GetObject();
+  for (LargeObjectChunk* chunk = first_chunk_;
+       chunk != NULL;
+       chunk = chunk->next()) {
+    Address chunk_address = chunk->address();
+    if (chunk_address <= a && a < chunk_address + chunk->size()) {
+      return chunk->GetObject();
+    }
   }
   return Failure::Exception();
 }
 
 
-LargePage* LargeObjectSpace::FindPage(Address a) {
-  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
-  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key),
-                                        false);
-  if (e != NULL) {
-    ASSERT(e->value != NULL);
-    LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    ASSERT(page->is_valid());
-    if (page->Contains(a)) {
-      return page;
+LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+  // TODO(853): Change this implementation to only find executable
+  // chunks and use some kind of hash-based approach to speed it up.
+  for (LargeObjectChunk* chunk = first_chunk_;
+       chunk != NULL;
+       chunk = chunk->next()) {
+    Address chunk_address = chunk->address();
+    if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
+      return chunk;
     }
   }
   return NULL;
 }
 
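
FindObject above and FindChunkContainingPc below both reduce to a linear
containment scan over the chunk list, which is exactly what the TODO wants
to replace with a hash-based lookup.  A minimal sketch of the scan (toy
Chunk type):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Chunk {
      uint8_t* start;
      size_t size;
      Chunk* next;
    };

    // Test each half-open interval [start, start + size) for the address.
    Chunk* FindChunkContaining(Chunk* first, uint8_t* addr) {
      for (Chunk* c = first; c != nullptr; c = c->next) {
        if (c->start <= addr && addr < c->start + c->size) return c;
      }
      return nullptr;  // not in any large-object chunk
    }

    int main() {
      uint8_t arena[64];
      Chunk b = {arena + 32, 32, nullptr};
      Chunk a = {arena, 32, &b};
      printf("%s\n", FindChunkContaining(&a, arena + 40) == &b ? "in b" : "?");
      return 0;
    }
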
 
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
+  LargeObjectIterator it(this);
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Page* page = Page::FromAddress(object->address());
+      uint32_t marks = page->GetRegionMarks();
+      uint32_t newmarks = Page::kAllRegionsCleanMarks;
+
+      if (marks != Page::kAllRegionsCleanMarks) {
+        // For a large page a single dirty mark corresponds to several
+        // regions (modulo 32). So we treat a large page as a sequence of
+        // normal pages of size Page::kPageSize having the same dirty marks
+        // and subsequently iterate dirty regions on each of these pages.
+        Address start = object->address();
+        Address end = page->ObjectAreaEnd();
+        Address object_end = start + object->Size();
+
+        // Iterate regions of the first normal page covering object.
+        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+        newmarks |=
+            heap()->IterateDirtyRegions(marks >> first_region_number,
+                                        start,
+                                        end,
+                                        &Heap::IteratePointersInDirtyRegion,
+                                        copy_object) << first_region_number;
+
+        start = end;
+        end = start + Page::kPageSize;
+        while (end <= object_end) {
+          // Iterate next 32 regions.
+          newmarks |=
+              heap()->IterateDirtyRegions(marks,
+                                          start,
+                                          end,
+                                          &Heap::IteratePointersInDirtyRegion,
+                                          copy_object);
+          start = end;
+          end = start + Page::kPageSize;
+        }
+
+        if (start != object_end) {
+          // Iterate the last piece of an object which is less than
+          // Page::kPageSize.
+          newmarks |=
+              heap()->IterateDirtyRegions(marks,
+                                          start,
+                                          object_end,
+                                          &Heap::IteratePointersInDirtyRegion,
+                                          copy_object);
+        }
+
+        page->SetRegionMarks(newmarks);
+      }
+    }
+  }
+}
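
To make the windowing above concrete, here is a small standalone sketch; the
constant mirrors this tree's 8K Page::kPageSize, the object size is made up,
and the real code walks Address values rather than raw integers:

    #include <stdint.h>
    #include <stdio.h>

    static const uintptr_t kPageSize = 1 << 13;  // Page::kPageSize (8K).

    int main() {
      // A 20000-byte large object: two full 8K windows plus a tail. Each
      // window is matched against the same 32-bit dirty-mark word, exactly
      // as the loop above does.
      uintptr_t start = 0;
      uintptr_t object_end = 20000;
      for (uintptr_t s = start; s < object_end; s += kPageSize) {
        uintptr_t e = (s + kPageSize < object_end) ? s + kPageSize
                                                   : object_end;
        printf("window [%lu, %lu)\n", (unsigned long) s, (unsigned long) e);
      }
      return 0;
    }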
+
+
 void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargePage* previous = NULL;
-  LargePage* current = first_page_;
+  LargeObjectChunk* previous = NULL;
+  LargeObjectChunk* current = first_chunk_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    // Can this large page contain pointers to non-trivial objects?  No other
-    // pointer object is this big.
-    bool is_pointer_object = object->IsFixedArray();
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) {
-      mark_bit.Clear();
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+    if (object->IsMarked()) {
+      object->ClearMark();
+      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
       previous = current;
-      current = current->next_page();
+      current = current->next();
     } else {
-      LargePage* page = current;
       // Cut the chunk out from the chunk list.
-      current = current->next_page();
+      LargeObjectChunk* current_chunk = current;
+      current = current->next();
       if (previous == NULL) {
-        first_page_ = current;
+        first_chunk_ = current;
       } else {
-        previous->set_next_page(current);
+        previous->set_next(current);
       }
 
       // Free the chunk.
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
-      size_ -= static_cast<int>(page->size());
+      LiveObjectList::ProcessNonLive(object);
+
+      size_ -= static_cast<int>(current_chunk->size());
       objects_size_ -= object->Size();
       page_count_--;
-
-      // Remove entries belonging to this page.
-      // Use variable alignment to help pass length check (<= 80 characters)
-      // of single line in tools/presubmit.py.
-      const intptr_t alignment = MemoryChunk::kAlignment;
-      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
-      uintptr_t limit = base + (page->size()-1)/alignment;
-      for (uintptr_t key = base; key <= limit; key++) {
-        chunk_map_.Remove(reinterpret_cast<void*>(key),
-                          static_cast<uint32_t>(key));
-      }
-
-      if (is_pointer_object) {
-        heap()->QueueMemoryChunkForFree(page);
-      } else {
-        heap()->isolate()->memory_allocator()->Free(page);
-      }
+      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
     }
   }
-  heap()->FreeQueuedChunks();
 }
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
-  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (heap()->new_space()->Contains(address)) {
+    return false;
+  }
+  Page* page = Page::FromAddress(address);
 
-  bool owned = (chunk->owner() == this);
+  SLOW_ASSERT(!page->IsLargeObjectPage()
+              || !FindObject(address)->IsFailure());
 
-  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
-
-  return owned;
+  return page->IsLargeObjectPage();
 }
 
 
@@ -2713,14 +2983,14 @@
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargePage* chunk = first_page_;
+  for (LargeObjectChunk* chunk = first_chunk_;
        chunk != NULL;
-       chunk = chunk->next_page()) {
+       chunk = chunk->next()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->area_start());
+    ASSERT(object->address() == page->ObjectAreaStart());
 
     // The first word should be a map, and we expect all map pointers to be
     // in map space.
@@ -2745,6 +3015,9 @@
                           object->Size(),
                           &code_visitor);
     } else if (object->IsFixedArray()) {
+      // We loop over fixed arrays ourselves, rather than using the visitor,
+      // because the visitor doesn't support the start/offset iteration
+      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -2752,6 +3025,13 @@
           HeapObject* element_object = HeapObject::cast(element);
           ASSERT(heap()->Contains(element_object));
           ASSERT(element_object->map()->IsMap());
+          if (heap()->InNewSpace(element_object)) {
+            Address array_addr = object->address();
+            Address element_addr = array_addr + FixedArray::kHeaderSize +
+                j * kPointerSize;
+
+            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
+          }
         }
       }
     }
@@ -2761,7 +3041,7 @@
 
 void LargeObjectSpace::Print() {
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     obj->Print();
   }
 }
@@ -2772,7 +3052,7 @@
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     num_objects++;
     CollectHistogramInfo(obj);
   }
@@ -2786,38 +3066,13 @@
 void LargeObjectSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
-
-
-void Page::Print() {
-  // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n",
-         this->address(),
-         AllocationSpaceName(this->owner()->identity()));
-  printf(" --------------------------------------\n");
-  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
-  unsigned mark_size = 0;
-  for (HeapObject* object = objects.Next();
-       object != NULL;
-       object = objects.Next()) {
-    bool is_marked = Marking::MarkBitFrom(object).Get();
-    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
-    if (is_marked) {
-      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
-    }
-    object->ShortPrint();
-    PrintF("\n");
-  }
-  printf(" --------------------------------------\n");
-  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
-}
-
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
index b614c3b..f156496 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -29,7 +29,6 @@
 #define V8_SPACES_H_
 
 #include "allocation.h"
-#include "hashmap.h"
 #include "list.h"
 #include "log.h"
 
@@ -50,47 +49,45 @@
 //
 // The semispaces of the young generation are contiguous.  The old and map
 // spaces consist of a list of pages. A page has a page header and an object
-// area.
+// area. The page size is deliberately chosen to be 8K bytes.
+// The first word of a page is an opaque page header that holds the
+// address of the next page and its ownership information. The second word may
+// hold the allocation top address of this page. Heap objects are aligned to
+// the pointer size.
 //
 // There is a separate large object space for objects larger than
 // Page::kMaxHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
-// may be larger than the page size.
+// may be larger than 8K.
 //
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references.  See store-buffer.h.
+// A card marking write barrier is used to keep track of intergenerational
+// references. Old space pages are divided into regions of Page::kRegionSize
+// bytes. Each region has a corresponding dirty bit in the page header which is
+// set if the region might contain pointers to new space. For details about
+// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
+// method body.
 //
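A minimal sketch of the region bookkeeping just described, using this tree's
values (8K pages, 32 regions of 256 bytes each); the helper names are
illustrative, the real ones being the inline Page::GetRegionNumberForAddress()
and Page::GetRegionMaskForAddress():

    #include <stdint.h>

    static const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // 8K pages.
    static const int kRegionSizeLog2 = 8;                       // 256 bytes.

    // Offset of the address within its page, split into 256-byte regions.
    int RegionNumberForAddress(uintptr_t addr) {
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    // The dirty bit guarding the region that contains the address.
    uint32_t RegionMaskForAddress(uintptr_t addr) {
      return 1u << RegionNumberForAddress(addr);
    }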
-// During scavenges and mark-sweep collections we sometimes (after a store
-// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps so if the page belongs to old pointer space or large object
-// space it is essential to guarantee that the page does not contain any
-// garbage pointers to new space: every pointer aligned word which satisfies
-// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
-// special layout (e.g. no bare integer fields). This requirement does not
-// apply to map space which is iterated in a special fashion. However we still
-// require pointer fields of dead maps to be cleaned.
+// During scavenges and mark-sweep collections we iterate intergenerational
+// pointers without decoding heap object maps so if the page belongs to old
+// pointer space or large object space it is essential to guarantee that
+// the page does not contain any garbage pointers to new space: every pointer
+// aligned word which satisfies the Heap::InNewSpace() predicate must be a
+// pointer to a live heap object in new space. Thus objects in old pointer
+// and large object spaces should have a special layout (e.g. no bare integer
+// fields). This requirement does not apply to map space which is iterated in
+// a special fashion. However we still require pointer fields of dead maps to
+// be cleaned.
 //
-// To enable lazy cleaning of old space pages we can mark chunks of the page
-// as being garbage.  Garbage sections are marked with a special map.  These
-// sections are skipped when scanning the page, even if we are otherwise
-// scanning without regard for object boundaries.  Garbage sections are chained
-// together to form a free list after a GC.  Garbage sections created outside
-// of GCs by object truncation etc. may not be in the free list chain.  Very
-// small free spaces are ignored, they need only be cleaned of bogus pointers
-// into new space.
+// To enable lazy cleaning of old space pages we use the notion of an
+// allocation watermark. Every pointer under the watermark is considered to be
+// well formed. The page allocation watermark is not necessarily equal to the
+// page allocation top, but all live objects on a page must reside under the
+// allocation watermark. During scavenge the allocation watermark might be
+// bumped and invalid pointers might appear below it. To avoid following them
+// we store a valid watermark in a special field in the page header and set
+// the page's WATERMARK_INVALIDATED flag. For details see comments in the
+// Page::SetAllocationWatermark() method body.
 //
-// Each page may have up to one special garbage section.  The start of this
-// section is denoted by the top field in the space.  The end of the section
-// is denoted by the limit field in the space.  This special garbage section
-// is not marked with a free space map in the data.  The point of this section
-// is to enable linear allocation without having to constantly update the byte
-// array every time the top field is updated and a new object is created.  The
-// special garbage section is not in the chain of garbage sections.
-//
-// Since the top and limit fields are in the space, not the page, only one page
-// has a special garbage section, and if the top and limit are equal then there
-// is no special garbage section.
 
 // Some assertion macros used in the debugging mode.
 
@@ -104,7 +101,7 @@
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
 
 #define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
 
 #define ASSERT_PAGE_OFFSET(offset)                                             \
   ASSERT((Page::kObjectStartOffset <= offset)                                  \
@@ -117,540 +114,30 @@
 class PagedSpace;
 class MemoryAllocator;
 class AllocationInfo;
-class Space;
-class FreeList;
-class MemoryChunk;
-
-class MarkBit {
- public:
-  typedef uint32_t CellType;
-
-  inline MarkBit(CellType* cell, CellType mask, bool data_only)
-      : cell_(cell), mask_(mask), data_only_(data_only) { }
-
-  inline CellType* cell() { return cell_; }
-  inline CellType mask() { return mask_; }
-
-#ifdef DEBUG
-  bool operator==(const MarkBit& other) {
-    return cell_ == other.cell_ && mask_ == other.mask_;
-  }
-#endif
-
-  inline void Set() { *cell_ |= mask_; }
-  inline bool Get() { return (*cell_ & mask_) != 0; }
-  inline void Clear() { *cell_ &= ~mask_; }
-
-  inline bool data_only() { return data_only_; }
-
-  inline MarkBit Next() {
-    CellType new_mask = mask_ << 1;
-    if (new_mask == 0) {
-      return MarkBit(cell_ + 1, 1, data_only_);
-    } else {
-      return MarkBit(cell_, new_mask, data_only_);
-    }
-  }
-
- private:
-  CellType* cell_;
-  CellType mask_;
-  // This boolean indicates that the object is in a data-only space with no
-  // pointers.  This enables some optimizations when marking.
-  // It is expected that this field is inlined and turned into control flow
-  // at the place where the MarkBit object is created.
-  bool data_only_;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
-  static const uint32_t kBitsPerCell = 32;
-  static const uint32_t kBitsPerCellLog2 = 5;
-  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
-  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
-  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
-  static const size_t kLength =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
-  static const size_t kSize =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
-  static int CellsForLength(int length) {
-    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
-  }
-
-  int CellsCount() {
-    return CellsForLength(kLength);
-  }
-
-  static int SizeFor(int cells_count) {
-    return sizeof(MarkBit::CellType) * cells_count;
-  }
-
-  INLINE(static uint32_t IndexToCell(uint32_t index)) {
-    return index >> kBitsPerCellLog2;
-  }
-
-  INLINE(static uint32_t CellToIndex(uint32_t index)) {
-    return index << kBitsPerCellLog2;
-  }
-
-  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
-    return (index + kBitIndexMask) & ~kBitIndexMask;
-  }
-
-  INLINE(MarkBit::CellType* cells()) {
-    return reinterpret_cast<MarkBit::CellType*>(this);
-  }
-
-  INLINE(Address address()) {
-    return reinterpret_cast<Address>(this);
-  }
-
-  INLINE(static Bitmap* FromAddress(Address addr)) {
-    return reinterpret_cast<Bitmap*>(addr);
-  }
-
-  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
-    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
-    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
-    return MarkBit(cell, mask, data_only);
-  }
-
-  static inline void Clear(MemoryChunk* chunk);
-
-  static void PrintWord(uint32_t word, uint32_t himask = 0) {
-    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
-      if ((mask & himask) != 0) PrintF("[");
-      PrintF((mask & word) ? "1" : "0");
-      if ((mask & himask) != 0) PrintF("]");
-    }
-  }
-
-  class CellPrinter {
-   public:
-    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
-
-    void Print(uint32_t pos, uint32_t cell) {
-      if (cell == seq_type) {
-        seq_length++;
-        return;
-      }
-
-      Flush();
-
-      if (IsSeq(cell)) {
-        seq_start = pos;
-        seq_length = 0;
-        seq_type = cell;
-        return;
-      }
-
-      PrintF("%d: ", pos);
-      PrintWord(cell);
-      PrintF("\n");
-    }
-
-    void Flush() {
-      if (seq_length > 0) {
-        PrintF("%d: %dx%d\n",
-               seq_start,
-               seq_type == 0 ? 0 : 1,
-               seq_length * kBitsPerCell);
-        seq_length = 0;
-      }
-    }
-
-    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
-   private:
-    uint32_t seq_start;
-    uint32_t seq_type;
-    uint32_t seq_length;
-  };
-
-  void Print() {
-    CellPrinter printer;
-    for (int i = 0; i < CellsCount(); i++) {
-      printer.Print(i, cells()[i]);
-    }
-    printer.Flush();
-    PrintF("\n");
-  }
-
-  bool IsClean() {
-    for (int i = 0; i < CellsCount(); i++) {
-      if (cells()[i] != 0) return false;
-    }
-    return true;
-  }
-};
-
-
-class SkipList;
-class SlotsBuffer;
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accommodate
-// any heap object.
-class MemoryChunk {
- public:
-  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
-  static MemoryChunk* FromAddress(Address a) {
-    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
-  }
-
-  // Only works for addresses in pointer spaces, not data or code spaces.
-  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
-
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  bool is_valid() { return address() != NULL; }
-
-  MemoryChunk* next_chunk() const { return next_chunk_; }
-  MemoryChunk* prev_chunk() const { return prev_chunk_; }
-
-  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
-  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
-
-  Space* owner() const {
-    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
-        kFailureTag) {
-      return reinterpret_cast<Space*>(owner_ - kFailureTag);
-    } else {
-      return NULL;
-    }
-  }
-
-  void set_owner(Space* space) {
-    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
-    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
-    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
-           kFailureTag);
-  }
-
-  VirtualMemory* reserved_memory() {
-    return &reservation_;
-  }
-
-  void InitializeReservedMemory() {
-    reservation_.Reset();
-  }
-
-  void set_reserved_memory(VirtualMemory* reservation) {
-    ASSERT_NOT_NULL(reservation);
-    reservation_.TakeControl(reservation);
-  }
-
-  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
-  void initialize_scan_on_scavenge(bool scan) {
-    if (scan) {
-      SetFlag(SCAN_ON_SCAVENGE);
-    } else {
-      ClearFlag(SCAN_ON_SCAVENGE);
-    }
-  }
-  inline void set_scan_on_scavenge(bool scan);
-
-  int store_buffer_counter() { return store_buffer_counter_; }
-  void set_store_buffer_counter(int counter) {
-    store_buffer_counter_ = counter;
-  }
-
-  bool Contains(Address addr) {
-    return addr >= area_start() && addr < area_end();
-  }
-
-  // Checks whether addr can be a limit of addresses in this page.
-  // It's a limit if it's in the page, or if it's just after the
-  // last byte of the page.
-  bool ContainsLimit(Address addr) {
-    return addr >= area_start() && addr <= area_end();
-  }
-
-  enum MemoryChunkFlags {
-    IS_EXECUTABLE,
-    ABOUT_TO_BE_FREED,
-    POINTERS_TO_HERE_ARE_INTERESTING,
-    POINTERS_FROM_HERE_ARE_INTERESTING,
-    SCAN_ON_SCAVENGE,
-    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
-    IN_TO_SPACE,    // All pages in new space have one of these two set.
-    NEW_SPACE_BELOW_AGE_MARK,
-    CONTAINS_ONLY_DATA,
-    EVACUATION_CANDIDATE,
-    RESCAN_ON_EVACUATION,
-
-    // Pages swept precisely can be iterated, hitting only the live objects.
-    // Whereas those swept conservatively cannot be iterated over. Both flags
-    // indicate that marking bits have been cleared by the sweeper, otherwise
-    // marking bits are still intact.
-    WAS_SWEPT_PRECISELY,
-    WAS_SWEPT_CONSERVATIVELY,
-
-    // Last flag, keep at bottom.
-    NUM_MEMORY_CHUNK_FLAGS
-  };
-
-
-  static const int kPointersToHereAreInterestingMask =
-      1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
-  static const int kPointersFromHereAreInterestingMask =
-      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
-  static const int kEvacuationCandidateMask =
-      1 << EVACUATION_CANDIDATE;
-
-  static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) |
-      (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) |
-      (1 << IN_TO_SPACE);
-
-
-  void SetFlag(int flag) {
-    flags_ |= static_cast<uintptr_t>(1) << flag;
-  }
-
-  void ClearFlag(int flag) {
-    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
-  }
-
-  void SetFlagTo(int flag, bool value) {
-    if (value) {
-      SetFlag(flag);
-    } else {
-      ClearFlag(flag);
-    }
-  }
-
-  bool IsFlagSet(int flag) {
-    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
-  }
-
-  // Set or clear multiple flags at a time. The flags in the mask
-  // are set to the value in "flags", the rest retain the current value
-  // in flags_.
-  void SetFlags(intptr_t flags, intptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
-  }
-
-  // Return all current flags.
-  intptr_t GetFlags() { return flags_; }
-
-  // Manage live byte count (count of bytes known to be live,
-  // because they are marked black).
-  void ResetLiveBytes() {
-    if (FLAG_gc_verbose) {
-      PrintF("ResetLiveBytes:%p:%x->0\n",
-             static_cast<void*>(this), live_byte_count_);
-    }
-    live_byte_count_ = 0;
-  }
-  void IncrementLiveBytes(int by) {
-    if (FLAG_gc_verbose) {
-      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
-             static_cast<void*>(this), live_byte_count_,
-             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
-             live_byte_count_ + by);
-    }
-    live_byte_count_ += by;
-    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
-  }
-  int LiveBytes() {
-    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
-    return live_byte_count_;
-  }
-
-  static void IncrementLiveBytesFromGC(Address address, int by) {
-    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
-  }
-
-  static void IncrementLiveBytesFromMutator(Address address, int by);
-
-  static const intptr_t kAlignment =
-      (static_cast<uintptr_t>(1) << kPageSizeBits);
-
-  static const intptr_t kAlignmentMask = kAlignment - 1;
-
-  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
-
-  static const intptr_t kLiveBytesOffset =
-     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
-  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
-  static const size_t kHeaderSize =
-      kSlotsBufferOffset + kPointerSize + kPointerSize;
-
-  static const int kBodyOffset =
-    CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.  Also aligned to 32 words because
-  // the marking bitmap is arranged in 32 bit chunks.
-  static const int kObjectStartAlignment = 32 * kPointerSize;
-  static const int kObjectStartOffset = kBodyOffset - 1 +
-      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
-  size_t size() const { return size_; }
-
-  void set_size(size_t size) {
-    size_ = size;
-  }
-
-  void SetArea(Address area_start, Address area_end) {
-    area_start_ = area_start;
-    area_end_ = area_end;
-  }
-
-  Executability executable() {
-    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-  }
-
-  bool ContainsOnlyData() {
-    return IsFlagSet(CONTAINS_ONLY_DATA);
-  }
-
-  bool InNewSpace() {
-    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
-  }
-
-  bool InToSpace() {
-    return IsFlagSet(IN_TO_SPACE);
-  }
-
-  bool InFromSpace() {
-    return IsFlagSet(IN_FROM_SPACE);
-  }
-
-  // ---------------------------------------------------------------------
-  // Markbits support
-
-  inline Bitmap* markbits() {
-    return Bitmap::FromAddress(address() + kHeaderSize);
-  }
-
-  void PrintMarkbits() { markbits()->Print(); }
-
-  inline uint32_t AddressToMarkbitIndex(Address addr) {
-    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
-  }
-
-  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
-    const intptr_t offset =
-        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
-    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
-  }
-
-  inline Address MarkbitIndexToAddress(uint32_t index) {
-    return this->address() + (index << kPointerSizeLog2);
-  }
-
-  void InsertAfter(MemoryChunk* other);
-  void Unlink();
-
-  inline Heap* heap() { return heap_; }
-
-  static const int kFlagsOffset = kPointerSize * 3;
-
-  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
-
-  bool ShouldSkipEvacuationSlotRecording() {
-    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
-  }
-
-  inline SkipList* skip_list() {
-    return skip_list_;
-  }
-
-  inline void set_skip_list(SkipList* skip_list) {
-    skip_list_ = skip_list;
-  }
-
-  inline SlotsBuffer* slots_buffer() {
-    return slots_buffer_;
-  }
-
-  inline SlotsBuffer** slots_buffer_address() {
-    return &slots_buffer_;
-  }
-
-  void MarkEvacuationCandidate() {
-    ASSERT(slots_buffer_ == NULL);
-    SetFlag(EVACUATION_CANDIDATE);
-  }
-
-  void ClearEvacuationCandidate() {
-    ASSERT(slots_buffer_ == NULL);
-    ClearFlag(EVACUATION_CANDIDATE);
-  }
-
-  Address area_start() { return area_start_; }
-  Address area_end() { return area_end_; }
-  int area_size() {
-    return static_cast<int>(area_end() - area_start());
-  }
-
- protected:
-  MemoryChunk* next_chunk_;
-  MemoryChunk* prev_chunk_;
-  size_t size_;
-  intptr_t flags_;
-
-  // Start and end of allocatable memory on this chunk.
-  Address area_start_;
-  Address area_end_;
-
-  // If the chunk needs to remember its memory reservation, it is stored here.
-  VirtualMemory reservation_;
-  // The identity of the owning space.  This is tagged as a failure pointer, but
-  // no failure can be in an object, so this can be distinguished from any entry
-  // in a fixed array.
-  Address owner_;
-  Heap* heap_;
-  // Used by the store buffer to keep track of which pages to mark scan-on-
-  // scavenge.
-  int store_buffer_counter_;
-  // Count of bytes marked black on page.
-  int live_byte_count_;
-  SlotsBuffer* slots_buffer_;
-  SkipList* skip_list_;
-
-  static MemoryChunk* Initialize(Heap* heap,
-                                 Address base,
-                                 size_t size,
-                                 Address area_start,
-                                 Address area_end,
-                                 Executability executable,
-                                 Space* owner);
-
-  friend class MemoryAllocator;
-};
-
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
 
 // -----------------------------------------------------------------------------
-// A page is a memory chunk of a size 1MB. Large object pages may be larger.
+// A page is normally 8K bytes. Large object pages may be larger.  A page
+// address is always aligned to the 8K page size.
+//
+// Each page starts with a header of Page::kPageHeaderSize bytes which contains
+// bookkeeping data.
+//
+// The mark-compact collector transforms a map pointer into a page index and a
+// page offset. The exact encoding is described in the comments for
+// class MapWord in objects.h.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
 //   Page* p = Page::FromAllocationTop(top);
-class Page : public MemoryChunk {
+class Page {
  public:
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[
-  // This only works if the object is in fact in a page.  See also MemoryChunk::
-  // FromAddress() and FromAnyAddress().
+  //
+  // Note that this function only works for addresses in normal paged
+  // spaces and addresses in the first 8K of large object pages (i.e.,
+  // the start of large objects but not necessarily derived pointers
+  // within them).
   INLINE(static Page* FromAddress(Address a)) {
     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
   }
@@ -661,23 +148,66 @@
   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
   INLINE(static Page* FromAllocationTop(Address top)) {
     Page* p = FromAddress(top - kPointerSize);
+    ASSERT_PAGE_OFFSET(p->Offset(top));
     return p;
   }
 
-  // Returns the next page in the chain of pages owned by a space.
+  // Returns the start address of this page.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Checks whether this is a valid page address.
+  bool is_valid() { return address() != NULL; }
+
+  // Returns the next page of this page.
   inline Page* next_page();
-  inline Page* prev_page();
-  inline void set_next_page(Page* page);
-  inline void set_prev_page(Page* page);
+
+  // Return the end of allocation in this page. Undefined for unused pages.
+  inline Address AllocationTop();
+
+  // Return the allocation watermark for the page.
+  // For old space pages it is guaranteed that the area under the watermark
+  // does not contain any garbage pointers to new space.
+  inline Address AllocationWatermark();
+
+  // Return the allocation watermark offset from the beginning of the page.
+  inline uint32_t AllocationWatermarkOffset();
+
+  inline void SetAllocationWatermark(Address allocation_watermark);
+
+  inline void SetCachedAllocationWatermark(Address allocation_watermark);
+  inline Address CachedAllocationWatermark();
+
+  // Returns the start address of the object area in this page.
+  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
+
+  // Returns the end address (exclusive) of the object area in this page.
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
+  // True if this page was in use before current compaction started.
+  // Result is valid only for pages owned by paged spaces and
+  // only after PagedSpace::PrepareForMarkCompact was called.
+  inline bool WasInUseBeforeMC();
+
+  inline void SetWasInUseBeforeMC(bool was_in_use);
+
+  // True if this page is a large object page.
+  inline bool IsLargeObjectPage();
+
+  inline void SetIsLargeObjectPage(bool is_large_object_page);
+
+  inline Executability PageExecutability();
+
+  inline void SetPageExecutability(Executability executable);
+
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
+    ASSERT_PAGE_OFFSET(offset);
     return offset;
   }
 
@@ -688,70 +218,144 @@
   }
 
   // ---------------------------------------------------------------------
+  // Card marking support
+
+  static const uint32_t kAllRegionsCleanMarks = 0x0;
+  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
+
+  inline uint32_t GetRegionMarks();
+  inline void SetRegionMarks(uint32_t dirty);
+
+  inline uint32_t GetRegionMaskForAddress(Address addr);
+  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
+  inline int GetRegionNumberForAddress(Address addr);
+
+  inline void MarkRegionDirty(Address addr);
+  inline bool IsRegionDirty(Address addr);
+
+  inline void ClearRegionMarks(Address start,
+                               Address end,
+                               bool reaches_limit);
 
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Object area size in bytes.
-  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
-
-  // Maximum object size that fits in a page.
-  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
-
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
+  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
+    kIntSize + kPointerSize + kPointerSize;
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.
+  static const int kObjectStartOffset =
+      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
+
+  // Object area size in bytes.
+  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+
+  // Maximum object size that fits in a page.
+  static const int kMaxHeapObjectSize = kObjectAreaSize;
+
+  static const int kDirtyFlagOffset = 2 * kPointerSize;
+  static const int kRegionSizeLog2 = 8;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
+
+  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
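
Spelled out: kPageSize is 1 << 13 = 8192 bytes and kBitsPerInt is 32, so
kRegionSize = 8192 / 32 = 256 = 1 << kRegionSizeLog2, which is exactly the
relation the STATIC_CHECK above pins down.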
+
+  enum PageFlag {
+    IS_NORMAL_PAGE = 0,
+    WAS_IN_USE_BEFORE_MC,
+
+    // Page allocation watermark was bumped by preallocation during scavenge.
+    // The correct watermark can be retrieved via CachedAllocationWatermark().
+    WATERMARK_INVALIDATED,
+    IS_EXECUTABLE,
+    NUM_PAGE_FLAGS  // Must be last
+  };
+  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
+
+  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
+  // scavenge we just invalidate the watermark on each old space page after
+  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
+  // flag at the beginning of the next scavenge, so that each page becomes
+  // marked as having a valid watermark.
+  //
+  // The following invariant must hold for pages in old pointer and map spaces:
+  //     If page is in use then page is marked as having invalid watermark at
+  //     the beginning and at the end of any GC.
+  //
+  // This invariant guarantees that after flipping flag meaning at the
+  // beginning of scavenge all pages in use will be marked as having valid
+  // watermark.
+  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
+
+  // Returns true if the page allocation watermark was not altered during
+  // scavenge.
+  inline bool IsWatermarkValid();
+
+  inline void InvalidateWatermark(bool value);
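
The flag-flip trick described above, in miniature; this is an illustrative
model with made-up field names, not the Page implementation (which keeps the
bit inside flags_):

    struct HeapModel { bool invalidated_meaning; };  // Flipped per scavenge.
    struct PageModel { bool watermark_flag; };

    // A page's watermark is valid iff its flag differs from what
    // "invalidated" currently means.
    bool IsWatermarkValid(const HeapModel& h, const PageModel& p) {
      return p.watermark_flag != h.invalidated_meaning;
    }

    // O(1) for the whole heap: every page still carrying the old meaning
    // becomes "valid" again, with no per-page clearing pass.
    void FlipMeaningOfInvalidatedWatermarkFlag(HeapModel* h) {
      h->invalidated_meaning = !h->invalidated_meaning;
    }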
+
+  inline bool GetPageFlag(PageFlag flag);
+  inline void SetPageFlag(PageFlag flag, bool value);
+  inline void ClearPageFlags();
+
   inline void ClearGCFields();
 
-  static inline Page* Initialize(Heap* heap,
-                                 MemoryChunk* chunk,
-                                 Executability executable,
-                                 PagedSpace* owner);
+  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
+  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
+  static const uint32_t kAllocationWatermarkOffsetMask =
+      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
+      kAllocationWatermarkOffsetShift;
 
-  void InitializeAsAnchor(PagedSpace* owner);
+  static const uint32_t kFlagsMask =
+    ((1 << kAllocationWatermarkOffsetShift) - 1);
 
-  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
-  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
-  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
+  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
+               kAllocationWatermarkOffsetBits);
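
A sketch of how the two fields sharing flags_ come apart under the masks
above (illustrative helpers only; the real accessors are the inline Page
methods declared earlier, and the constants and <stdint.h> types are assumed
in scope):

    uint32_t PageFlagBits(uint32_t flags_word) {
      return flags_word & kFlagsMask;  // Low bits: the PageFlag values.
    }

    uint32_t AllocationWatermarkOffsetOf(uint32_t flags_word) {
      return (flags_word & kAllocationWatermarkOffsetMask) >>
             kAllocationWatermarkOffsetShift;
    }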
 
-  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
-  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
+  //---------------------------------------------------------------------------
+  // Page header description.
+  //
+  // If a page is not in the large object space, the first word,
+  // opaque_header, encodes the next page address (aligned to the 8K
+  // kPageSize) and the chunk number (0..8K-1).  Only MemoryAllocator should
+  // use opaque_header. Its value lies either in [0..kPageSize[ (a bare chunk
+  // number) or in [next_page_start, next_page_end[, so it can never be a
+  // valid address in the current page.  If a page is in the large object
+  // space, the first
+  // word *may* (if the page start and large object chunk start are the
+  // same) contain the address of the next large object chunk.
+  intptr_t opaque_header;
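
Taking the description above literally, the header ORs an 8K-aligned next
page address with a chunk id that fits in the low kPageSizeBits bits; a
hedged sketch with hypothetical helper names (the real encoding lives in
MemoryAllocator and Page::next_page()):

    #include <stdint.h>

    static const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // 8K pages.

    intptr_t PackOpaqueHeader(uintptr_t next_page_address, int chunk_id) {
      return static_cast<intptr_t>(
          (next_page_address & ~kPageAlignmentMask) |
          static_cast<uintptr_t>(chunk_id));
    }

    int UnpackChunkId(intptr_t header) {
      return static_cast<int>(header & kPageAlignmentMask);
    }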
 
-  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
-  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+  // If the page is not in the large object space, the low-order bit of the
+  // second word is set. If the page is in the large object space, the
+  // second word *may* (if the page start and large object chunk start are
+  // the same) contain the large object chunk size.  In either case, the
+  // low-order bit for large object pages will be cleared.
+  // For normal pages this word is used to store page flags and
+  // offset of allocation top.
+  intptr_t flags_;
 
-#ifdef DEBUG
-  void Print();
-#endif  // DEBUG
+  // This field contains dirty marks for regions covering the page. Only dirty
+  // regions might contain intergenerational references.
+  // Only 32 dirty marks are supported so for large object pages several regions
+  // might be mapped to a single dirty mark.
+  uint32_t dirty_regions_;
 
-  friend class MemoryAllocator;
+  // The index of the page in its owner space.
+  int mc_page_index;
+
+  // During mark-compact collections this field contains the forwarding address
+  // of the first live object in this page.
+  // During scavenge collection this field is used to store allocation watermark
+  // if it is altered during scavenge.
+  Address mc_first_forwarded;
+
+  Heap* heap_;
 };
 
 
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
-
-
-class LargePage : public MemoryChunk {
- public:
-  HeapObject* GetObject() {
-    return HeapObject::FromAddress(area_start());
-  }
-
-  inline LargePage* next_page() const {
-    return static_cast<LargePage*>(next_chunk());
-  }
-
-  inline void set_next_page(LargePage* page) {
-    set_next_chunk(page);
-  }
- private:
-  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
-
-  friend class MemoryAllocator;
-};
-
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
-
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
@@ -776,14 +380,6 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (id_ == CODE_SPACE) {
-      return RoundDown(size, kCodeAlignment);
-    } else {
-      return RoundDown(size, kPointerSize);
-    }
-  }
-
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -818,7 +414,7 @@
   // Reserves a range of virtual memory, but does not commit any of it.
   // Can only be called once, at heap initialization time.
   // Returns false on failure.
-  bool SetUp(const size_t requested_size);
+  bool Setup(const size_t requested_size);
 
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -834,9 +430,9 @@
   // Allocates a chunk of memory from the large-object portion of
   // the code range.  On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
-                                            size_t* allocated);
-  void FreeRawMemory(Address buf, size_t length);
+  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+                                          size_t* allocated);
+  void FreeRawMemory(void* buf, size_t length);
 
  private:
   Isolate* isolate_;
@@ -847,15 +443,9 @@
   class FreeBlock {
    public:
     FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {
-      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
-    }
+        : start(start_arg), size(size_arg) {}
     FreeBlock(void* start_arg, size_t size_arg)
-        : start(static_cast<Address>(start_arg)), size(size_arg) {
-      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
-    }
+        : start(static_cast<Address>(start_arg)), size(size_arg) {}
 
     Address start;
     size_t size;
@@ -883,80 +473,123 @@
 };
 
 
-class SkipList {
- public:
-  SkipList() {
-    Clear();
-  }
-
-  void Clear() {
-    for (int idx = 0; idx < kSize; idx++) {
-      starts_[idx] = reinterpret_cast<Address>(-1);
-    }
-  }
-
-  Address StartFor(Address addr) {
-    return starts_[RegionNumber(addr)];
-  }
-
-  void AddObject(Address addr, int size) {
-    int start_region = RegionNumber(addr);
-    int end_region = RegionNumber(addr + size - kPointerSize);
-    for (int idx = start_region; idx <= end_region; idx++) {
-      if (starts_[idx] > addr) starts_[idx] = addr;
-    }
-  }
-
-  static inline int RegionNumber(Address addr) {
-    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
-  }
-
-  static void Update(Address addr, int size) {
-    Page* page = Page::FromAddress(addr);
-    SkipList* list = page->skip_list();
-    if (list == NULL) {
-      list = new SkipList();
-      page->set_skip_list(list);
-    }
-
-    list->AddObject(addr, size);
-  }
-
- private:
-  static const int kRegionSizeLog2 = 13;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const int kSize = Page::kPageSize / kRegionSize;
-
-  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
-  Address starts_[kSize];
-};
-
-
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
-// allocator allocates and deallocates pages for the paged heap spaces and large
-// pages for large object space.
+// allocator manages chunks for the paged heap spaces (old space and map
+// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
+// addresses and are linked as a list.
 //
-// Each space has to manage its own pages.
+// The allocator keeps an initial chunk which is used for the new space.  The
+// leftover regions of the initial chunk are used for the initial chunks of
+// old space and map space if they are big enough to hold at least one page.
+// The allocator assumes that there is one old space and one map space; each
+// expands by allocating kPagesPerChunk pages at a time, except possibly the
+// last expansion (before running out of space).  The first chunk may contain
+// fewer than kPagesPerChunk pages as well.
 //
+// The memory allocator also allocates chunks for the large object space, but
+// they are managed by the space itself.  The new space does not expand.
+//
+// The fact that pages for paged spaces are allocated and deallocated in chunks
+// induces a constraint on the order of pages in a linked list. We say that
+// pages are linked in the chunk-order if and only if every two consecutive
+// pages from the same chunk are consecutive in the linked list.
+//
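Concretely: with chunk A holding pages {a1, a2} and chunk B holding pages
{b1, b2}, the list a1 -> a2 -> b1 -> b2 is chunk-ordered, while
a1 -> b1 -> a2 -> b2 is not, since a1 and a2 are separated; FreePages() and
RelinkPageListInChunkOrder() below rely on the former shape.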
+
+
 class MemoryAllocator {
  public:
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
   // Max capacity of the total space and executable memory limit.
-  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
+  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
 
+  // Deletes valid chunks.
   void TearDown();
 
-  Page* AllocatePage(PagedSpace* owner, Executability executable);
+  // Reserves an initial address range of virtual memory to be split between
+  // the two new space semispaces, the old space, and the map space.  The
+  // memory is not yet committed or assigned to spaces and split into pages.
+  // The initial chunk is unmapped when the memory allocator is torn down.
+  // This function should only be called when there is not already a reserved
+  // initial chunk (initial_chunk_ should be NULL).  It returns the start
+  // address of the initial chunk if successful, with the side effect of
+  // setting the initial chunk, or else NULL if unsuccessful, leaving the
+  // initial chunk NULL.
+  void* ReserveInitialChunk(const size_t requested);
 
-  LargePage* AllocateLargePage(intptr_t object_size,
-                                      Executability executable,
-                                      Space* owner);
+  // Commits pages from an as-yet-unmanaged block of virtual memory into a
+  // paged space.  The block should be part of the initial chunk reserved via
+  // a call to ReserveInitialChunk.  The number of pages is always returned in
+  // the output parameter num_pages.  This function assumes that the start
+  // address is non-null and that it is big enough to hold at least one
+  // page-aligned page.  The call always succeeds, and num_pages is always
+  // greater than zero.
+  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
+                    int* num_pages);
 
-  void Free(MemoryChunk* chunk);
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  // Attempts to allocate the requested (non-zero) number of pages from the
+  // OS.  Fewer pages might be allocated than requested. If it fails to
+  // allocate memory from the OS or cannot allocate a single page, this
+  // function returns an invalid page pointer (NULL). The caller must check
+  // whether the returned page is valid (by calling Page::is_valid()).  It is
+  // guaranteed that allocated pages have contiguous addresses.  The actual
+  // number of allocated pages is returned in the output parameter
+  // allocated_pages.  If the PagedSpace owner is executable and there is
+  // a code range, the pages are allocated from the code range.
+  Page* AllocatePages(int requested_pages, int* allocated_pages,
+                      PagedSpace* owner);
+
+  // Frees pages from a given page and after. Requires pages to be
+  // linked in chunk-order (see comment for class).
+  // If 'p' is the first page of a chunk, pages from 'p' are freed
+  // and this function returns an invalid page pointer.
+  // Otherwise, the function searches a page after 'p' that is
+  // the first page of a chunk. Pages after the found page
+  // are freed and the function returns 'p'.
+  Page* FreePages(Page* p);
+
+  // Frees all pages owned by given space.
+  void FreeAllPages(PagedSpace* space);
+
+  // Allocates and frees raw memory of certain size.
+  // These are just thin wrappers around OS::Allocate and OS::Free,
+  // but keep track of allocated bytes as part of heap.
+  // If the flag is EXECUTABLE and a code range exists, the requested
+  // memory is allocated from the code range.  If a code range exists
+  // and the freed memory is in it, the code range manages the freed memory.
+  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
+                                          size_t* allocated,
+                                          Executability executable);
+  void FreeRawMemory(void* buf,
+                     size_t length,
+                     Executability executable);
+  void PerformAllocationCallback(ObjectSpace space,
+                                 AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space,
+                                   AllocationAction action);
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
 
   // Returns the maximum available bytes of heaps.
   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -975,85 +608,70 @@
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
+    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
+  // Links two pages.
+  inline void SetNextPage(Page* prev, Page* next);
+
+  // Returns the next page of a given page.
+  inline Page* GetNextPage(Page* p);
+
+  // Checks whether a page belongs to a space.
+  inline bool IsPageInSpace(Page* p, PagedSpace* space);
+
+  // Returns the space that owns the given page.
+  inline PagedSpace* PageOwner(Page* page);
+
+  // Finds the first/last page in the same chunk as a given page.
+  Page* FindFirstPageInSameChunk(Page* p);
+  Page* FindLastPageInSameChunk(Page* p);
+
+  // Relinks list of pages owned by space to make it chunk-ordered.
+  // Returns new first and last pages of space.
+  // Also returns the last page in the relinked list which has the
+  // WasInUseBeforeMC flag set.
+  void RelinkPageListInChunkOrder(PagedSpace* space,
+                                  Page** first_page,
+                                  Page** last_page,
+                                  Page** last_page_in_use);
+
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
 #endif
 
-  MemoryChunk* AllocateChunk(intptr_t body_size,
-                             Executability executable,
-                             Space* space);
-
-  Address ReserveAlignedMemory(size_t requested,
-                               size_t alignment,
-                               VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t requested,
-                                size_t alignment,
-                                Executability executable,
-                                VirtualMemory* controller);
-
-  void FreeMemory(VirtualMemory* reservation, Executability executable);
-  void FreeMemory(Address addr, size_t size, Executability executable);
-
-  // Commit a contiguous block of memory from the initial chunk.  Assumes that
-  // the address is not NULL, the size is greater than zero, and that the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
-
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not NULL, the size is greater than zero, and the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(Address start, size_t size);
-
-  // Zaps a contiguous block of memory [start..(start+size)[ thus
-  // filling it up with a recognizable non-NULL bit pattern.
-  void ZapBlock(Address start, size_t size);
-
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                          ObjectSpace space,
-                                          AllocationAction action);
-
-  void RemoveMemoryAllocationCallback(
-      MemoryAllocationCallback callback);
-
-  bool MemoryAllocationCallbackRegistered(
-      MemoryAllocationCallback callback);
-
-  static int CodePageGuardStartOffset();
-
-  static int CodePageGuardSize();
-
-  static int CodePageAreaStartOffset();
-
-  static int CodePageAreaEndOffset();
-
-  static int CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
+  // Due to encoding limitation, we can only have 8K chunks.
+  static const int kMaxNofChunks = 1 << kPageSizeBits;
+  // If a chunk has at least 16 pages, the maximum heap size is about
+  // 8K * 8K * 16 = 1G bytes.
+#ifdef V8_TARGET_ARCH_X64
+  static const int kPagesPerChunk = 32;
+  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
+  static const int kChunkTableLevels = 4;
+  static const int kChunkTableBitsPerLevel = 12;
+#else
+  static const int kPagesPerChunk = 16;
+  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
+  static const int kChunkTableLevels = 2;
+  static const int kChunkTableBitsPerLevel = 8;
+#endif
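
A back-of-envelope check of the "about 1G bytes" figure above for the 32-bit
configuration, with all values taken from the constants in this class:

    #include <stdint.h>

    // 8192 chunk ids (1 << kPageSizeBits) * 16 pages per chunk * 8K per page.
    static const int64_t kMaxHeapBytes =
        static_cast<int64_t>(1 << 13) * 16 * (8 * 1024);
    // kMaxHeapBytes == 1073741824, i.e. exactly 1 GiB (2^30).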
 
  private:
+  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
+
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  size_t capacity_;
+  intptr_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  size_t capacity_executable_;
+  intptr_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t size_;
+  intptr_t size_;
+
   // Allocated executable space size in bytes.
-  size_t size_executable_;
+  intptr_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1065,11 +683,64 @@
     ObjectSpace space;
     AllocationAction action;
   };
-
   // A list of callbacks that are triggered when memory is allocated or freed
   List<MemoryAllocationCallbackRegistration>
       memory_allocation_callbacks_;
 
+  // The initial chunk of virtual memory.
+  VirtualMemory* initial_chunk_;
+
+  // Allocated chunk info: chunk start address, chunk size, and owning space.
+  class ChunkInfo BASE_EMBEDDED {
+   public:
+    ChunkInfo() : address_(NULL),
+                  size_(0),
+                  owner_(NULL),
+                  executable_(NOT_EXECUTABLE),
+                  owner_identity_(FIRST_SPACE) {}
+    inline void init(Address a, size_t s, PagedSpace* o);
+    Address address() { return address_; }
+    size_t size() { return size_; }
+    PagedSpace* owner() { return owner_; }
+    // We save executability of the owner to allow using it
+    // when collecting stats after the owner has been destroyed.
+    Executability executable() const { return executable_; }
+    AllocationSpace owner_identity() const { return owner_identity_; }
+
+   private:
+    Address address_;
+    size_t size_;
+    PagedSpace* owner_;
+    Executability executable_;
+    AllocationSpace owner_identity_;
+  };
+
+  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
+  List<ChunkInfo> chunks_;
+  List<int> free_chunk_ids_;
+  int max_nof_chunks_;
+  int top_;
+
+  // Push/pop a free chunk id onto/from the stack.
+  void Push(int free_chunk_id);
+  int Pop();
+  bool OutOfChunkIds() { return top_ == 0; }
+
+  // Frees a chunk.
+  void DeleteChunk(int chunk_id);
+
+  // Basic check whether a chunk id is in the valid range.
+  inline bool IsValidChunkId(int chunk_id);
+
+  // Checks whether a chunk id identifies an allocated chunk.
+  inline bool IsValidChunk(int chunk_id);
+
+  // Returns the chunk id that a page belongs to.
+  inline int GetChunkId(Page* p);
+
+  // True if the address lies in the initial chunk.
+  inline bool InInitialChunk(Address address);
+
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -1077,7 +748,13 @@
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+  Page* RelinkPagesInChunk(int chunk_id,
+                           Address chunk_start,
+                           size_t chunk_size,
+                           Page* prev,
+                           Page** last_page_in_use);
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
 };
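
The chunks_/free_chunk_ids_/top_ members above implement a fixed-capacity
stack of recyclable chunk ids: Push() when a chunk is deleted, Pop() when a
new chunk needs an id. A minimal standalone model of that discipline (the
ChunkIdStack name is hypothetical, not the allocator's actual code):

    #include <cassert>
    #include <vector>

    class ChunkIdStack {
     public:
      explicit ChunkIdStack(int max_nof_chunks)
          : ids_(max_nof_chunks), top_(max_nof_chunks) {
        // Initially every id is free, mirroring "no chunks allocated yet".
        for (int i = 0; i < max_nof_chunks; i++) ids_[i] = i;
      }
      bool OutOfChunkIds() const { return top_ == 0; }
      void Push(int free_chunk_id) { ids_[top_++] = free_chunk_id; }
      int Pop() {
        assert(!OutOfChunkIds());
        return ids_[--top_];
      }
     private:
      std::vector<int> ids_;  // Slots 0..top_-1 hold the currently free ids.
      int top_;
    };

    int main() {
      ChunkIdStack stack(8);
      int id = stack.Pop();  // Claim an id for a new chunk.
      stack.Push(id);        // Release it again when the chunk is deleted.
      return 0;
    }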
 
 
@@ -1100,67 +777,111 @@
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
 //
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
+// A HeapObjectIterator iterates objects from a given address to the
+// top of a space. The given address must be below the current
+// allocation pointer (space top). There are some caveats.
 //
-// If objects are allocated in the page during iteration the iterator may
-// or may not iterate over those objects.  The caller must create a new
-// iterator in order to be sure to visit these new objects.
+// (1) If the space top changes upward during iteration (because of
+//     allocating new objects), the iterator does not iterate objects
+//     above the original space top. The caller must create a new
+//     iterator starting from the old top in order to visit these new
+//     objects.
+//
+// (2) If new objects are allocated below the original allocation top
+//     (e.g., free-list allocation in paged spaces), the new objects
+//     may or may not be iterated depending on their position with
+//     respect to the current point of iteration.
+//
+// (3) The space top should not change downward during iteration,
+//     otherwise the iterator will return not-necessarily-valid
+//     objects.
+
 class HeapObjectIterator: public ObjectIterator {
  public:
-  // Creates a new object iterator in a given space.
+  // Creates a new object iterator in a given space. If a start
+  // address is not given, the iterator starts from the space bottom.
   // If the size function is not given, the iterator calls the default
   // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+  HeapObjectIterator(PagedSpace* space, Address start);
+  HeapObjectIterator(PagedSpace* space,
+                     Address start,
+                     HeapObjectCallback size_func);
   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
 
-  // Advance to the next object, skipping free spaces and other fillers and
-  // skipping the special garbage section of which there is one per space.
-  // Returns NULL when the iteration has ended.
-  inline HeapObject* Next() {
-    do {
-      HeapObject* next_obj = FromCurrentPage();
-      if (next_obj != NULL) return next_obj;
-    } while (AdvanceToNextPage());
-    return NULL;
+  inline HeapObject* next() {
+    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
   }
 
-  virtual HeapObject* next_object() {
-    return Next();
-  }
+  // Implementation of ObjectIterator.
+  virtual HeapObject* next_object() { return next(); }
 
  private:
-  enum PageMode { kOnePageOnly, kAllPagesInSpace };
+  Address cur_addr_;  // current iteration point
+  Address end_addr_;  // end iteration point
+  Address cur_limit_;  // current page limit
+  HeapObjectCallback size_func_;  // size function
+  Page* end_page_;  // caches the page of the end address
 
-  Address cur_addr_;  // Current iteration point.
-  Address cur_end_;   // End iteration point.
-  HeapObjectCallback size_func_;  // Size function or NULL.
-  PagedSpace* space_;
-  PageMode page_mode_;
+  HeapObject* FromCurrentPage() {
+    ASSERT(cur_addr_ < cur_limit_);
 
-  // Fast (inlined) path of next().
-  inline HeapObject* FromCurrentPage();
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    ASSERT_OBJECT_SIZE(obj_size);
 
-  // Slow path of next(), goes into the next page.  Returns false if the
-  // iteration has ended.
-  bool AdvanceToNextPage();
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_limit_);
+
+    return obj;
+  }
+
+  // Slow path of next, goes into the next page.
+  HeapObject* FromNextPage();
 
   // Initializes fields.
-  inline void Initialize(PagedSpace* owner,
-                         Address start,
-                         Address end,
-                         PageMode mode,
-                         HeapObjectCallback size_func);
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+
+#ifdef DEBUG
+  // Verifies whether fields have valid values.
+  void Verify();
+#endif
 };
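
Old and new variants share one contract: objects sit back to back, each
object reports its own size, and iteration stops at the allocation top. A
toy standalone analogue of that contract (an arena of size-prefixed blobs,
names local to the example):

    #include <cstdio>

    int main() {
      unsigned char arena[64] = {0};
      int top = 0;  // Allocation top; iteration must not pass it.
      const int sizes[] = {8, 16, 4};
      for (int i = 0; i < 3; i++) {  // "Allocate" three objects.
        arena[top] = static_cast<unsigned char>(sizes[i]);
        top += sizes[i];
      }
      // The equivalent of calling next() until it returns NULL.
      for (int cur = 0; cur < top; cur += arena[cur]) {
        std::printf("object at offset %d, size %d\n", cur, arena[cur]);
      }
      return 0;
    }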
 
 
 // -----------------------------------------------------------------------------
 // A PageIterator iterates the pages in a paged space.
+//
+// The PageIterator class provides three modes for iterating pages in a space:
+//   PAGES_IN_USE iterates pages containing allocated objects.
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+//                    mark-compact collection.
+//   ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+//     returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+//     in pages returned by the iterator.  Allocation may cause the
+//     allocation pointer or MC allocation pointer in the last page to
+//     change between constructing the iterator and iterating the last
+//     page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  explicit inline PageIterator(PagedSpace* space);
+  enum Mode {
+    PAGES_IN_USE,
+    PAGES_USED_BY_MC,
+    ALL_PAGES
+  };
+
+  PageIterator(PagedSpace* space, Mode mode);
 
   inline bool has_next();
   inline Page* next();
@@ -1168,25 +889,21 @@
  private:
   PagedSpace* space_;
   Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
+  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
 };
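
Whichever mode is chosen, the iterator reduces to walking the page list up
to a precomputed stop page. A standalone analogue (toy Page type, not V8's):

    #include <cstdio>

    struct Page { int id; Page* next; };

    // Walk a singly linked page list, stopping after stop_page (the last
    // page the chosen mode should return).
    void IteratePages(Page* first, Page* stop_page) {
      for (Page* p = first; ; p = p->next) {
        std::printf("page %d\n", p->id);
        if (p == stop_page) break;
      }
    }

    int main() {
      Page p3 = {3, NULL};
      Page p2 = {2, &p3};
      Page p1 = {1, &p2};
      IteratePages(&p1, &p2);  // E.g. PAGES_IN_USE stopping at the top page.
      return 0;
    }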
 
 
 // -----------------------------------------------------------------------------
-// A space has a circular list of pages. The next page can be accessed via
-// Page::next_page() call.
+// A space has a list of pages. The next page can be accessed via
+// Page::next_page() call. The next page of the last page is an
+// invalid page pointer. A space can expand and shrink dynamically.
 
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
 class AllocationInfo {
  public:
-  AllocationInfo() : top(NULL), limit(NULL) {
-  }
-
-  Address top;  // Current allocation top.
-  Address limit;  // Current allocation limit.
+  Address top;  // current allocation top
+  Address limit;  // current allocation limit
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
@@ -1198,11 +915,11 @@
 
 
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
+// The 'capacity' of a space is the number of object-area bytes (ie, not
 // including page bookkeeping structures) currently in the space. The 'size'
 // of a space is the number of allocated bytes, the 'waste' in the space is
 // the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
+// allocation without reorganizing the space via a GC (eg, small blocks due
 // to internal fragmentation, top of page areas in map space), and the bytes
 // 'available' is the number of unallocated bytes that are not waste.  The
 // capacity is the sum of size, waste, and available.
@@ -1215,213 +932,73 @@
  public:
   AllocationStats() { Clear(); }
 
-  // Zero out all the allocation statistics (i.e., no capacity).
+  // Zero out all the allocation statistics (ie, no capacity).
   void Clear() {
     capacity_ = 0;
+    available_ = 0;
     size_ = 0;
     waste_ = 0;
   }
 
-  void ClearSizeWaste() {
-    size_ = capacity_;
-    waste_ = 0;
-  }
-
-  // Reset the allocation statistics (i.e., available = capacity with no
+  // Reset the allocation statistics (ie, available = capacity with no
   // wasted or allocated bytes).
   void Reset() {
+    available_ = capacity_;
     size_ = 0;
     waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
+  intptr_t Available() { return available_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
 
-  // Grow the space by adding available bytes.  They are initially marked as
-  // being in use (part of the size), but will normally be immediately freed,
-  // putting them on the free list and removing them from size_.
+  // Grow the space by adding available bytes.
   void ExpandSpace(int size_in_bytes) {
     capacity_ += size_in_bytes;
-    size_ += size_in_bytes;
-    ASSERT(size_ >= 0);
+    available_ += size_in_bytes;
   }
 
-  // Shrink the space by removing available bytes.  Since shrinking is done
-  // during sweeping, bytes have been marked as being in use (part of the size)
-  // and are hereby freed.
+  // Shrink the space by removing available bytes.
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
-    size_ -= size_in_bytes;
-    ASSERT(size_ >= 0);
+    available_ -= size_in_bytes;
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
+    available_ -= size_in_bytes;
     size_ += size_in_bytes;
-    ASSERT(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    ASSERT(size_ >= 0);
+    available_ += size_in_bytes;
   }
 
   // Waste free bytes (available -> waste).
   void WasteBytes(int size_in_bytes) {
-    size_ -= size_in_bytes;
+    available_ -= size_in_bytes;
     waste_ += size_in_bytes;
-    ASSERT(size_ >= 0);
+  }
+
+  // Consider the wasted bytes to be allocated, as they contain filler
+  // objects (waste -> size).
+  void FillWastedBytes(intptr_t size_in_bytes) {
+    waste_ -= size_in_bytes;
+    size_ += size_in_bytes;
   }
 
  private:
   intptr_t capacity_;
+  intptr_t available_;
   intptr_t size_;
   intptr_t waste_;
 };
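
With the restored available_ counter the class keeps the invariant
capacity_ == size_ + waste_ + available_; every mutator above only moves
bytes between those buckets. A compact standalone check of the transitions
(hypothetical Stats type):

    #include <cassert>

    struct Stats {
      long capacity, available, size, waste;
      void Check() const { assert(capacity == size + waste + available); }
    };

    int main() {
      Stats s = {0, 0, 0, 0};
      s.capacity += 4096; s.available += 4096; s.Check();  // ExpandSpace
      s.available -= 100; s.size += 100;       s.Check();  // AllocateBytes
      s.available -= 12;  s.waste += 12;       s.Check();  // WasteBytes
      s.waste -= 12;      s.size += 12;        s.Check();  // FillWastedBytes
      s.size -= 50;       s.available += 50;   s.Check();  // DeallocateBytes
      return 0;
    }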
 
 
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline FreeListNode* next();
-  inline FreeListNode** next_address();
-  inline void set_next(FreeListNode* next);
-
-  inline void Zap();
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.  The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other.  The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
-// find a new space to allocate from.  This is done with the free list, which
-// is divided up into rough categories to cut down on waste.  Having finer
-// categories would scatter allocation more.
-
-// The old space free list is organized in categories.
-// 1-31 words:  Such small free areas are discarded for efficiency reasons.
-//     They can be reclaimed by the compactor.  However the distance between top
-//     and limit may be this small.
-// 32-255 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 1-31 words in size.  These
-//     spaces are called small.
-// 256-2047 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 32-255 words in size.  These
-//     spaces are called medium.
-// 2048-16383 words: There is a list of spaces this large.  It is used for top
-//     and limit when the object we need to allocate is 256-2047 words in size.
-//     These spaces are called large.
-// At least 16384 words.  This list is for objects of 2048 words or larger.
-//     Empty pages are added to this list.  These spaces are called huge.
-class FreeList BASE_EMBEDDED {
- public:
-  explicit FreeList(PagedSpace* owner);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // i.e., its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is uninitialized.  A failure is returned if no block is available.  The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
-  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
-
-#ifdef DEBUG
-  void Zap();
-  static intptr_t SumFreeList(FreeListNode* node);
-  static int FreeListLength(FreeListNode* cur);
-  intptr_t SumFreeLists();
-  bool IsVeryLong();
-#endif
-
-  struct SizeStats {
-    intptr_t Total() {
-      return small_size_ + medium_size_ + large_size_ + huge_size_;
-    }
-
-    intptr_t small_size_;
-    intptr_t medium_size_;
-    intptr_t large_size_;
-    intptr_t huge_size_;
-  };
-
-  void CountFreeListItems(Page* p, SizeStats* sizes);
-
-  intptr_t EvictFreeListItems(Page* p);
-
- private:
-  // The size range of blocks, in bytes.
-  static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
-
-  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
-  PagedSpace* owner_;
-  Heap* heap_;
-
-  // Total available bytes in all blocks on this free list.
-  int available_;
-
-  static const int kSmallListMin = 0x20 * kPointerSize;
-  static const int kSmallListMax = 0xff * kPointerSize;
-  static const int kMediumListMax = 0x7ff * kPointerSize;
-  static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
-  static const int kMediumAllocationMax = kSmallListMax;
-  static const int kLargeAllocationMax = kMediumListMax;
-  FreeListNode* small_list_;
-  FreeListNode* medium_list_;
-  FreeListNode* large_list_;
-  FreeListNode* huge_list_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
-};
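
While reading the removed comment block: the boundaries mean a request is
always served from the list one category up, so the block handed out leaves
a remainder that is still usable for requests of the same category. A
standalone sketch of that selection, in words, using the removed constants'
values:

    #include <cstdio>

    const int kSmallListMin = 0x20;    // 32 words
    const int kSmallListMax = 0xff;    // 255 words
    const int kMediumListMax = 0x7ff;  // 2047 words

    const char* ListForRequest(int size_in_words) {
      if (size_in_words < kSmallListMin) return "small";    // 1-31 word requests
      if (size_in_words <= kSmallListMax) return "medium";  // 32-255
      if (size_in_words <= kMediumListMax) return "large";  // 256-2047
      return "huge";                                        // 2048 and up
    }

    int main() {
      const int requests[] = {8, 100, 1000, 5000};
      for (int i = 0; i < 4; i++) {
        std::printf("%d words -> %s list\n",
                    requests[i], ListForRequest(requests[i]));
      }
      return 0;
    }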
-
-
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
@@ -1436,11 +1013,11 @@
   // the memory allocator's initial chunk) if possible.  If the block of
   // addresses is not big enough to contain a single page-aligned page, a
   // fresh chunk will be allocated.
-  bool SetUp();
+  bool Setup(Address start, size_t size);
 
   // Returns true if the space has been successfully set up and not
   // subsequently torn down.
-  bool HasBeenSetUp();
+  bool HasBeenSetup();
 
   // Cleans up the space, frees all pages in this space except those belonging
   // to the initial chunk, uncommits addresses in the initial chunk.
@@ -1449,6 +1026,8 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
+  // Never crashes even if a is not a valid pointer.
+  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1456,92 +1035,104 @@
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
-  // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact();
+  // Checks whether page is currently in use by this space.
+  bool IsUsed(Page* page);
 
-  // Current capacity without growing (Size() + Available()).
+  void MarkAllPagesClean();
+
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : PageAllocationLimit(page);
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) = 0;
+
+  void FlushTopPageWatermark() {
+    AllocationTopPage()->SetCachedAllocationWatermark(top());
+    AllocationTopPage()->InvalidateWatermark(true);
+  }
+
+  // Current capacity without growing (Size() + Available() + Waste()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }
 
   // Total amount of memory committed for this space.  For paged
   // spaces this equals the capacity.
   intptr_t CommittedMemory() { return Capacity(); }
 
-  // Sets the capacity, the available space and the wasted space to zero.
-  // The stats are rebuilt during sweeping by adding each page to the
-  // capacity and the size when it is encountered.  As free spaces are
-  // discovered during the sweeping they are subtracted from the size and added
-  // to the available and wasted totals.
-  void ClearStats() {
-    accounting_stats_.ClearSizeWaste();
-  }
+  // Available bytes without growing.
+  intptr_t Available() { return accounting_stats_.Available(); }
 
-  // Available bytes without growing.  These are the bytes on the free list.
-  // The bytes in the linear allocation area are not included in this total
-  // because updating the stats would slow down allocation.  New pages are
-  // immediately added to the free list so they show up here.
-  intptr_t Available() { return free_list_.available(); }
-
-  // Allocated bytes in this space.  Garbage bytes that were not found due to
-  // lazy sweeping are counted as being allocated!  The bytes in the current
-  // linear allocation area (between top and limit) are also counted here.
+  // Allocated bytes in this space.
   virtual intptr_t Size() { return accounting_stats_.Size(); }
 
-  // As size, but the bytes in lazily swept pages are estimated and the bytes
-  // in the current linear allocation area are not included.
-  virtual intptr_t SizeOfObjects() {
-    ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
-    return Size() - unswept_free_bytes_ - (limit() - top());
-  }
+  // Wasted bytes due to fragmentation and not recoverable until the
+  // next GC of this space.
+  intptr_t Waste() { return accounting_stats_.Waste(); }
 
-  // Wasted bytes in this space.  These are just the bytes that were thrown away
-  // due to being too small to use for allocation.  They do not include the
-  // free bytes that were not found at all due to lazy sweeping.
-  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+  // Returns the address of the first object in this space.
+  Address bottom() { return first_page_->ObjectAreaStart(); }
 
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
-  Address limit() { return allocation_info_.limit; }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
+  // Allocate the requested number of bytes for relocation during mark-compact
+  // collection.
+  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
+
   virtual bool ReserveSpace(int bytes);
 
-  // Give a block of memory to the space's free list.  It might be added to
-  // the free list or accounted as waste.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes);
-    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
-    return size_in_bytes - wasted;
-  }
+  // Used by ReserveSpace.
+  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
+
+  // Free all pages in the range from prev (exclusive) to last (inclusive).
+  // Freed pages are moved to the end of the page list.
+  void FreePages(Page* prev, Page* last);
+
+  // Deallocates a block.
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist) = 0;
 
   // Set space allocation info.
-  void SetTop(Address top, Address limit) {
-    ASSERT(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+  void SetTop(Address top) {
     allocation_info_.top = top;
-    allocation_info_.limit = limit;
+    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
   }
 
-  void Allocate(int bytes) {
-    accounting_stats_.AllocateBytes(bytes);
+  // ---------------------------------------------------------------------------
+  // Mark-compact collection support functions
+
+  // Set the relocation point to the beginning of the space.
+  void MCResetRelocationInfo();
+
+  // Writes relocation info to the top page.
+  void MCWriteRelocationInfoToPage() {
+    TopPageOf(mc_forwarding_info_)->
+        SetAllocationWatermark(mc_forwarding_info_.top);
   }
 
-  void IncreaseCapacity(int size) {
-    accounting_stats_.ExpandSpace(size);
-  }
+  // Computes the offset of a given address in this space to the beginning
+  // of the space.
+  int MCSpaceOffsetForAddress(Address addr);
 
-  // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo() = 0;
 
-  // Releases all of the unused pages.
-  void ReleaseAllUnusedPages();
+  // Releases half of the unused pages.
+  void Shrink();
 
-  // The dummy page that anchors the linked list of pages.
-  Page* anchor() { return &anchor_; }
+  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
+  bool EnsureCapacity(int capacity);
 
 #ifdef DEBUG
   // Print meta info and objects in this space.
@@ -1550,9 +1141,6 @@
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
 
-  // Reports statistics for the space
-  void ReportStatistics();
-
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
   virtual void VerifyObject(HeapObject* obj) {}
@@ -1563,108 +1151,91 @@
   static void ResetCodeStatistics();
 #endif
 
-  bool was_swept_conservatively() { return was_swept_conservatively_; }
-  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
+  // Returns the page of the allocation pointer.
+  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
 
-  // Evacuation candidates are swept by evacuator.  Needs to return a valid
-  // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptLazily(Page* p) {
-    return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
-           !p->WasSweptPrecisely();
-  }
-
-  void SetPagesToSweep(Page* first) {
-    ASSERT(unswept_free_bytes_ == 0);
-    if (first == &anchor_) first = NULL;
-    first_unswept_page_ = first;
-  }
-
-  void IncrementUnsweptFreeBytes(int by) {
-    unswept_free_bytes_ += by;
-  }
-
-  void IncreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
-  }
-
-  void DecreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
-  }
-
-  bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
-  bool IsSweepingComplete() {
-    return !first_unswept_page_->is_valid();
-  }
-
-  Page* FirstPage() { return anchor_.next_page(); }
-  Page* LastPage() { return anchor_.prev_page(); }
-
-  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
-    free_list_.CountFreeListItems(p, sizes);
-  }
-
-  void EvictEvacuationCandidatesFromFreeLists();
-
-  bool CanExpand();
-
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-
-  // Return size of allocatable area on a page in this space.
-  inline int AreaSize() {
-    return area_size_;
-  }
+  void RelinkPageListInChunkOrder(bool deallocate_blocks);
 
  protected:
-  int area_size_;
-
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
-  // The dummy page that anchors the double linked list of pages.
-  Page anchor_;
+  // The first page in this space.
+  Page* first_page_;
 
-  // The space's free list.
-  FreeList free_list_;
+  // The last page in this space.  Initially set in Setup, updated in
+  // Expand and Shrink.
+  Page* last_page_;
+
+  // True if pages owned by this space are linked in chunk-order.
+  // See comment for class MemoryAllocator for definition of chunk-order.
+  bool page_list_is_chunk_ordered_;
 
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
+  // Relocation information during mark-compact collections.
+  AllocationInfo mc_forwarding_info_;
+
   // Bytes of each page that cannot be allocated.  Possibly non-zero
   // for pages in spaces with only fixed-size objects.  Always zero
   // for pages in spaces with variable sized objects (those pages are
   // padded with free-list nodes).
   int page_extra_;
 
-  bool was_swept_conservatively_;
+  // Sets allocation pointer to a page bottom.
+  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
 
-  // The first page to be swept when the lazy sweeper advances. Is set
-  // to NULL when all pages have been swept.
-  Page* first_unswept_page_;
+  // Returns the top page specified by an allocation info structure.
+  static Page* TopPageOf(AllocationInfo alloc_info) {
+    return Page::FromAllocationTop(alloc_info.limit);
+  }
 
-  // The number of free bytes which could be reclaimed by advancing the
-  // lazy sweeper.  This is only an estimation because lazy sweeping is
-  // done conservatively.
-  intptr_t unswept_free_bytes_;
+  int CountPagesToTop() {
+    Page* p = Page::FromAllocationTop(allocation_info_.top);
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    int counter = 1;
+    while (it.has_next()) {
+      if (it.next() == p) return counter;
+      counter++;
+    }
+    UNREACHABLE();
+    return -1;
+  }
 
   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate the requested number of pages from the OS, or if the
-  // hard heap size limit has been hit.
-  bool Expand();
+  // it cannot allocate the requested number of pages from the OS. Newly
+  // allocated pages are appended after last_page.
+  bool Expand(Page* last_page);
 
-  // Generic fast case allocation function that tries linear allocation at the
-  // address denoted by top in allocation_info_.
-  inline HeapObject* AllocateLinearly(int size_in_bytes);
+  // Generic fast case allocation function that tries linear allocation in
+  // the top page of 'alloc_info'.  Returns NULL on failure.
+  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
+                                      int size_in_bytes);
+
+  // During normal allocation or deserialization, roll to the next page in
+  // the space (there is assumed to be one) and allocate there.  This
+  // function is space-dependent.
+  virtual HeapObject* AllocateInNextPage(Page* current_page,
+                                         int size_in_bytes) = 0;
 
   // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
+
+  // Slow path of MCAllocateRaw.
+  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
+
+#ifdef DEBUG
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+#endif
+
+ private:
+  // Returns a pointer to the page of the relocation pointer.
+  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
 
   friend class PageIterator;
 };
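
Both the removed and the restored PagedSpace share the same fast path:
AllocateLinearly bumps allocation_info_.top until an allocation would pass
the limit, and only then does the slow, space-dependent path run. A
standalone sketch of that bump-pointer contract (hypothetical types):

    #include <cstddef>
    #include <cstdio>

    struct AllocationInfo { char* top; char* limit; };

    // Bump 'top' while the result stays within 'limit'; return NULL so the
    // caller can fall back to the slow path (free list / next page).
    void* AllocateLinearly(AllocationInfo* info, size_t size_in_bytes) {
      char* new_top = info->top + size_in_bytes;
      if (new_top > info->limit) return NULL;  // Fast path failed.
      void* result = info->top;
      info->top = new_top;
      return result;
    }

    int main() {
      char page[64];
      AllocationInfo info = { page, page + sizeof(page) };
      void* a = AllocateLinearly(&info, 40);  // Succeeds.
      void* b = AllocateLinearly(&info, 40);  // NULL: would pass the limit.
      std::printf("a=%p b=%p\n", a, b);
      return 0;
    }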
@@ -1705,128 +1276,39 @@
 };
 
 
-enum SemiSpaceId {
-  kFromSpace = 0,
-  kToSpace = 1
-};
-
-
-class SemiSpace;
-
-
-class NewSpacePage : public MemoryChunk {
- public:
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
-
-  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
-
-  inline NewSpacePage* next_page() const {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) {
-    set_next_chunk(page);
-  }
-
-  inline NewSpacePage* prev_page() const {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) {
-    set_prev_chunk(page);
-  }
-
-  SemiSpace* semi_space() {
-    return reinterpret_cast<SemiSpace*>(owner());
-  }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
-        == kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  Address address() {
-    return reinterpret_cast<Address>(this);
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Find the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
- private:
-  // Create a NewSpacePage object that is only used as anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) {
-    InitializeAsAnchor(owner);
-  }
-
-  static NewSpacePage* Initialize(Heap* heap,
-                                  Address start,
-                                  SemiSpace* semi_space);
-
-  // Initialize a fake NewSpacePage used as a sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector  uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
+// A semispace is a contiguous chunk of memory. The mark-compact collector
+// uses the memory in the from space as a marking stack when tracing live
+// objects.
 
 class SemiSpace : public Space {
  public:
   // Constructor.
-  SemiSpace(Heap* heap, SemiSpaceId semispace)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      start_(NULL),
-      age_mark_(NULL),
-      id_(semispace),
-      anchor_(this),
-      current_page_(NULL) { }
+  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
+    start_ = NULL;
+    age_mark_ = NULL;
+  }
 
   // Sets up the semispace using the given chunk.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
+  bool Setup(Address start, int initial_capacity, int maximum_capacity);
 
   // Tear down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != NULL; }
+  bool HasBeenSetup() { return start_ != NULL; }
+
+  // Grow the size of the semispace by committing extra virtual memory.
+  // Assumes that the caller has checked that the semispace has not reached
+  // its maximum capacity (and thus there is space available in the reserved
+  // address range to grow).
+  bool Grow();
 
   // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity and less than
-  // the maximum capacity.
+  // requested must be larger than the current capacity.
   bool GrowTo(int new_capacity);
 
   // Shrinks the semispace to the new capacity.  The new capacity
@@ -1834,40 +1316,14 @@
   // semispace and less than the current capacity.
   bool ShrinkTo(int new_capacity);
 
-  // Returns the start address of the first page of the space.
-  Address space_start() {
-    ASSERT(anchor_.next_page() != &anchor_);
-    return anchor_.next_page()->area_start();
-  }
-
-  // Returns the start address of the current page of the space.
-  Address page_low() {
-    return current_page_->area_start();
-  }
-
+  // Returns the start address of the space.
+  Address low() { return start_; }
   // Returns one past the end address of the space.
-  Address space_end() {
-    return anchor_.prev_page()->area_end();
-  }
-
-  // Returns one past the end address of the current page of the space.
-  Address page_high() {
-    return current_page_->area_end();
-  }
-
-  bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
-    if (next_page == anchor()) return false;
-    current_page_ = next_page;
-    return true;
-  }
-
-  // Resets the space to using the first page.
-  void Reset();
+  Address high() { return low() + capacity_; }
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
-  void set_age_mark(Address mark);
+  void set_age_mark(Address mark) { age_mark_ = mark; }
 
   // True if the address is in the address range of this semispace (not
   // necessarily below the allocation pointer).
@@ -1882,6 +1338,11 @@
     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   }
 
+  // The offset of an address from the beginning of the space.
+  int SpaceOffsetForAddress(Address addr) {
+    return static_cast<int>(addr - low());
+  }
+
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called.
   virtual intptr_t Size() {
@@ -1898,19 +1359,9 @@
   bool Commit();
   bool Uncommit();
 
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
-  // Validate a range of addresses in a SemiSpace.
-  // The "from" address must be on a page prior to the "to" address,
-  // in the linked page order, or it must be earlier on the same page.
-  static void AssertValidRange(Address from, Address to);
-#else
-  // Do nothing.
-  inline static void AssertValidRange(Address from, Address to) {}
 #endif
 
   // Returns the current capacity of the semi space.
@@ -1922,17 +1373,7 @@
   // Returns the initial capacity of the semi space.
   int InitialCapacity() { return initial_capacity_; }
 
-  SemiSpaceId id() { return id_; }
-
-  static void Swap(SemiSpace* from, SemiSpace* to);
-
  private:
-  // Flips the semispace between being from-space and to-space.
-  // Copies the flags into the masked positions on all pages in the space.
-  void FlipPages(intptr_t flags, intptr_t flag_mask);
-
-  NewSpacePage* anchor() { return &anchor_; }
-
   // The current and maximum capacity of the space.
   int capacity_;
   int maximum_capacity_;
@@ -1949,13 +1390,7 @@
   uintptr_t object_expected_;
 
   bool committed_;
-  SemiSpaceId id_;
 
-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
-
-  friend class SemiSpaceIterator;
-  friend class NewSpacePageIterator;
  public:
   TRACK_MEMORY("SemiSpace")
 };
@@ -1971,26 +1406,12 @@
   // Create an iterator over the objects in the given space.  If no start
   // address is given, the iterator starts from the bottom of the space.  If
   // no size function is given, the iterator calls Object::Size().
-
-  // Iterate over all of the allocated to-space.
   explicit SemiSpaceIterator(NewSpace* space);
-  // Iterate over all of the allocated to-space, with a custom size function.
   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  // Iterate over part of allocated to-space, from start to the end
-  // of allocation.
   SemiSpaceIterator(NewSpace* space, Address start);
-  // Iterate from one address to another in the same semi-space.
-  SemiSpaceIterator(Address from, Address to);
 
-  HeapObject* Next() {
+  HeapObject* next() {
     if (current_ == limit_) return NULL;
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
-      page = page->next_page();
-      ASSERT(!page->is_anchor());
-      current_ = page->area_start();
-      if (current_ == limit_) return NULL;
-    }
 
     HeapObject* object = HeapObject::FromAddress(current_);
     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -2000,13 +1421,14 @@
   }
 
   // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return Next(); }
+  virtual HeapObject* next_object() { return next(); }
 
  private:
-  void Initialize(Address start,
-                  Address end,
+  void Initialize(NewSpace* space, Address start, Address end,
                   HeapObjectCallback size_func);
 
+  // The semispace.
+  SemiSpace* space_;
   // The current iteration point.
   Address current_;
   // The end of iteration.
@@ -2017,34 +1439,6 @@
 
 
 // -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
-  // Make an iterator that runs over all pages in to-space.
-  explicit inline NewSpacePageIterator(NewSpace* space);
-
-  // Make an iterator that runs over all pages in the given semispace,
-  // even those not used in allocation.
-  explicit inline NewSpacePageIterator(SemiSpace* space);
-
-  // Make iterator that iterates from the page containing start
-  // to the page that contains limit in the same semispace.
-  inline NewSpacePageIterator(Address start, Address limit);
-
-  inline bool has_next();
-  inline NewSpacePage* next();
-
- private:
-  NewSpacePage* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
-  // Last page returned.
-  NewSpacePage* last_page_;
-};
-
-
-// -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces.  It simply
@@ -2055,21 +1449,19 @@
   // Constructor.
   explicit NewSpace(Heap* heap)
     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap, kToSpace),
-      from_space_(heap, kFromSpace),
-      reservation_(),
-      inline_allocation_limit_step_(0) {}
+      to_space_(heap),
+      from_space_(heap) {}
 
   // Sets up the new space using the given chunk.
-  bool SetUp(int reserved_semispace_size_, int max_semispace_size);
+  bool Setup(Address start, int size);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
   void TearDown();
 
   // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() {
-    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
+  bool HasBeenSetup() {
+    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
   }
 
   // Flip the pair of spaces.
@@ -2088,30 +1480,18 @@
     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
         == reinterpret_cast<uintptr_t>(start_);
   }
-
   bool Contains(Object* o) {
-    Address a = reinterpret_cast<Address>(o);
-    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
+    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   }
 
   // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() {
-    return pages_used_ * NewSpacePage::kAreaSize +
-        static_cast<int>(top() - to_space_.page_low());
-  }
-
+  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
   // The same, but returning an int.  We have to have the one that returns
   // intptr_t because it is inherited, but if we know we are dealing with the
   // new space, which can't get as big as the other spaces, then this is useful:
   int SizeAsInt() { return static_cast<int>(Size()); }
 
   // Return the current capacity of a semispace.
-  intptr_t EffectiveCapacity() {
-    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
-  }
-
-  // Return the current capacity of a semispace.
   intptr_t Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
@@ -2123,10 +1503,8 @@
     return Capacity();
   }
 
-  // Return the available bytes without growing.
-  intptr_t Available() {
-    return Capacity() - Size();
-  }
+  // Return the available bytes without growing in the active semispace.
+  intptr_t Available() { return Capacity() - Size(); }
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -2141,12 +1519,9 @@
   }
 
   // Return the address of the allocation pointer in the active semispace.
-  Address top() {
-    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
-    return allocation_info_.top;
-  }
+  Address top() { return allocation_info_.top; }
   // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_.space_start(); }
+  Address bottom() { return to_space_.low(); }
 
   // Get the age mark of the inactive semispace.
   Address age_mark() { return from_space_.age_mark(); }
@@ -2158,68 +1533,54 @@
   Address start() { return start_; }
   uintptr_t mask() { return address_mask_; }
 
-  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
-    ASSERT(Contains(addr));
-    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
-           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
-    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
-  }
-
-  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
-    return reinterpret_cast<Address>(index << kPointerSizeLog2);
-  }
-
   // The allocation top and limit addresses.
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }
 
-  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
+  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &allocation_info_);
+  }
+
+  // Allocate the requested number of bytes for relocation during mark-compact
+  // collection.
+  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
+    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+  }
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
+  // Reset the relocation pointer to the bottom of the inactive semispace in
+  // preparation for mark-compact collection.
+  void MCResetRelocationInfo();
+  // Update the allocation pointer in the active semispace after a
+  // mark-compact collection.
+  void MCCommitRelocationInfo();
 
-  void LowerInlineAllocationLimit(intptr_t step) {
-    inline_allocation_limit_step_ = step;
-    if (step == 0) {
-      allocation_info_.limit = to_space_.page_high();
-    } else {
-      allocation_info_.limit = Min(
-          allocation_info_.top + inline_allocation_limit_step_,
-          allocation_info_.limit);
-    }
-    top_on_previous_step_ = allocation_info_.top;
+  // Get the extent of the inactive semispace (for use as a marking stack).
+  Address FromSpaceLow() { return from_space_.low(); }
+  Address FromSpaceHigh() { return from_space_.high(); }
+
+  // Get the extent of the active semispace (to sweep newly copied objects
+  // during a scavenge collection).
+  Address ToSpaceLow() { return to_space_.low(); }
+  Address ToSpaceHigh() { return to_space_.high(); }
+
+  // Offsets from the beginning of the semispaces.
+  int ToSpaceOffsetForAddress(Address a) {
+    return to_space_.SpaceOffsetForAddress(a);
   }
-
-  // Get the extent of the inactive semispace (for use as a marking stack,
-  // or to zap it). Notice: space-addresses are not necessarily on the
-  // same page, so FromSpaceStart() might be above FromSpaceEnd().
-  Address FromSpacePageLow() { return from_space_.page_low(); }
-  Address FromSpacePageHigh() { return from_space_.page_high(); }
-  Address FromSpaceStart() { return from_space_.space_start(); }
-  Address FromSpaceEnd() { return from_space_.space_end(); }
-
-  // Get the extent of the active semispace's pages' memory.
-  Address ToSpaceStart() { return to_space_.space_start(); }
-  Address ToSpaceEnd() { return to_space_.space_end(); }
-
-  inline bool ToSpaceContains(Address address) {
-    return to_space_.Contains(address);
-  }
-  inline bool FromSpaceContains(Address address) {
-    return from_space_.Contains(address);
+  int FromSpaceOffsetForAddress(Address a) {
+    return from_space_.SpaceOffsetForAddress(a);
   }
 
   // True if the object is a heap object in the address range of the
   // respective semispace (not necessarily below the allocation pointer of the
   // semispace).
-  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-  // Try to switch the active semispace to a new, empty, page.
-  // Returns false if this isn't possible or reasonable (i.e., there
-  // are no pages, or the current page is already empty), or true
-  // if successful.
-  bool AddFreshPage();
+  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
+  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
 
   virtual bool ReserveSpace(int bytes);
 
@@ -2259,24 +1620,10 @@
     return from_space_.Uncommit();
   }
 
-  inline intptr_t inline_allocation_limit_step() {
-    return inline_allocation_limit_step_;
-  }
-
-  SemiSpace* active_space() { return &to_space_; }
-
  private:
-  // Update allocation info to match the current to-space page.
-  void UpdateAllocationInfo();
-
-  Address chunk_base_;
-  uintptr_t chunk_size_;
-
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
-  VirtualMemory reservation_;
-  int pages_used_;
 
   // Start address and bit mask for containment testing.
   Address start_;
@@ -2287,19 +1634,15 @@
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
-
-  // When incremental marking is active we will set allocation_info_.limit
-  // to be lower than actual limit and then will gradually increase it
-  // in steps to guarantee that we do incremental marking steps even
-  // when all allocation is performed from inlined generated code.
-  intptr_t inline_allocation_limit_step_;
-
-  Address top_on_previous_step_;
+  AllocationInfo mc_forwarding_info_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
+  // Implementation of AllocateRaw and MCAllocateRaw.
+  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
+      int size_in_bytes,
+      AllocationInfo* alloc_info);
 
   friend class SemiSpaceIterator;
 
@@ -2309,6 +1652,193 @@
 
 
 // -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline Address next(Heap* heap);
+  inline void set_next(Heap* heap, Address next);
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.
+class OldSpaceFreeList BASE_EMBEDDED {
+ public:
+  OldSpaceFreeList(Heap* heap, AllocationSpace owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() { return available_; }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // ie, its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // number of bytes lost to fragmentation is returned in the output parameter
+  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
+
+  void MarkNodes();
+
+ private:
+  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
+  // will always result in waste.)
+  static const int kMinBlockSize = 2 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+  Heap* heap_;
+
+  // The identity of the owning space, for building allocation Failure
+  // objects.
+  AllocationSpace owner_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  // Blocks are put on exact free lists in an array, indexed by size in words.
+  // The available sizes are kept in a list ordered by increasing size. Entries
+  // corresponding to sizes < kMinBlockSize always have an empty free list
+  // (but index kHead is used for the head of the size list).
+  struct SizeNode {
+    // Address of the head FreeListNode of the implied block size or NULL.
+    Address head_node_;
+    // Size (words) of the next larger available size if head_node_ != NULL.
+    int next_size_;
+  };
+  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
+  SizeNode free_[kFreeListsLength];
+
+  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
+  static const int kHead = kMinBlockSize / kPointerSize - 1;
+  static const int kEnd = kMaxInt;
+
+  // We keep a "finger" in the size list to speed up a common pattern:
+  // repeated requests for the same or increasing sizes.
+  int finger_;
+
+  // Starting from *prev, find and return the smallest size >= index (words),
+  // or kEnd. Update *prev to be the largest size < index, or kHead.
+  int FindSize(int index, int* prev) {
+    int cur = free_[*prev].next_size_;
+    while (cur < index) {
+      *prev = cur;
+      cur = free_[cur].next_size_;
+    }
+    return cur;
+  }
+
+  // Remove an existing element from the size list.
+  void RemoveSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur == index);
+    free_[prev].next_size_ = free_[cur].next_size_;
+    finger_ = prev;
+  }
+
+  // Insert a new element into the size list.
+  void InsertSize(int index) {
+    int prev = kHead;
+    int cur = FindSize(index, &prev);
+    ASSERT(cur != index);
+    free_[prev].next_size_ = index;
+    free_[index].next_size_ = cur;
+  }
+
+  // The size list is not updated during a sequence of calls to Free, but is
+  // rebuilt before the next allocation.
+  void RebuildSizeList();
+  bool needs_rebuild_;
+
+#ifdef DEBUG
+  // Does this free list contain a free block located at the address of 'node'?
+  bool Contains(FreeListNode* node);
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
+};
+
+
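[Editor's note: OldSpaceFreeList's size bookkeeping above -- exact per-size buckets, an ordered list of non-empty sizes, and a search finger -- can be hard to picture from the field comments alone. A toy version follows, with assumed bounds rather than V8's constants. An allocator on top of this would pop the head node of free_[FindSize(needed, &prev)] and put any remainder back on a smaller list.]

#include <cassert>

static const int kBuckets = 64;
static const int kHead = 0;       // sentinel before the smallest real size
static const int kEnd = 1 << 30;  // sentinel after the largest real size

struct Bucket { void* head_node_; int next_size_; };

struct SizeList {
  Bucket free_[kBuckets];
  SizeList() {
    for (int i = 0; i < kBuckets; i++) free_[i] = { nullptr, 0 };
    free_[kHead].next_size_ = kEnd;  // empty list: head links straight to end
  }
  // Smallest registered size >= index; *prev is left at its predecessor, so
  // a caller can cache it as a "finger" for the next, larger request.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) { *prev = cur; cur = free_[cur].next_size_; }
    return cur;
  }
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    assert(cur != index);            // size must not be registered twice
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }
};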
+// The free list for the map space.
+class FixedSizeFreeList BASE_EMBEDDED {
+ public:
+  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() { return available_; }
+
+  // Place a node on the free list.  The block starting at 'start' (assumed to
+  // have size object_size_) is placed on the free list.  Bookkeeping
+  // information will be written to the block, i.e., its contents will be
+  // destroyed.  The start address should be word aligned.
+  void Free(Address start);
+
+  // Allocate a fixed-sized block from the free list.  The block is
+  // uninitialized.  A failure is returned if no block is available.
+  MUST_USE_RESULT MaybeObject* Allocate();
+
+  void MarkNodes();
+
+ private:
+  Heap* heap_;
+
+  // Available bytes on the free list.
+  intptr_t available_;
+
+  // The head of the free list.
+  Address head_;
+
+  // The tail of the free list.
+  Address tail_;
+
+  // The identity of the owning space, for building allocation Failure
+  // objects.
+  AllocationSpace owner_;
+
+  // The size of the objects in this space.
+  int object_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
+};
+
+
+// -----------------------------------------------------------------------------
 // Old object space (excluding map objects)
 
 class OldSpace : public PagedSpace {
@@ -2319,28 +1849,71 @@
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
+      : PagedSpace(heap, max_capacity, id, executable),
+        free_list_(heap, id) {
     page_extra_ = 0;
   }
 
+  // The bytes available on the free list (i.e., not above the linear
+  // allocation pointer).
+  intptr_t AvailableFree() { return free_list_.available(); }
+
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->area_end();
+    return page->ObjectAreaEnd();
   }
 
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.
+  // If add_to_freelist is false, only the accounting stats are updated; no
+  // attempt is made to add the area to the free list.
+  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
+    accounting_stats_.DeallocateBytes(size_in_bytes);
+
+    if (add_to_freelist) {
+      int wasted_bytes = free_list_.Free(start, size_in_bytes);
+      accounting_stats_.WasteBytes(wasted_bytes);
+    }
+  }
+
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist);
+
+  // Prepare for full garbage collection.  Resets the relocation pointer and
+  // clears the free list.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo();
+
+  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+  void MarkFreeListNodes() { free_list_.MarkNodes(); }
+
+#ifdef DEBUG
+  // Reports statistics for the space
+  void ReportStatistics();
+#endif
+
+ protected:
+  // Virtual function in the superclass.  Slow path of AllocateRaw.
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  // Virtual function in the superclass.  Allocate linearly at the start of
+  // the page after current_page (there is assumed to be one).
+  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
+ private:
+  // The space's free list.
+  OldSpaceFreeList free_list_;
+
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_ASSERT((space).page_low() <= (info).top             \
-              && (info).top <= (space).page_high()         \
-              && (info).limit <= (space).page_high())
-
-
 // -----------------------------------------------------------------------------
 // Old space for objects of a fixed size
 
@@ -2353,21 +1926,56 @@
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) {
-    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
+        name_(name),
+        free_list_(heap, id, object_size_in_bytes) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   }
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->area_end() - page_extra_;
+    return page->ObjectAreaEnd() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
+  // Give a fixed-sized block of memory to the space's free list.
+  // If add_to_freelist is false, only the accounting stats are updated; no
+  // attempt is made to add the area to the free list.
+  void Free(Address start, bool add_to_freelist) {
+    if (add_to_freelist) {
+      free_list_.Free(start);
+    }
+    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
+  }
+
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact();
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Updates the allocation pointer to the relocation top after a mark-compact
+  // collection.
+  virtual void MCCommitRelocationInfo();
+
+  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
+  virtual void DeallocateBlock(Address start,
+                               int size_in_bytes,
+                               bool add_to_freelist);
+
+  void MarkFreeListNodes() { free_list_.MarkNodes(); }
+
+#ifdef DEBUG
+  // Reports statistics for the space
+  void ReportStatistics();
+#endif
 
  protected:
+  // Virtual function in the superclass.  Slow path of AllocateRaw.
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  // Virtual function in the superclass.  Allocate linearly at the start of
+  // the page after current_page (there is assumed to be one).
+  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+
   void ResetFreeList() {
     free_list_.Reset();
   }
@@ -2378,6 +1986,9 @@
 
   // The name of this space.
   const char* name_;
+
+  // The space's free list.
+  FixedSizeFreeList free_list_;
 };
 
 
@@ -2387,21 +1998,89 @@
 class MapSpace : public FixedSpace {
  public:
   // Creates a map space object with a maximum capacity.
-  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+  MapSpace(Heap* heap,
+           intptr_t max_capacity,
+           int max_map_space_pages,
+           AllocationSpace id)
       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
-        max_map_space_pages_(kMaxMapPageIndex - 1) {
+        max_map_space_pages_(max_map_space_pages) {
+    ASSERT(max_map_space_pages < kMaxMapPageIndex);
   }
 
-  // Given an index, returns the page address.
-  // TODO(1600): this limit is artifical just to keep code compilable
-  static const int kMaxMapPageIndex = 1 << 16;
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact);
 
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (IsPowerOf2(Map::kSize)) {
-      return RoundDown(size, Map::kSize);
-    } else {
-      return (size / Map::kSize) * Map::kSize;
+  // Given an index, returns the page address.
+  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
+
+  // Are map pointers encodable into the map word?
+  bool MapPointersEncodable() {
+    if (!FLAG_use_big_map_space) {
+      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
+      return true;
     }
+    return CountPagesToTop() <= max_map_space_pages_;
+  }
+
+  // Should be called after a forced sweep to find out whether the map space
+  // needs compaction.
+  bool NeedsCompaction(int live_maps) {
+    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
+  }
+
+  Address TopAfterCompaction(int live_maps) {
+    ASSERT(NeedsCompaction(live_maps));
+
+    int pages_left = live_maps / kMapsPerPage;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (pages_left-- > 0) {
+      ASSERT(it.has_next());
+      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    }
+    ASSERT(it.has_next());
+    Page* top_page = it.next();
+    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
+    ASSERT(top_page->is_valid());
+
+    int offset = live_maps % kMapsPerPage * Map::kSize;
+    Address top = top_page->ObjectAreaStart() + offset;
+    ASSERT(top < top_page->ObjectAreaEnd());
+    ASSERT(Contains(top));
+
+    return top;
+  }
+
+  void FinishCompaction(Address new_top, int live_maps) {
+    Page* top_page = Page::FromAddress(new_top);
+    ASSERT(top_page->is_valid());
+
+    SetAllocationInfo(&allocation_info_, top_page);
+    allocation_info_.top = new_top;
+
+    int new_size = live_maps * Map::kSize;
+    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+    accounting_stats_.AllocateBytes(new_size);
+
+    // Flush allocation watermarks.
+    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+      p->SetAllocationWatermark(p->AllocationTop());
+    }
+    top_page->SetAllocationWatermark(new_top);
+
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      intptr_t actual_size = 0;
+      for (Page* p = first_page_; p != top_page; p = p->next_page())
+        actual_size += kMapsPerPage * Map::kSize;
+      actual_size += (new_top - top_page->ObjectAreaStart());
+      ASSERT(accounting_stats_.Size() == actual_size);
+    }
+#endif
+
+    Shrink();
+    ResetFreeList();
   }
 
  protected:
@@ -2410,7 +2089,7 @@
 #endif
 
  private:
-  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
@@ -2419,6 +2098,9 @@
 
   const int max_map_space_pages_;
 
+  // An array of page start addresses in the map space.
+  Address page_addresses_[kMaxMapPageIndex];
+
  public:
   TRACK_MEMORY("MapSpace")
 };
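[Editor's note: TopAfterCompaction above is mostly page arithmetic. Spelled out with illustrative numbers -- the real Map::kSize and page object area differ:]

#include <cstdio>

int main() {
  const int kMapSize = 88;               // illustrative, not the real Map::kSize
  const int kPageArea = 8 * 1024 - 256;  // illustrative page object area
  const int kMapsPerPage = kPageArea / kMapSize;

  // After compaction, live_maps maps occupy this many full pages plus a
  // partial top page whose allocation top sits top_offset bytes in.
  int live_maps = 1000;
  int full_pages = live_maps / kMapsPerPage;
  int top_offset = (live_maps % kMapsPerPage) * kMapSize;
  std::printf("%d full pages, top at offset %d on the next page\n",
              full_pages, top_offset);
}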
@@ -2434,14 +2116,6 @@
       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
   {}
 
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
-      return RoundDown(size, JSGlobalPropertyCell::kSize);
-    } else {
-      return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
-    }
-  }
-
  protected:
 #ifdef DEBUG
   virtual void VerifyObject(HeapObject* obj);
@@ -2459,26 +2133,81 @@
 // A large object always starts at Page::kObjectStartOffset into a page.
 // Large objects do not move during garbage collections.
 
-class LargeObjectSpace : public Space {
+// A LargeObjectChunk holds exactly one large object page with exactly one
+// large object.
+class LargeObjectChunk {
  public:
-  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
-  virtual ~LargeObjectSpace() {}
+  // Allocates a new LargeObjectChunk that contains a large object page
+  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
+  // object) bytes after the object area start of that page.
+  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
 
-  // Initializes internal data structures.
-  bool SetUp();
+  // Free the memory associated with the chunk.
+  void Free(Executability executable);
 
-  // Releases internal resources, frees objects in this space.
-  void TearDown();
+  // Interpret a raw address as a large object chunk.
+  static LargeObjectChunk* FromAddress(Address address) {
+    return reinterpret_cast<LargeObjectChunk*>(address);
+  }
 
+  // Returns the address of this chunk.
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  Page* GetPage() {
+    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
+  }
+
+  // Accessors for the fields of the chunk.
+  LargeObjectChunk* next() { return next_; }
+  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
+  size_t size() { return size_ & ~Page::kPageFlagMask; }
+
+  // Compute the start address in the chunk.
+  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
+
+  // Returns the object in this chunk.
+  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
+
+  // Given a requested size, returns the physical size of the chunk to be
+  // allocated.
+  static int ChunkSizeFor(int size_in_bytes);
+
+  // Given a chunk size, returns the object size it can accommodate.  Used by
+  // LargeObjectSpace::Available.
   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
   }
 
-  // Shared implementation of AllocateRaw, AllocateRawCode and
-  // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
-                                           Executability executable);
+ private:
+  // A pointer to the next large object chunk in the space or NULL.
+  LargeObjectChunk* next_;
+
+  // The total size of this chunk.
+  size_t size_;
+
+ public:
+  TRACK_MEMORY("LargeObjectChunk")
+};
+
+
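[Editor's note: ObjectSizeFor above inverts the padding that a chunk adds around a large object -- one page of alignment slack plus the object start offset. A small self-check, using assumed values for the two page constants:]

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kPageSize = 1 << 13;       // assumed page size
  const intptr_t kObjectStartOffset = 256;  // assumed header room

  // Mirrors LargeObjectChunk::ObjectSizeFor from the code above.
  auto ObjectSizeFor = [&](intptr_t chunk_size) -> intptr_t {
    if (chunk_size <= (kPageSize + kObjectStartOffset)) return 0;
    return chunk_size - kPageSize - kObjectStartOffset;
  };

  assert(ObjectSizeFor(kPageSize) == 0);  // too small to hold any object
  assert(ObjectSizeFor(2 * kPageSize) == kPageSize - kObjectStartOffset);
}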
+class LargeObjectSpace : public Space {
+ public:
+  LargeObjectSpace(Heap* heap, AllocationSpace id);
+  virtual ~LargeObjectSpace() {}
+
+  // Initializes internal data structures.
+  bool Setup();
+
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
+
+  // Allocates a (non-FixedArray, non-Code) large object.
+  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
+  // Allocates a large Code object.
+  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
+  // Allocates a large FixedArray.
+  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
@@ -2500,9 +2229,12 @@
   // space, may be slow.
   MaybeObject* FindObject(Address a);
 
-  // Finds a large object page containing the given address, returns NULL
+  // Finds a large object page containing the given pc, returns NULL
   // if such a page doesn't exist.
-  LargePage* FindPage(Address a);
+  LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+  // Iterates over the objects covered by dirty regions.
+  void IterateDirtyRegions(ObjectSlotCallback func);
 
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
@@ -2511,15 +2243,13 @@
   bool Contains(HeapObject* obj);
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_page_ == NULL; }
+  bool IsEmpty() { return first_chunk_ == NULL; }
 
   // See the comments for ReserveSpace in the Space class.  This has to be
   // called after ReserveSpace has been called on the paged spaces, since they
   // may use some memory, leaving less for large objects.
   virtual bool ReserveSpace(int bytes);
 
-  LargePage* first_page() { return first_page_; }
-
 #ifdef DEBUG
   virtual void Verify();
   virtual void Print();
@@ -2531,14 +2261,17 @@
   bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
 
  private:
-  intptr_t max_capacity_;
   // The head of the linked list of large object chunks.
-  LargePage* first_page_;
+  LargeObjectChunk* first_chunk_;
   intptr_t size_;  // allocated bytes
   int page_count_;  // number of chunks
   intptr_t objects_size_;  // size of objects
-  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
-  HashMap chunk_map_;
+
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
+                                                   int object_size,
+                                                   Executability executable);
 
   friend class LargeObjectIterator;
 
@@ -2552,78 +2285,17 @@
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
-  HeapObject* Next();
+  HeapObject* next();
 
   // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return Next(); }
+  virtual HeapObject* next_object() { return next(); }
 
  private:
-  LargePage* current_;
+  LargeObjectChunk* current_;
   HeapObjectCallback size_func_;
 };
 
 
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
- public:
-  inline explicit PointerChunkIterator(Heap* heap);
-
-  // Return NULL when the iterator is done.
-  MemoryChunk* next() {
-    switch (state_) {
-      case kOldPointerState: {
-        if (old_pointer_iterator_.has_next()) {
-          return old_pointer_iterator_.next();
-        }
-        state_ = kMapState;
-        // Fall through.
-      }
-      case kMapState: {
-        if (map_iterator_.has_next()) {
-          return map_iterator_.next();
-        }
-        state_ = kLargeObjectState;
-        // Fall through.
-      }
-      case kLargeObjectState: {
-        HeapObject* heap_object;
-        do {
-          heap_object = lo_iterator_.Next();
-          if (heap_object == NULL) {
-            state_ = kFinishedState;
-            return NULL;
-          }
-          // Fixed arrays are the only pointer-containing objects in large
-          // object space.
-        } while (!heap_object->IsFixedArray());
-        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
-        return answer;
-      }
-      case kFinishedState:
-        return NULL;
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return NULL;
-  }
-
-
- private:
-  enum State {
-    kOldPointerState,
-    kMapState,
-    kLargeObjectState,
-    kFinishedState
-  };
-  State state_;
-  PageIterator old_pointer_iterator_;
-  PageIterator map_iterator_;
-  LargeObjectIterator lo_iterator_;
-};
-
-
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
diff --git a/src/splay-tree-inl.h b/src/splay-tree-inl.h
index 4640ed5..9c2287e 100644
--- a/src/splay-tree-inl.h
+++ b/src/splay-tree-inl.h
@@ -45,7 +45,7 @@
 bool SplayTree<Config, Allocator>::Insert(const Key& key, Locator* locator) {
   if (is_empty()) {
     // If the tree is empty, insert the new node.
-    root_ = new Node(key, Config::NoValue());
+    root_ = new Node(key, Config::kNoValue);
   } else {
     // Splay on the key to move the last node on the search path
     // for the key to the root of the tree.
@@ -57,7 +57,7 @@
       return false;
     }
     // Insert the new node.
-    Node* node = new Node(key, Config::NoValue());
+    Node* node = new Node(key, Config::kNoValue);
     InsertInternal(cmp, node);
   }
   locator->bind(root_);
@@ -226,7 +226,7 @@
 void SplayTree<Config, Allocator>::Splay(const Key& key) {
   if (is_empty())
     return;
-  Node dummy_node(Config::kNoKey, Config::NoValue());
+  Node dummy_node(Config::kNoKey, Config::kNoValue);
   // Create a dummy node.  The use of the dummy node is a bit
   // counter-intuitive: The right child of the dummy node will hold
   // the L tree of the algorithm.  The left child of the dummy node
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
deleted file mode 100644
index dd65cbc..0000000
--- a/src/store-buffer-inl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-Address StoreBuffer::TopAddress() {
-  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
-void StoreBuffer::Mark(Address addr) {
-  ASSERT(!heap_->cell_space()->Contains(addr));
-  ASSERT(!heap_->code_space()->Contains(addr));
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  *top++ = addr;
-  heap_->public_set_store_buffer_top(top);
-  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
-    ASSERT(top == limit_);
-    Compact();
-  } else {
-    ASSERT(top < limit_);
-  }
-}
-
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
-  if (store_buffer_rebuilding_enabled_) {
-    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
-                !heap_->code_space()->Contains(addr) &&
-                !heap_->old_data_space()->Contains(addr) &&
-                !heap_->new_space()->Contains(addr));
-    Address* top = old_top_;
-    *top++ = addr;
-    old_top_ = top;
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    if (top >= old_limit_) {
-      ASSERT(callback_ != NULL);
-      (*callback_)(heap_,
-                   MemoryChunk::FromAnyPointerAddress(addr),
-                   kStoreBufferFullEvent);
-    }
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_STORE_BUFFER_INL_H_
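[Editor's note: the deleted Mark() above relies on an address trick worth spelling out. The buffer is a power-of-two size and is placed on a boundary of twice that size, so "buffer full" shows up as a single bit in the top pointer itself -- one AND instead of a limit compare. A standalone sketch with an assumed buffer size:]

#include <cassert>
#include <cstdint>
#include <cstdlib>

int main() {
  const uintptr_t kSize = 1 << 12;       // buffer size, a power of two
  const uintptr_t kOverflowBit = kSize;  // the bit that flips on overflow

  // Over-allocate so the buffer start can be rounded up to a 2 * kSize
  // boundary; the aligned start then has the overflow bit clear.
  void* raw = std::malloc(3 * kSize);
  uintptr_t start = (reinterpret_cast<uintptr_t>(raw) + 2 * kSize - 1)
                    & ~(2 * kSize - 1);
  assert((start & kOverflowBit) == 0);

  uintptr_t top = start + kSize;          // one past the last slot
  assert((top & kOverflowBit) != 0);      // overflow is now a one-bit test
  std::free(raw);
}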
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
deleted file mode 100644
index 3852155..0000000
--- a/src/store-buffer.cc
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "store-buffer.h"
-#include "store-buffer-inl.h"
-#include "v8-counters.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap),
-      start_(NULL),
-      limit_(NULL),
-      old_start_(NULL),
-      old_limit_(NULL),
-      old_top_(NULL),
-      old_reserved_limit_(NULL),
-      old_buffer_is_sorted_(false),
-      old_buffer_is_filtered_(false),
-      during_gc_(false),
-      store_buffer_rebuilding_enabled_(false),
-      callback_(NULL),
-      may_move_store_buffer_entries_(true),
-      virtual_memory_(NULL),
-      hash_set_1_(NULL),
-      hash_set_2_(NULL),
-      hash_sets_are_empty_(true) {
-}
-
-
-void StoreBuffer::SetUp() {
-  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
-  uintptr_t start_as_int =
-      reinterpret_cast<uintptr_t>(virtual_memory_->address());
-  start_ =
-      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
-  limit_ = start_ + (kStoreBufferSize / kPointerSize);
-
-  old_virtual_memory_ =
-      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
-  old_top_ = old_start_ =
-      reinterpret_cast<Address*>(old_virtual_memory_->address());
-  // Don't know the alignment requirements of the OS, but it is certainly not
-  // less than 0xfff.
-  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
-  ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldStoreBufferLength);
-  old_limit_ = old_start_ + initial_length;
-  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
-
-  CHECK(old_virtual_memory_->Commit(
-            reinterpret_cast<void*>(old_start_),
-            (old_limit_ - old_start_) * kPointerSize,
-            false));
-
-  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
-  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
-  Address* vm_limit = reinterpret_cast<Address*>(
-      reinterpret_cast<char*>(virtual_memory_->address()) +
-          virtual_memory_->size());
-  ASSERT(start_ <= vm_limit);
-  ASSERT(limit_ <= vm_limit);
-  USE(vm_limit);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
-         0);
-
-  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                                kStoreBufferSize,
-                                false));  // Not executable.
-  heap_->public_set_store_buffer_top(start_);
-
-  hash_set_1_ = new uintptr_t[kHashSetLength];
-  hash_set_2_ = new uintptr_t[kHashSetLength];
-  hash_sets_are_empty_ = false;
-
-  ClearFilteringHashSets();
-}
-
-
-void StoreBuffer::TearDown() {
-  delete virtual_memory_;
-  delete old_virtual_memory_;
-  delete[] hash_set_1_;
-  delete[] hash_set_2_;
-  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
-  start_ = limit_ = NULL;
-  heap_->public_set_store_buffer_top(start_);
-}
-
-
-void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
-  isolate->heap()->store_buffer()->Compact();
-}
-
-
-#if V8_TARGET_ARCH_X64
-static int CompareAddresses(const void* void_a, const void* void_b) {
-  intptr_t a =
-      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
-  intptr_t b =
-      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
-  // Unfortunately if int is smaller than intptr_t there is no branch-free
-  // way to return a number with the same sign as the difference between the
-  // pointers.
-  if (a == b) return 0;
-  if (a < b) return -1;
-  ASSERT(a > b);
-  return 1;
-}
-#else
-static int CompareAddresses(const void* void_a, const void* void_b) {
-  intptr_t a =
-      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
-  intptr_t b =
-      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
-  ASSERT(sizeof(1) == sizeof(a));
-  // Shift down to avoid wraparound.
-  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
-}
-#endif
-
-
-void StoreBuffer::Uniq() {
-  // Remove adjacent duplicates and cells that do not point at new space.
-  Address previous = NULL;
-  Address* write = old_start_;
-  ASSERT(may_move_store_buffer_entries_);
-  for (Address* read = old_start_; read < old_top_; read++) {
-    Address current = *read;
-    if (current != previous) {
-      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
-        *write++ = current;
-      }
-    }
-    previous = current;
-  }
-  old_top_ = write;
-}
-
-
-void StoreBuffer::EnsureSpace(intptr_t space_needed) {
-  while (old_limit_ - old_top_ < space_needed &&
-         old_limit_ < old_reserved_limit_) {
-    size_t grow = old_limit_ - old_start_;  // Double size.
-    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
-                                      grow * kPointerSize,
-                                      false));
-    old_limit_ += grow;
-  }
-
-  if (old_limit_ - old_top_ >= space_needed) return;
-
-  if (old_buffer_is_filtered_) return;
-  ASSERT(may_move_store_buffer_entries_);
-  Compact();
-
-  old_buffer_is_filtered_ = true;
-  bool page_has_scan_on_scavenge_flag = false;
-
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  // If filtering out the entries from scan_on_scavenge pages got us down to
-  // less than half full, then we are satisfied with that.
-  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] =  {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != 0 || old_top_ == old_start_);
-    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-  }
-  UNREACHABLE();
-}
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    chunk->set_store_buffer_counter(0);
-  }
-  bool created_new_scan_on_scavenge_pages = false;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
-    }
-    int old_counter = containing_chunk->store_buffer_counter();
-    if (old_counter == threshold) {
-      containing_chunk->set_scan_on_scavenge(true);
-      created_new_scan_on_scavenge_pages = true;
-    }
-    containing_chunk->set_store_buffer_counter(old_counter + 1);
-    previous_chunk = containing_chunk;
-  }
-  if (created_new_scan_on_scavenge_pages) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-  old_buffer_is_filtered_ = true;
-}
-
-
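[Editor's note: EnsureSpace and ExemptPopularPages above implement a cheap popularity estimate -- sample the buffer at a prime stride and exile any page whose sampled count crosses a threshold. A rough model, using a hash map in place of the real per-chunk counters:]

#include <cstdint>
#include <unordered_map>
#include <vector>

static const uintptr_t kPageMask = ~(uintptr_t{1 << 13} - 1);  // assumed pages

std::vector<uintptr_t> ExemptPopular(const std::vector<uintptr_t>& slots,
                                     int prime_step, int threshold,
                                     std::unordered_map<uintptr_t, bool>* scan) {
  // Sample every prime_step-th slot and count hits per page.
  std::unordered_map<uintptr_t, int> counter;
  for (size_t i = 0; i < slots.size(); i += prime_step) {
    uintptr_t page = slots[i] & kPageMask;
    if (++counter[page] > threshold) (*scan)[page] = true;  // scan-on-scavenge
  }
  // Filter: keep only entries on pages that were not flagged.
  std::vector<uintptr_t> kept;
  for (uintptr_t slot : slots)
    if (!(*scan)[slot & kPageMask]) kept.push_back(slot);
  return kept;
}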
-void StoreBuffer::Filter(int flag) {
-  Address* new_top = old_start_;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p++) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
-      previous_chunk = containing_chunk;
-    }
-    if (!containing_chunk->IsFlagSet(flag)) {
-      *new_top++ = addr;
-    }
-  }
-  old_top_ = new_top;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
-void StoreBuffer::SortUniq() {
-  Compact();
-  if (old_buffer_is_sorted_) return;
-  qsort(reinterpret_cast<void*>(old_start_),
-        old_top_ - old_start_,
-        sizeof(*old_top_),
-        &CompareAddresses);
-  Uniq();
-
-  old_buffer_is_sorted_ = true;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
-  Compact();
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  bool page_has_scan_on_scavenge_flag = false;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  // Filtering hash sets are inconsistent with the store buffer after
-  // iteration.
-  ClearFilteringHashSets();
-
-  return page_has_scan_on_scavenge_flag;
-}
-
-
-#ifdef DEBUG
-void StoreBuffer::Clean() {
-  ClearFilteringHashSets();
-  Uniq();  // Also removes things that no longer point to new space.
-  CheckForFullBuffer();
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
-  if (!FLAG_enable_slow_asserts) return true;
-  if (in_store_buffer_1_element_cache != NULL &&
-      *in_store_buffer_1_element_cache == cell_address) {
-    return true;
-  }
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  for (Address* current = top - 1; current >= start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  return false;
-}
-#endif
-
-
-void StoreBuffer::ClearFilteringHashSets() {
-  if (!hash_sets_are_empty_) {
-    memset(reinterpret_cast<void*>(hash_set_1_),
-           0,
-           sizeof(uintptr_t) * kHashSetLength);
-    memset(reinterpret_cast<void*>(hash_set_2_),
-           0,
-           sizeof(uintptr_t) * kHashSetLength);
-    hash_sets_are_empty_ = true;
-  }
-}
-
-
-void StoreBuffer::GCPrologue() {
-  ClearFilteringHashSets();
-  during_gc_ = true;
-}
-
-
-#ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
-  // Do nothing.
-}
-
-
-void StoreBuffer::VerifyPointers(PagedSpace* space,
-                                 RegionCallback region_callback) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    FindPointersToNewSpaceOnPage(
-        reinterpret_cast<PagedSpace*>(page->owner()),
-        page,
-        region_callback,
-        &DummyScavengePointer);
-  }
-}
-
-
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        heap_->InNewSpace(*slot);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef DEBUG
-  VerifyPointers(heap_->old_pointer_space(),
-                 &StoreBuffer::FindPointersToNewSpaceInRegion);
-  VerifyPointers(heap_->map_space(),
-                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
-  VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
-  during_gc_ = false;
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start, Address end, ObjectSlotCallback slot_callback) {
-  for (Address slot_address = start;
-       slot_address < end;
-       slot_address += kPointerSize) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap_->InNewSpace(*slot)) {
-      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
-      ASSERT(object->IsHeapObject());
-      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
-      if (heap_->InNewSpace(*slot)) {
-        EnterDirectlyIntoStoreBuffer(slot_address);
-      }
-    }
-  }
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->area_start();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->area_start();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMaps(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  while (map_address < end) {
-    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    FindPointersToNewSpaceInRegion(pointer_fields_start,
-                                   pointer_fields_end,
-                                   slot_callback);
-    map_address += Map::kSize;
-  }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  ASSERT(map_aligned_start == start);
-  ASSERT(map_aligned_end == end);
-
-  FindPointersToNewSpaceInMaps(map_aligned_start,
-                               map_aligned_end,
-                               slot_callback);
-}
-
-
-// This function iterates over all the pointers in a paged space in the heap,
-// looking for pointers into new space.  Within the pages there may be dead
-// objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping.  These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words.  In
-// addition any area that has never been used at all for object allocation must
-// be marked with a free space or filler.  Because the free space and filler
-// maps do not move we can always recognize these even after a compaction.
-// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps.  The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything.  Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
-void StoreBuffer::FindPointersToNewSpaceOnPage(
-    PagedSpace* space,
-    Page* page,
-    RegionCallback region_callback,
-    ObjectSlotCallback slot_callback) {
-  Address visitable_start = page->area_start();
-  Address end_of_page = page->area_end();
-
-  Address visitable_end = visitable_start;
-
-  Object* free_space_map = heap_->free_space_map();
-  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-
-  while (visitable_end < end_of_page) {
-    Object* o = *reinterpret_cast<Object**>(visitable_end);
-    // Skip fillers but not things that look like fillers in the special
-    // garbage section which can contain anything.
-    if (o == free_space_map ||
-        o == two_pointer_filler_map ||
-        (visitable_end == space->top() && visitable_end != space->limit())) {
-      if (visitable_start != visitable_end) {
-        // After calling this the special garbage section may have moved.
-        (this->*region_callback)(visitable_start,
-                                 visitable_end,
-                                 slot_callback);
-        if (visitable_end >= space->top() && visitable_end < space->limit()) {
-          visitable_end = space->limit();
-          visitable_start = visitable_end;
-          continue;
-        }
-      }
-      if (visitable_end == space->top() && visitable_end != space->limit()) {
-        visitable_start = visitable_end = space->limit();
-      } else {
-        // At this point we are either at the start of a filler or we are at
-        // the point where the space->top() used to be before the
-        // visit_pointer_region call above.  Either way we can skip the
-        // object at the current spot:  We don't promise to visit objects
-        // allocated during heap traversal, and if space->top() moved then it
-        // must be because an object was allocated at this point.
-        visitable_start =
-            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-        visitable_end = visitable_start;
-      }
-    } else {
-      ASSERT(o != free_space_map);
-      ASSERT(o != two_pointer_filler_map);
-      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-      visitable_end += kPointerSize;
-    }
-  }
-  ASSERT(visitable_end == end_of_page);
-  if (visitable_start != visitable_end) {
-    (this->*region_callback)(visitable_start,
-                             visitable_end,
-                             slot_callback);
-  }
-}
-
-
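[Editor's note: the long comment before FindPointersToNewSpaceOnPage describes a walk that only works because dead areas carry recognizable filler headers. A toy version with explicit tags instead of map words; it assumes every filler records a size of at least one cell:]

#include <cstdint>
#include <vector>

enum CellKind { kPointer, kFillerStart };

struct Cell { CellKind kind; intptr_t value; };  // value = size for fillers

void WalkPage(const std::vector<Cell>& page,
              void (*region_callback)(size_t start, size_t end)) {
  size_t region_start = 0, i = 0;
  while (i < page.size()) {
    if (page[i].kind == kFillerStart) {
      // Flush the pointer region accumulated so far, then skip the dead area.
      if (region_start != i) region_callback(region_start, i);
      i += static_cast<size_t>(page[i].value);
      region_start = i;
    } else {
      i++;  // plain pointer cell, stays in the current region
    }
  }
  if (region_start != i) region_callback(region_start, i);
}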
-void StoreBuffer::IteratePointersInStoreBuffer(
-    ObjectSlotCallback slot_callback) {
-  Address* limit = old_top_;
-  old_top_ = old_start_;
-  {
-    DontMoveStoreBufferEntriesScope scope(this);
-    for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
-      Address* saved_top = old_top_;
-#endif
-      Object** slot = reinterpret_cast<Object**>(*current);
-      Object* object = *slot;
-      if (heap_->InFromSpace(object)) {
-        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-        if (heap_->InNewSpace(*slot)) {
-          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
-        }
-      }
-      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
-    }
-  }
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
-  // We do not sort or remove duplicated entries from the store buffer because
-  // we expect that callback will rebuild the store buffer thus removing
-  // all duplicates and pointers to old space.
-  bool some_pages_to_scan = PrepareForIteration();
-
-  // TODO(gc): we want to skip slots on evacuation candidates
-  // but we can't simply figure that out from slot address
-  // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback);
-
-  // We are done scanning all the pointers that were in the store buffer, but
-  // there may be some pages marked scan_on_scavenge that have pointers to new
-  // space that are not in the store buffer.  We must scan them now.  As we
-  // scan, the surviving pointers to new space will be added to the store
-  // buffer.  If there are still a lot of pointers to new space then we will
-  // keep the scan_on_scavenge flag on the page and discard the pointers that
-  // were added to the store buffer.  If there are not many pointers to new
-  // space left on the page we will keep the pointers in the store buffer and
-  // remove the flag from the page.
-  if (some_pages_to_scan) {
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
-    }
-    PointerChunkIterator it(heap_);
-    MemoryChunk* chunk;
-    while ((chunk = it.next()) != NULL) {
-      if (chunk->scan_on_scavenge()) {
-        chunk->set_scan_on_scavenge(false);
-        if (callback_ != NULL) {
-          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
-        }
-        if (chunk->owner() == heap_->lo_space()) {
-          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
-          HeapObject* array = large_page->GetObject();
-          ASSERT(array->IsFixedArray());
-          Address start = array->address();
-          Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback);
-        } else {
-          Page* page = reinterpret_cast<Page*>(chunk);
-          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          FindPointersToNewSpaceOnPage(
-              owner,
-              page,
-              (owner == heap_->map_space() ?
-                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
-                 &StoreBuffer::FindPointersToNewSpaceInRegion),
-              slot_callback);
-        }
-      }
-    }
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
-    }
-  }
-}
-
-
-void StoreBuffer::Compact() {
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
-  if (top == start_) return;
-
-  // There's no check of the limit in the loop below so we check here for
-  // the worst case (compaction doesn't eliminate any pointers).
-  ASSERT(top <= limit_);
-  heap_->public_set_store_buffer_top(start_);
-  EnsureSpace(top - start_);
-  ASSERT(may_move_store_buffer_entries_);
-  // Goes through the addresses in the store buffer attempting to remove
-  // duplicates.  In the interest of speed this is a lossy operation.  Some
-  // duplicates will remain.  We have two hash sets with different hash
-  // functions to reduce the number of unnecessary clashes.
-  hash_sets_are_empty_ = false;  // Hash sets are in use.
-  for (Address* current = start_; current < top; current++) {
-    ASSERT(!heap_->cell_space()->Contains(*current));
-    ASSERT(!heap_->code_space()->Contains(*current));
-    ASSERT(!heap_->old_data_space()->Contains(*current));
-    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
-    // Shift out the last bits including any tags.
-    int_addr >>= kPointerSizeLog2;
-    int hash1 =
-        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
-    if (hash_set_1_[hash1] == int_addr) continue;
-    uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
-    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
-    hash2 &= (kHashSetLength - 1);
-    if (hash_set_2_[hash2] == int_addr) continue;
-    if (hash_set_1_[hash1] == 0) {
-      hash_set_1_[hash1] = int_addr;
-    } else if (hash_set_2_[hash2] == 0) {
-      hash_set_2_[hash2] = int_addr;
-    } else {
-      // Rather than slowing down we just throw away some entries.  This will
-      // cause some duplicates to remain undetected.
-      hash_set_1_[hash1] = int_addr;
-      hash_set_2_[hash2] = 0;
-    }
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
-    ASSERT(old_top_ <= old_limit_);
-  }
-  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-  CheckForFullBuffer();
-}
-
-
-void StoreBuffer::CheckForFullBuffer() {
-  EnsureSpace(kStoreBufferSize * 2);
-}
-
-} }  // namespace v8::internal
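[Editor's note: Compact()'s duplicate filtering, reduced to its essentials -- two direct-mapped hash sets with different hash functions, and deliberate eviction instead of probing when both slots are taken. The two hash functions below are copied from the code above; everything else is simplified:]

#include <cstdint>
#include <vector>

static const int kLenLog2 = 12;
static const int kLen = 1 << kLenLog2;

std::vector<uintptr_t> LossyDedup(const std::vector<uintptr_t>& in) {
  std::vector<uintptr_t> set1(kLen, 0), set2(kLen, 0), out;
  for (uintptr_t a : in) {
    uintptr_t h1 = (a ^ (a >> kLenLog2)) & (kLen - 1);
    if (set1[h1] == a) continue;  // seen recently in set 1, drop
    uintptr_t h2 = a - (a >> kLenLog2);
    h2 ^= h2 >> (kLenLog2 * 2);
    h2 &= kLen - 1;
    if (set2[h2] == a) continue;  // seen recently in set 2, drop
    if (set1[h1] == 0) set1[h1] = a;
    else if (set2[h2] == 0) set2[h2] = a;
    else { set1[h1] = a; set2[h2] = 0; }  // evict rather than probe
    out.push_back(a);  // possibly a surviving duplicate: lossy by design
  }
  return out;
}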
diff --git a/src/store-buffer.h b/src/store-buffer.h
deleted file mode 100644
index 951a9ca..0000000
--- a/src/store-buffer.h
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
-
-#include "allocation.h"
-#include "checks.h"
-#include "globals.h"
-#include "platform.h"
-#include "v8globals.h"
-
-namespace v8 {
-namespace internal {
-
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-typedef void (StoreBuffer::*RegionCallback)(
-    Address start, Address end, ObjectSlotCallback slot_callback);
-
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
-class StoreBuffer {
- public:
-  explicit StoreBuffer(Heap* heap);
-
-  static void StoreBufferOverflow(Isolate* isolate);
-
-  inline Address TopAddress();
-
-  void SetUp();
-  void TearDown();
-
-  // This is used by the mutator to enter addresses into the store buffer.
-  inline void Mark(Address addr);
-
-  // This is used by the heap traversal to enter the addresses into the store
-  // buffer that should still be in the store buffer after GC.  It enters
-  // addresses directly into the old buffer because the GC starts by wiping the
-  // old buffer and thereafter only visits each cell once so there is no need
-  // to attempt to remove any dupes.  During the first part of a GC we
-  // are using the store buffer to access the old spaces and at the same time
-  // we are rebuilding the store buffer using this function.  There is, however
-  // no issue of overwriting the buffer we are iterating over, because this
-  // stage of the scavenge can only reduce the number of addresses in the store
-  // buffer (some objects are promoted so pointers to them do not need to be in
-  // the store buffer).  The later parts of the GC scan the pages that are
-  // exempt from the store buffer and process the promotion queue.  These steps
-  // can overflow this buffer.  We check for this and on overflow we call the
-  // callback set up with the StoreBufferRebuildScope object.
-  inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
-  // Iterates over all pointers that go from old space to new space.  It will
-  // delete the store buffer as it starts so the callback should reenter
-  // surviving old-to-new pointers into the store buffer to rebuild it.
-  void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
-  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
-  static const int kStoreBufferSize = kStoreBufferOverflowBit;
-  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
-  static const int kHashSetLengthLog2 = 12;
-  static const int kHashSetLength = 1 << kHashSetLengthLog2;
-
-  void Compact();
-
-  void GCPrologue();
-  void GCEpilogue();
-
-  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
-  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
-  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
-  void SetTop(Object*** top) {
-    ASSERT(top >= Start());
-    ASSERT(top <= Limit());
-    old_top_ = reinterpret_cast<Address*>(top);
-  }
-
-  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
-  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
-  // Goes through the store buffer removing pointers to things that have
-  // been promoted.  Rebuilds the store buffer completely if it overflowed.
-  void SortUniq();
-
-  void EnsureSpace(intptr_t space_needed);
-  void Verify();
-
-  bool PrepareForIteration();
-
-#ifdef DEBUG
-  void Clean();
-  // Slow, for asserts only.
-  bool CellIsInStoreBuffer(Address cell);
-#endif
-
-  void Filter(int flag);
-
- private:
-  Heap* heap_;
-
-  // The store buffer is divided up into a new buffer that is constantly being
-  // filled by mutator activity and an old buffer that is filled with the data
-  // from the new buffer after compression.
-  Address* start_;
-  Address* limit_;
-
-  Address* old_start_;
-  Address* old_limit_;
-  Address* old_top_;
-  Address* old_reserved_limit_;
-  VirtualMemory* old_virtual_memory_;
-
-  bool old_buffer_is_sorted_;
-  bool old_buffer_is_filtered_;
-  bool during_gc_;
-  // The garbage collector iterates over many pointers to new space that are not
-  // handled by the store buffer.  This flag indicates whether the pointers
-  // found by the callbacks should be added to the store buffer or not.
-  bool store_buffer_rebuilding_enabled_;
-  StoreBufferCallback callback_;
-  bool may_move_store_buffer_entries_;
-
-  VirtualMemory* virtual_memory_;
-
-  // Two hash sets used for filtering.
-  // If address is in the hash set then it is guaranteed to be in the
-  // old part of the store buffer.
-  uintptr_t* hash_set_1_;
-  uintptr_t* hash_set_2_;
-  bool hash_sets_are_empty_;
-
-  void ClearFilteringHashSets();
-
-  void CheckForFullBuffer();
-  void Uniq();
-  void ExemptPopularPages(int prime_sample_step, int threshold);
-
-  void FindPointersToNewSpaceInRegion(Address start,
-                                      Address end,
-                                      ObjectSlotCallback slot_callback);
-
-  // For each region of pointers on a page in use from an old space call
-  // visit_pointer_region callback.
-  // If either visit_pointer_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  void IteratePointersOnPage(
-      PagedSpace* space,
-      Page* page,
-      RegionCallback region_callback,
-      ObjectSlotCallback slot_callback);
-
-  void FindPointersToNewSpaceInMaps(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback);
-
-  void FindPointersToNewSpaceInMapsRegion(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback);
-
-  void FindPointersToNewSpaceOnPage(
-    PagedSpace* space,
-    Page* page,
-    RegionCallback region_callback,
-    ObjectSlotCallback slot_callback);
-
-  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-
-#ifdef DEBUG
-  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
-  void VerifyPointers(LargeObjectSpace* space);
-#endif
-
-  friend class StoreBufferRebuildScope;
-  friend class DontMoveStoreBufferEntriesScope;
-};
-
-
-class StoreBufferRebuildScope {
- public:
-  explicit StoreBufferRebuildScope(Heap* heap,
-                                   StoreBuffer* store_buffer,
-                                   StoreBufferCallback callback)
-      : heap_(heap),
-        store_buffer_(store_buffer),
-        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
-        stored_callback_(store_buffer->callback_) {
-    store_buffer_->store_buffer_rebuilding_enabled_ = true;
-    store_buffer_->callback_ = callback;
-    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
-  }
-
-  ~StoreBufferRebuildScope() {
-    store_buffer_->callback_ = stored_callback_;
-    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
-    store_buffer_->CheckForFullBuffer();
-  }
-
- private:
-  Heap* heap_;
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-  StoreBufferCallback stored_callback_;
-};
-
-
-class DontMoveStoreBufferEntriesScope {
- public:
-  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer),
-        stored_state_(store_buffer->may_move_store_buffer_entries_) {
-    store_buffer_->may_move_store_buffer_entries_ = false;
-  }
-
-  ~DontMoveStoreBufferEntriesScope() {
-    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
-  }
-
- private:
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_STORE_BUFFER_H_
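
Note: the two scope classes deleted above are the classic RAII save/restore idiom: the constructor records the current flag value and installs a new one, and the destructor restores the old value on every exit path. A minimal standalone sketch of the pattern (FlagScope is an illustrative name, not a V8 class):

// Save/restore scope, as used by StoreBufferRebuildScope and
// DontMoveStoreBufferEntriesScope in the deleted header.
class FlagScope {
 public:
  FlagScope(bool* flag, bool value) : flag_(flag), saved_(*flag) {
    *flag_ = value;  // install the temporary state
  }
  ~FlagScope() { *flag_ = saved_; }  // restored even on early return
 private:
  bool* flag_;
  bool saved_;
};

// Usage: entries must not be moved while the scope is alive.
// bool may_move = true;
// { FlagScope no_move(&may_move, false); /* ... iterate buffer ... */ }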
diff --git a/src/string-search.h b/src/string-search.h
index 8c3456a..1223db0 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -242,9 +242,9 @@
 
 
 template <typename PatternChar, typename SubjectChar>
-inline bool CharCompare(const PatternChar* pattern,
-                        const SubjectChar* subject,
-                        int length) {
+static inline bool CharCompare(const PatternChar* pattern,
+                               const SubjectChar* subject,
+                               int length) {
   ASSERT(length > 0);
   int pos = 0;
   do {
@@ -369,10 +369,6 @@
   shift_table[pattern_length] = 1;
   suffix_table[pattern_length] = pattern_length + 1;
 
-  if (pattern_length <= start) {
-    return;
-  }
-
   // Find suffixes.
   PatternChar last_char = pattern[pattern_length - 1];
   int suffix = pattern_length + 1;
@@ -559,10 +555,10 @@
 // object should be constructed once and the Search function then called
 // for each search.
 template <typename SubjectChar, typename PatternChar>
-int SearchString(Isolate* isolate,
-                 Vector<const SubjectChar> subject,
-                 Vector<const PatternChar> pattern,
-                 int start_index) {
+static int SearchString(Isolate* isolate,
+                        Vector<const SubjectChar> subject,
+                        Vector<const PatternChar> pattern,
+                        int start_index) {
   StringSearch<PatternChar, SubjectChar> search(isolate, pattern);
   return search.Search(subject, start_index);
 }
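
Note: this hunk restores "static" on the header-defined template helpers, giving their instantiations internal linkage (one private copy per translation unit); without it, the templates rely on the normal rule that identical instantiations across translation units are merged. A sketch of the restored call shape, with std::string_view standing in for V8's Vector and the Isolate-backed scratch tables omitted (assumptions, not V8 API):

#include <string_view>

// Simplified stand-in for SearchString: find 'pattern' in 'subject' at or
// after start_index, returning -1 when absent. The real StringSearch object
// picks a linear scan, Boyer-Moore-Horspool, or full Boyer-Moore strategy
// based on pattern length; std::string_view::find hides that choice here.
static int SearchString(std::string_view subject,
                        std::string_view pattern,
                        int start_index) {
  size_t pos = subject.find(pattern, static_cast<size_t>(start_index));
  return pos == std::string_view::npos ? -1 : static_cast<int>(pos);
}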
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 35f7be5..8086cf9 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -350,24 +350,29 @@
   }
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    if (descs->GetType(i) == FIELD) {
-      Object* key = descs->GetKey(i);
-      if (key->IsString() || key->IsNumber()) {
-        int len = 3;
-        if (key->IsString()) {
-          len = String::cast(key)->length();
+    switch (descs->GetType(i)) {
+      case FIELD: {
+        Object* key = descs->GetKey(i);
+        if (key->IsString() || key->IsNumber()) {
+          int len = 3;
+          if (key->IsString()) {
+            len = String::cast(key)->length();
+          }
+          for (; len < 18; len++)
+            Put(' ');
+          if (key->IsString()) {
+            Put(String::cast(key));
+          } else {
+            key->ShortPrint();
+          }
+          Add(": ");
+          Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
+          Add("%o\n", value);
         }
-        for (; len < 18; len++)
-          Put(' ');
-        if (key->IsString()) {
-          Put(String::cast(key));
-        } else {
-          key->ShortPrint();
-        }
-        Add(": ");
-        Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
-        Add("%o\n", value);
       }
+      break;
+      default:
+      break;
     }
   }
 }
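
Note: both sides of this hunk print only FIELD descriptors, right-aligning each key in an 18-character column before the value; the revert merely trades the "if" for a "switch" with an empty default. A tiny sketch of the padding logic (plain stdio stands in for StringStream::Put/Add):

#include <cstdio>
#include <cstring>

// Pad short keys with leading spaces so the ": value" columns line up,
// mirroring the "for (; len < 18; len++) Put(' ')" loop above.
static void PutPaddedKey(const char* key) {
  for (int len = static_cast<int>(strlen(key)); len < 18; len++) putchar(' ');
  printf("%s: ", key);
}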
diff --git a/src/string.js b/src/string.js
index 84dde3d..297105d 100644
--- a/src/string.js
+++ b/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,8 @@
 
 // This file relies on the fact that the following declaration has been made
 // in runtime.js:
-// var $String = global.String;
-// var $NaN = 0/0;
+// const $String = global.String;
+// const $NaN = 0/0;
 
 
 // Set the String function and constructor.
@@ -46,18 +46,16 @@
 
 // ECMA-262 section 15.5.4.2
 function StringToString() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
     throw new $TypeError('String.prototype.toString is not generic');
-  }
   return %_ValueOf(this);
 }
 
 
 // ECMA-262 section 15.5.4.3
 function StringValueOf() {
-  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
+  if (!IS_STRING(this) && !IS_STRING_WRAPPER(this))
     throw new $TypeError('String.prototype.valueOf is not generic');
-  }
   return %_ValueOf(this);
 }
 
@@ -93,8 +91,7 @@
 // ECMA-262, section 15.5.4.6
 function StringConcat() {
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["String.prototype.concat"]);
+    throw MakeTypeError("called_on_null_or_undefined", ["String.prototype.concat"]);
   }
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
@@ -244,15 +241,6 @@
 
   // Convert the search argument to a string and search for it.
   search = TO_STRING_INLINE(search);
-  if (search.length == 1 &&
-      subject.length > 0xFF &&
-      IS_STRING(replace) &&
-      %StringIndexOf(replace, '$', 0) < 0) {
-    // Searching by traversing a cons string tree and replace with cons of
-    // slices works only when the replaced string is a single character, being
-    // replaced by a simple string and only pays off for long strings.
-    return %StringReplaceOneCharWithString(subject, search, replace);
-  }
   var start = %StringIndexOf(subject, search, 0);
   if (start < 0) return subject;
   var end = start + search.length;
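
Note: the fast path removed here exploits that String.prototype.replace with a string pattern replaces only the first occurrence, so the result can be assembled as left slice + replacement + right slice (in V8, a tree of ConsString/SlicedString nodes with no copying). A flat sketch of that decomposition, using std::string copies where V8 would build slices:

#include <string>
#include <string_view>

// Replace the first occurrence of 'search' (one char) in 'subject' by
// 'replacement', built from three pieces as the deleted comment describes.
static std::string ReplaceFirstChar(std::string_view subject,
                                    char search,
                                    std::string_view replacement) {
  size_t i = subject.find(search);
  if (i == std::string_view::npos) return std::string(subject);
  std::string result;
  result.reserve(subject.size() + replacement.size() - 1);
  result += subject.substr(0, i);   // left slice
  result += replacement;            // replacement string
  result += subject.substr(i + 1);  // right slice
  return result;
}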
@@ -370,7 +358,7 @@
       builder_elements.push(SubString(string, position, next));
     }
   }
-}
+};
 
 
 // Compute the string of a given regular expression capture.
@@ -383,7 +371,7 @@
   if (start < 0) return;
   var end = lastCaptureInfo[CAPTURE(scaled + 1)];
   return SubString(string, start, end);
-}
+};
 
 
 // Add the string of a given regular expression capture to the
@@ -396,7 +384,7 @@
   if (start < 0) return;
   var end = matchInfo[CAPTURE(scaled + 1)];
   builder.addSpecialSlice(start, end);
-}
+};
 
 // TODO(lrn): This array will survive indefinitely if replace is never
 // called again. However, it will be empty, since the contents are cleared
@@ -543,37 +531,32 @@
   var s_len = s.length;
   var start_i = TO_INTEGER(start);
   var end_i = s_len;
-  if (end !== void 0) {
+  if (end !== void 0)
     end_i = TO_INTEGER(end);
-  }
 
   if (start_i < 0) {
     start_i += s_len;
-    if (start_i < 0) {
+    if (start_i < 0)
       start_i = 0;
-    }
   } else {
-    if (start_i > s_len) {
-      return '';
-    }
+    if (start_i > s_len)
+      start_i = s_len;
   }
 
   if (end_i < 0) {
     end_i += s_len;
-    if (end_i < 0) {
-      return '';
-    }
+    if (end_i < 0)
+      end_i = 0;
   } else {
-    if (end_i > s_len) {
+    if (end_i > s_len)
       end_i = s_len;
-    }
   }
 
-  if (end_i <= start_i) {
-    return '';
-  }
+  var num_c = end_i - start_i;
+  if (num_c < 0)
+    num_c = 0;
 
-  return SubString(s, start_i, end_i);
+  return SubString(s, start_i, start_i + num_c);
 }
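
Note: both variants of StringSlice normalize indices the same way (negative values count from the end, then everything is clamped to [0, length]); they differ only in how the empty result is produced. A sketch of the shared arithmetic per ECMA-262 String.prototype.slice:

#include <algorithm>

// Normalize slice indices: resolve negative offsets against len, clamp to
// [0, len], and force end >= start so the result is empty when they cross.
static void NormalizeSliceIndices(int len, int start, int end,
                                  int* start_out, int* end_out) {
  int s = start < 0 ? std::max(len + start, 0) : std::min(start, len);
  int e = end < 0 ? std::max(len + end, 0) : std::min(end, len);
  *start_out = s;
  *end_out = std::max(e, s);
}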
 
 
@@ -585,19 +568,20 @@
   }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
+  if (limit === 0) return [];
 
   // ECMA-262 says that if separator is undefined, the result should
-  // be an array of size 1 containing the entire string.
-  if (IS_UNDEFINED(separator)) {
+  // be an array of size 1 containing the entire string.  SpiderMonkey
+  // and KJS have this behavior only when no separator is given.  If
+  // undefined is explicitly given, they convert it to a string and
+  // use that.  We do as SpiderMonkey and KJS.
+  if (%_ArgumentsLength() === 0) {
     return [subject];
   }
 
   var length = subject.length;
   if (!IS_REGEXP(separator)) {
     separator = TO_STRING_INLINE(separator);
-
-    if (limit === 0) return [];
-
     var separator_length = separator.length;
 
     // If the separator string is empty then return the elements in the subject.
@@ -608,14 +592,6 @@
     return result;
   }
 
-  if (limit === 0) return [];
-
-  // Separator is a regular expression.
-  return StringSplitOnRegExp(subject, separator, limit, length);
-}
-
-
-function StringSplitOnRegExp(subject, separator, limit, length) {
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
   if (length === 0) {
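
Note: the observable difference in this hunk is where the limit is honored: the restored code returns [] for limit 0 before looking at the separator, and returns [subject] only when no separator argument was passed at all, while the newer code checked limit separately in each branch. A sketch of the core limit semantics (single-character separator for brevity; JS-style truncation, not remainder-joining):

#include <string>
#include <vector>

// Split 'subject' on 'separator', returning at most 'limit' pieces;
// limit == 0 yields an empty result, matching TO_UINT32(limit) === 0 above.
static std::vector<std::string> Split(const std::string& subject,
                                      char separator, unsigned limit) {
  std::vector<std::string> out;
  if (limit == 0) return out;
  size_t start = 0;
  for (;;) {
    size_t pos = subject.find(separator, start);
    if (pos == std::string::npos) {
      out.push_back(subject.substr(start));  // final piece
      break;
    }
    out.push_back(subject.substr(start, pos - start));
    if (out.size() == limit) break;  // truncate once the limit is reached
    start = pos + 1;
  }
  return out;
}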
@@ -712,7 +688,7 @@
     }
   }
 
-  return ((start_i + 1 == end_i)
+  return (start_i + 1 == end_i
           ? %_StringCharAt(s, start_i)
           : %_SubString(s, start_i, end_i));
 }
@@ -756,7 +732,7 @@
   var end = start + len;
   if (end > s.length) end = s.length;
 
-  return ((start + 1 == end)
+  return (start + 1 == end
           ? %_StringCharAt(s, start)
           : %_SubString(s, start, end));
 }
@@ -856,7 +832,7 @@
                               .replace(/>/g, "&gt;")
                               .replace(/"/g, "&quot;")
                               .replace(/'/g, "&#039;");
-}
+};
 
 
 // Compatibility support for KJS.
@@ -977,7 +953,7 @@
 
 
   // Set up the non-enumerable functions on the String prototype object.
-  InstallFunctions($String.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
     "valueOf", StringValueOf,
     "toString", StringToString,
     "charAt", StringCharAt,
diff --git a/src/strtod.cc b/src/strtod.cc
index 0dc618a..c89c8f3 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,6 +27,7 @@
 
 #include <stdarg.h>
 #include <math.h>
+#include <limits>
 
 #include "globals.h"
 #include "utils.h"
@@ -175,15 +176,13 @@
 static bool DoubleStrtod(Vector<const char> trimmed,
                          int exponent,
                          double* result) {
-#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) \
-    && !defined(_MSC_VER)
+#if (defined(V8_TARGET_ARCH_IA32) || defined(USE_SIMULATOR)) && !defined(WIN32)
   // On x86 the floating-point stack can be 64 or 80 bits wide. If it is
   // 80 bits wide (as is the case on Linux) then double-rounding occurs and the
   // result is not accurate.
-  // We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is
-  // therefore accurate.
-  // Note that the ARM and MIPS simulators are compiled for 32bits. They
-  // therefore exhibit the same problem.
+  // We know that Windows32 uses 64 bits and is therefore accurate.
+  // Note that the ARM simulator is compiled for 32bits. It therefore exhibits
+  // the same problem.
   return false;
 #endif
   if (trimmed.length() <= kMaxExactDoubleIntegerDecimalDigits) {
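
Note: the #if above disables the exact fast path on targets whose FPU evaluates intermediates at more than double precision, because rounding digits * 10^exponent first to 80-bit x87 format and then to 53-bit double rounds twice and can land one ulp off. A runtime analogue using the standard FLT_EVAL_METHOD macro (illustrative only; the real code decides at preprocessing time from target/compiler macros):

#include <cfloat>

// FLT_EVAL_METHOD == 0: double expressions are evaluated at double
// precision, so the fast path's single multiply rounds exactly once.
// FLT_EVAL_METHOD == 1: float promotes to double but double stays double,
// so the double path still rounds once. FLT_EVAL_METHOD == 2 (x87): all
// intermediates live in long double registers and are rounded again on
// store, which is exactly the double-rounding hazard described above.
static bool FastStrtodPathIsExact() {
  return FLT_EVAL_METHOD == 0 || FLT_EVAL_METHOD == 1;
}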
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 3371b1b..cdb4874 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -45,13 +45,18 @@
 
 StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
+  memset(primary_, 0, sizeof(primary_[0]) * StubCache::kPrimaryTableSize);
+  memset(secondary_, 0, sizeof(secondary_[0]) * StubCache::kSecondaryTableSize);
 }
 
 
-void StubCache::Initialize() {
+void StubCache::Initialize(bool create_heap_objects) {
   ASSERT(IsPowerOf2(kPrimaryTableSize));
   ASSERT(IsPowerOf2(kSecondaryTableSize));
-  Clear();
+  if (create_heap_objects) {
+    HandleScope scope;
+    Clear();
+  }
 }
 
 
@@ -77,15 +82,14 @@
   // Compute the primary entry.
   int primary_offset = PrimaryOffset(name, flags, map);
   Entry* primary = entry(primary_, primary_offset);
-  Code* old_code = primary->value;
+  Code* hit = primary->value;
 
   // If the primary entry has useful data in it, we retire it to the
   // secondary cache before overwriting it.
-  if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
-    Map* old_map = primary->map;
-    Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags());
-    int seed = PrimaryOffset(primary->key, old_flags, old_map);
-    int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
+  if (hit != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+    Code::Flags primary_flags = Code::RemoveTypeFromFlags(hit->flags());
+    int secondary_offset =
+        SecondaryOffset(primary->key, primary_flags, primary_offset);
     Entry* secondary = entry(secondary_, secondary_offset);
     *secondary = *primary;
   }
@@ -93,14 +97,12 @@
   // Update primary cache.
   primary->key = name;
   primary->value = code;
-  primary->map = map;
-  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
   return code;
 }
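
Note: Set() above is a two-level, direct-mapped cache: a (name, flags) pair hashes to one primary slot, and a useful entry about to be overwritten is demoted into a secondary table under a second hash. The revert also changes the secondary seed: the restored code seeds it with the slot's primary offset, where the newer code recomputed it from the evicted entry's own map and flags. A sketch with illustrative sizes and hash mixes (not V8's):

#include <cstdint>

struct Entry { uintptr_t key; const void* value; };

static const int kPrimarySize = 2048;   // powers of two so offsets can be
static const int kSecondarySize = 512;  // computed with a mask (cf. ASSERTs)
static Entry primary_[kPrimarySize];
static Entry secondary_[kSecondarySize];

static int PrimaryOffset(uintptr_t key) {
  return static_cast<int>((key >> 2) & (kPrimarySize - 1));
}
static int SecondaryOffset(uintptr_t key, int seed) {
  return static_cast<int>(((key >> 4) ^ seed) & (kSecondarySize - 1));
}

// Insert, demoting any existing primary entry to the secondary table first.
static void CacheSet(uintptr_t key, const void* value) {
  Entry* primary = &primary_[PrimaryOffset(key)];
  if (primary->value != nullptr) {
    int seed = PrimaryOffset(primary->key);
    secondary_[SecondaryOffset(primary->key, seed)] = *primary;
  }
  primary->key = key;
  primary->value = value;
}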
 
 
-Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
-                                               Handle<JSObject> receiver) {
+MaybeObject* StubCache::ComputeLoadNonexistent(String* name,
+                                               JSObject* receiver) {
   ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
   // If no global objects are present in the prototype chain, the load
   // nonexistent IC stub can be shared for all names for a given map
@@ -108,446 +110,558 @@
   // there are global objects involved, we need to check global
   // property cells in the stub and therefore the stub will be
   // specific to the name.
-  Handle<String> cache_name = factory()->empty_string();
+  String* cache_name = heap()->empty_string();
   if (receiver->IsGlobalObject()) cache_name = name;
-  Handle<JSObject> last = receiver;
+  JSObject* last = receiver;
   while (last->GetPrototype() != heap()->null_value()) {
-    last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
+    last = JSObject::cast(last->GetPrototype());
     if (last->IsGlobalObject()) cache_name = name;
   }
   // Compile the stub that is either shared for all names or
   // name specific if there are global objects involved.
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, NONEXISTENT);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadNonexistent(cache_name, receiver, last);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
-  JSObject::UpdateMapCodeCache(receiver, cache_name, code);
+  Object* code = receiver->map()->FindInCodeCache(cache_name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadNonexistent(cache_name, receiver, last);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), cache_name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, cache_name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(cache_name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
-                                         Handle<JSObject> receiver,
-                                         Handle<JSObject> holder,
+MaybeObject* StubCache::ComputeLoadField(String* name,
+                                         JSObject* receiver,
+                                         JSObject* holder,
                                          int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadField(receiver, holder, field_index, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadField(receiver, holder, field_index, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
-                                            Handle<JSObject> receiver,
-                                            Handle<JSObject> holder,
-                                            Handle<AccessorInfo> callback) {
+MaybeObject* StubCache::ComputeLoadCallback(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            AccessorInfo* callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadCallback(name, receiver, holder, callback);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadCallback(name, receiver, holder, callback);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
-                                            Handle<JSObject> receiver,
-                                            Handle<JSObject> holder,
-                                            Handle<JSFunction> value) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+MaybeObject* StubCache::ComputeLoadConstant(String* name,
+                                            JSObject* receiver,
+                                            JSObject* holder,
+                                            Object* value) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-        compiler.CompileLoadConstant(receiver, holder, value, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadConstant(receiver, holder, value, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
-                                               Handle<JSObject> receiver,
-                                               Handle<JSObject> holder) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+MaybeObject* StubCache::ComputeLoadInterceptor(String* name,
+                                               JSObject* receiver,
+                                               JSObject* holder) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-        compiler.CompileLoadInterceptor(receiver, holder, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadInterceptor(receiver, holder, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeLoadNormal() {
-  return isolate_->builtins()->LoadIC_Normal();
+MaybeObject* StubCache::ComputeLoadNormal() {
+  return isolate_->builtins()->builtin(Builtins::kLoadIC_Normal);
 }
 
 
-Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
-                                          Handle<JSObject> receiver,
-                                          Handle<GlobalObject> holder,
-                                          Handle<JSGlobalPropertyCell> cell,
+MaybeObject* StubCache::ComputeLoadGlobal(String* name,
+                                          JSObject* receiver,
+                                          GlobalObject* holder,
+                                          JSGlobalPropertyCell* cell,
                                           bool is_dont_delete) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  LoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
-  PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    LoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadGlobal(receiver,
+                                                           holder,
+                                                           cell,
+                                                           name,
+                                                           is_dont_delete);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
-                                              Handle<JSObject> receiver,
-                                              Handle<JSObject> holder,
+MaybeObject* StubCache::ComputeKeyedLoadField(String* name,
+                                              JSObject* receiver,
+                                              JSObject* holder,
                                               int field_index) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadField(name, receiver, holder, field_index);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadField(name, receiver, holder, field_index);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
-                                                 Handle<JSObject> receiver,
-                                                 Handle<JSObject> holder,
-                                                 Handle<JSFunction> value) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+MaybeObject* StubCache::ComputeKeyedLoadConstant(String* name,
+                                                 JSObject* receiver,
+                                                 JSObject* holder,
+                                                 Object* value) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadConstant(name, receiver, holder, value);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadConstant(name, receiver, holder, value);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
-                                                    Handle<JSObject> receiver,
-                                                    Handle<JSObject> holder) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+MaybeObject* StubCache::ComputeKeyedLoadInterceptor(String* name,
+                                                    JSObject* receiver,
+                                                    JSObject* holder) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadInterceptor(receiver, holder, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+MaybeObject* StubCache::ComputeKeyedLoadCallback(String* name,
+                                                 JSObject* receiver,
+                                                 JSObject* holder,
+                                                 AccessorInfo* callback) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code =
-      compiler.CompileLoadCallback(name, receiver, holder, callback);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code =
+          compiler.CompileLoadCallback(name, receiver, holder, callback);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
-                                                    Handle<JSArray> receiver) {
+
+MaybeObject* StubCache::ComputeKeyedLoadArrayLength(String* name,
+                                                    JSArray* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileLoadArrayLength(name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  ASSERT(receiver->IsJSObject());
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadArrayLength(name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
-                                                     Handle<String> receiver) {
+MaybeObject* StubCache::ComputeKeyedLoadStringLength(String* name,
+                                                     String* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Handle<Map> map(receiver->map());
-  Handle<Object> probe(map->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileLoadStringLength(name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  Map::UpdateCodeCache(map, name, code);
+  Map* map = receiver->map();
+  Object* code = map->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadStringLength(name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result = map->UpdateCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
-    Handle<String> name,
-    Handle<JSFunction> receiver) {
+MaybeObject* StubCache::ComputeKeyedLoadFunctionPrototype(
+    String* name,
+    JSFunction* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedLoadStubCompiler compiler;
+    { MaybeObject* maybe_code = compiler.CompileLoadFunctionPrototype(name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
-                                          Handle<JSObject> receiver,
+MaybeObject* StubCache::ComputeStoreField(String* name,
+                                          JSObject* receiver,
                                           int field_index,
-                                          Handle<Map> transition,
+                                          Map* transition,
                                           StrictModeFlag strict_mode) {
-  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, type, strict_mode);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  StoreStubCompiler compiler(isolate_, strict_mode);
-  Handle<Code> code =
-      compiler.CompileStoreField(receiver, field_index, transition, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler(strict_mode);
+    { MaybeObject* maybe_code =
+          compiler.CompileStoreField(receiver, field_index, transition, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
-    Handle<JSObject> receiver,
-    KeyedIC::StubKind stub_kind,
+MaybeObject* StubCache::ComputeKeyedLoadOrStoreElement(
+    JSObject* receiver,
+    bool is_store,
     StrictModeFlag strict_mode) {
-  KeyedAccessGrowMode grow_mode =
-      KeyedIC::GetGrowModeFromStubKind(stub_kind);
-  Code::ExtraICState extra_state =
-      Code::ComputeExtraICState(grow_mode, strict_mode);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(
-          stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
-                                     : Code::KEYED_STORE_IC,
+          is_store ? Code::KEYED_STORE_IC :
+                     Code::KEYED_LOAD_IC,
           NORMAL,
-          extra_state);
-  Handle<String> name;
-  switch (stub_kind) {
-    case KeyedIC::LOAD:
-      name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
-      break;
-    case KeyedIC::STORE_NO_TRANSITION:
-      name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
-      break;
-    case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
-      name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol();
-      break;
-    default:
-      UNREACHABLE();
-      break;
-  }
-  Handle<Map> receiver_map(receiver->map());
-  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
+          strict_mode);
+  String* name = is_store
+      ? isolate()->heap()->KeyedStoreElementMonomorphic_symbol()
+      : isolate()->heap()->KeyedLoadElementMonomorphic_symbol();
+  Object* maybe_code = receiver->map()->FindInCodeCache(name, flags);
+  if (!maybe_code->IsUndefined()) return Code::cast(maybe_code);
 
-  Handle<Code> code;
-  switch (stub_kind) {
-    case KeyedIC::LOAD: {
-      KeyedLoadStubCompiler compiler(isolate_);
-      code = compiler.CompileLoadElement(receiver_map);
-      break;
-    }
-    case KeyedIC::STORE_AND_GROW_NO_TRANSITION: {
-      KeyedStoreStubCompiler compiler(isolate_, strict_mode,
-                                      ALLOW_JSARRAY_GROWTH);
-      code = compiler.CompileStoreElement(receiver_map);
-      break;
-    }
-    case KeyedIC::STORE_NO_TRANSITION: {
-      KeyedStoreStubCompiler compiler(isolate_, strict_mode,
-                                      DO_NOT_ALLOW_JSARRAY_GROWTH);
-      code = compiler.CompileStoreElement(receiver_map);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-
-  ASSERT(!code.is_null());
-
-  if (stub_kind == KeyedIC::LOAD) {
-    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
+  MaybeObject* maybe_new_code = NULL;
+  Map* receiver_map = receiver->map();
+  if (is_store) {
+    KeyedStoreStubCompiler compiler(strict_mode);
+    maybe_new_code = compiler.CompileStoreElement(receiver_map);
   } else {
-    PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
+    KeyedLoadStubCompiler compiler;
+    maybe_new_code = compiler.CompileLoadElement(receiver_map);
   }
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Code* code;
+  if (!maybe_new_code->To(&code)) return maybe_new_code;
+  if (is_store) {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+                            Code::cast(code), 0));
+  } else {
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+                            Code::cast(code), 0));
+  }
+  ASSERT(code->IsCode());
+  Object* result;
+  { MaybeObject* maybe_result =
+        receiver->UpdateMapCodeCache(name, Code::cast(code));
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
-  return (strict_mode == kStrictMode)
-      ? isolate_->builtins()->Builtins::StoreIC_Normal_Strict()
-      : isolate_->builtins()->Builtins::StoreIC_Normal();
+MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+  return isolate_->builtins()->builtin((strict_mode == kStrictMode)
+                            ? Builtins::kStoreIC_Normal_Strict
+                            : Builtins::kStoreIC_Normal);
 }
 
 
-Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
-                                           Handle<GlobalObject> receiver,
-                                           Handle<JSGlobalPropertyCell> cell,
+MaybeObject* StubCache::ComputeStoreGlobal(String* name,
+                                           GlobalObject* receiver,
+                                           JSGlobalPropertyCell* cell,
                                            StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, NORMAL, strict_mode);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  StoreStubCompiler compiler(isolate_, strict_mode);
-  Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler(strict_mode);
+    { MaybeObject* maybe_code =
+          compiler.CompileStoreGlobal(receiver, cell, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
-                                             Handle<JSObject> receiver,
-                                             Handle<AccessorInfo> callback,
-                                             StrictModeFlag strict_mode) {
+MaybeObject* StubCache::ComputeStoreCallback(
+    String* name,
+    JSObject* receiver,
+    AccessorInfo* callback,
+    StrictModeFlag strict_mode) {
   ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, CALLBACKS, strict_mode);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  StoreStubCompiler compiler(isolate_, strict_mode);
-  Handle<Code> code = compiler.CompileStoreCallback(receiver, callback, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler(strict_mode);
+    { MaybeObject* maybe_code =
+          compiler.CompileStoreCallback(receiver, callback, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
-                                                Handle<JSObject> receiver,
-                                                StrictModeFlag strict_mode) {
+MaybeObject* StubCache::ComputeStoreInterceptor(
+    String* name,
+    JSObject* receiver,
+    StrictModeFlag strict_mode) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::STORE_IC, INTERCEPTOR, strict_mode);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  StoreStubCompiler compiler(isolate_, strict_mode);
-  Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    StoreStubCompiler compiler(strict_mode);
+    { MaybeObject* maybe_code =
+          compiler.CompileStoreInterceptor(receiver, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate_,
+            CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::STORE_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
-Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
-                                               Handle<JSObject> receiver,
+
+MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
+                                               JSObject* receiver,
                                                int field_index,
-                                               Handle<Map> transition,
+                                               Map* transition,
                                                StrictModeFlag strict_mode) {
-  PropertyType type = (transition.is_null()) ? FIELD : MAP_TRANSITION;
+  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
   Code::Flags flags = Code::ComputeMonomorphicFlags(
       Code::KEYED_STORE_IC, type, strict_mode);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedStoreStubCompiler compiler(isolate(), strict_mode,
-                                  DO_NOT_ALLOW_JSARRAY_GROWTH);
-  Handle<Code> code =
-      compiler.CompileStoreField(receiver, field_index, transition, name);
-  PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    KeyedStoreStubCompiler compiler(strict_mode);
+    { MaybeObject* maybe_code =
+          compiler.CompileStoreField(receiver, field_index, transition, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    PROFILE(isolate(),
+            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+                            Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          receiver->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
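
Note: every Compute* body restored above repeats one idiom: an allocating call returns a MaybeObject*, ToObject() extracts the result on success, and on failure the same MaybeObject* is returned unchanged so the top-level caller can run a GC and retry. The handlified code being reverted away hides this, roughly by retrying the allocation after GC inside the callee. A compilable sketch with simplified stand-in types (not the real v8::internal hierarchy):

struct Object;

struct MaybeObject {
  bool is_failure;
  inline bool ToObject(Object** out);
};

struct Object : MaybeObject {};  // in V8, Object derives from MaybeObject

inline bool MaybeObject::ToObject(Object** out) {
  if (is_failure) return false;  // allocation failed; caller must propagate
  *out = static_cast<Object*>(this);
  return true;
}

MaybeObject* CompileStub() {     // pretend the allocation failed
  static MaybeObject failure = {true};
  return &failure;
}

MaybeObject* ComputeStub() {
  Object* code;
  { MaybeObject* maybe_code = CompileStub();
    if (!maybe_code->ToObject(&code)) return maybe_code;  // bubble failure up
  }
  return code;  // success: an Object is-a MaybeObject
}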
 
-
 #define CALL_LOGGER_TAG(kind, type) \
     (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type)
 
-Handle<Code> StubCache::ComputeCallConstant(int argc,
+MaybeObject* StubCache::ComputeCallConstant(int argc,
                                             Code::Kind kind,
-                                            Code::ExtraICState extra_state,
-                                            Handle<String> name,
-                                            Handle<Object> object,
-                                            Handle<JSObject> holder,
-                                            Handle<JSFunction> function) {
+                                            Code::ExtraICState extra_ic_state,
+                                            String* name,
+                                            Object* object,
+                                            JSObject* holder,
+                                            JSFunction* function) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(*object, *holder);
-  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+      IC::GetCodeCacheForObject(object, holder);
+  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
 
   // Compute check type based on receiver/holder.
   CheckType check = RECEIVER_MAP_CHECK;
@@ -559,36 +673,51 @@
     check = BOOLEAN_CHECK;
   }
 
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(kind, CONSTANT_FUNCTION, extra_state,
-                                    cache_holder, argc);
-  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
-  Handle<Code> code =
-      compiler.CompileCallConstant(object, holder, function, name, check);
-  code->set_check_type(check);
-  ASSERT_EQ(flags, code->flags());
-  PROFILE(isolate_,
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(map_holder, name, code);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+                                                    CONSTANT_FUNCTION,
+                                                    extra_ic_state,
+                                                    cache_holder,
+                                                    argc);
+  Object* code = map_holder->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    // If the function hasn't been compiled yet, we cannot do it now
+    // because it may cause GC. To avoid this issue, we return an
+    // internal error which will make sure we do not update any
+    // caches.
+    if (!function->is_compiled()) return Failure::InternalError();
+    // Compile the stub - only create stubs for fully compiled functions.
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
+    { MaybeObject* maybe_code =
+          compiler.CompileCallConstant(object, holder, function, name, check);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    Code::cast(code)->set_check_type(check);
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    PROFILE(isolate_,
+            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+                            Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          map_holder->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
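
The MaybeObject* plumbing that this revert reintroduces follows one calling convention throughout the file: a callee returns either a real heap object or a Failure, and each caller unwraps the result with ToObject, returning the failure untouched so it propagates up to a point that can garbage-collect and retry. A rough, self-contained sketch of the idiom, using simplified stand-in types rather than the real V8 classes:

    #include <cstdio>

    struct Object {};

    // Simplified stand-in for v8::internal::MaybeObject; illustration only.
    struct MaybeObject {
      Object* value;    // set on success
      bool is_failure;  // true when an allocation failed

      // Mirrors MaybeObject::ToObject(): yields the value on success and
      // leaves a failure for the caller to propagate upward.
      bool ToObject(Object** out) {
        if (is_failure) return false;
        *out = value;
        return true;
      }
    };

    static Object heap_object;

    // A callee that may fail, like CompileCallConstant above.
    MaybeObject AllocateSomething(bool fail) {
      return fail ? MaybeObject{nullptr, true}
                  : MaybeObject{&heap_object, false};
    }

    // The caller-side idiom used by every Compute* function in this file:
    // unwrap on success, return the failure object unchanged otherwise.
    MaybeObject Caller(bool fail) {
      Object* code;
      { MaybeObject maybe_code = AllocateSomething(fail);
        if (!maybe_code.ToObject(&code)) return maybe_code;
      }
      return MaybeObject{code, false};
    }

    int main() {
      std::printf("success: %d, failure: %d\n",
                  !Caller(false).is_failure, Caller(true).is_failure);
      return 0;
    }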
 
 
-Handle<Code> StubCache::ComputeCallField(int argc,
+MaybeObject* StubCache::ComputeCallField(int argc,
                                          Code::Kind kind,
-                                         Code::ExtraICState extra_state,
-                                         Handle<String> name,
-                                         Handle<Object> object,
-                                         Handle<JSObject> holder,
+                                         Code::ExtraICState extra_ic_state,
+                                         String* name,
+                                         Object* object,
+                                         JSObject* holder,
                                          int index) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(*object, *holder);
-  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+      IC::GetCodeCacheForObject(object, holder);
+  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -597,35 +726,47 @@
     object = holder;
   }
 
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(kind, FIELD, extra_state,
-                                    cache_holder, argc);
-  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
-  Handle<Code> code =
-      compiler.CompileCallField(Handle<JSObject>::cast(object),
-                                holder, index, name);
-  ASSERT_EQ(flags, code->flags());
-  PROFILE(isolate_,
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(map_holder, name, code);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+                                                    FIELD,
+                                                    extra_ic_state,
+                                                    cache_holder,
+                                                    argc);
+  Object* code = map_holder->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
+    { MaybeObject* maybe_code =
+          compiler.CompileCallField(JSObject::cast(object),
+                                    holder,
+                                    index,
+                                    name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    PROFILE(isolate_,
+            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+                            Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          map_holder->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeCallInterceptor(int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState extra_state,
-                                               Handle<String> name,
-                                               Handle<Object> object,
-                                               Handle<JSObject> holder) {
+MaybeObject* StubCache::ComputeCallInterceptor(
+    int argc,
+    Code::Kind kind,
+    Code::ExtraICState extra_ic_state,
+    String* name,
+    Object* object,
+    JSObject* holder) {
   // Compute the check type and the map.
   InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(*object, *holder);
-  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+      IC::GetCodeCacheForObject(object, holder);
+  JSObject* map_holder = IC::GetCodeCacheHolder(object, cache_holder);
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -634,60 +775,135 @@
     object = holder;
   }
 
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(kind, INTERCEPTOR, extra_state,
-                                    cache_holder, argc);
-  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
-  Handle<Code> code =
-      compiler.CompileCallInterceptor(Handle<JSObject>::cast(object),
-                                      holder, name);
-  ASSERT_EQ(flags, code->flags());
-  PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(map_holder, name, code);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+                                                    INTERCEPTOR,
+                                                    extra_ic_state,
+                                                    cache_holder,
+                                                    argc);
+  Object* code = map_holder->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
+    { MaybeObject* maybe_code =
+          compiler.CompileCallInterceptor(JSObject::cast(object), holder, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    PROFILE(isolate(),
+            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+                            Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          map_holder->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
   return code;
 }
 
 
-Handle<Code> StubCache::ComputeCallGlobal(int argc,
+MaybeObject* StubCache::ComputeCallNormal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_state,
-                                          Handle<String> name,
-                                          Handle<JSObject> receiver,
-                                          Handle<GlobalObject> holder,
-                                          Handle<JSGlobalPropertyCell> cell,
-                                          Handle<JSFunction> function) {
-  InlineCacheHolderFlag cache_holder =
-      IC::GetCodeCacheForObject(*receiver, *holder);
-  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
-  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(kind, NORMAL, extra_state,
-                                    cache_holder, argc);
-  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
-  Handle<Code> code =
-      compiler.CompileCallGlobal(receiver, holder, cell, function, name);
-  ASSERT_EQ(flags, code->flags());
-  PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(map_holder, name, code);
+                                          Code::ExtraICState extra_ic_state,
+                                          String* name,
+                                          JSObject* receiver) {
+  Object* code;
+  { MaybeObject* maybe_code = ComputeCallNormal(argc, kind, extra_ic_state);
+    if (!maybe_code->ToObject(&code)) return maybe_code;
+  }
   return code;
 }
 
 
-static void FillCache(Isolate* isolate, Handle<Code> code) {
-  Handle<UnseededNumberDictionary> dictionary =
-      UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
-                                    code->flags(),
-                                    code);
-  isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+MaybeObject* StubCache::ComputeCallGlobal(int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state,
+                                          String* name,
+                                          JSObject* receiver,
+                                          GlobalObject* holder,
+                                          JSGlobalPropertyCell* cell,
+                                          JSFunction* function) {
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(receiver, holder);
+  JSObject* map_holder = IC::GetCodeCacheHolder(receiver, cache_holder);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
+                                                    NORMAL,
+                                                    extra_ic_state,
+                                                    cache_holder,
+                                                    argc);
+  Object* code = map_holder->map()->FindInCodeCache(name, flags);
+  if (code->IsUndefined()) {
+    // If the function hasn't been compiled yet, we cannot compile it
+    // now because compilation may cause GC. To avoid this issue, we
+    // return an internal error, which ensures we do not update any
+    // caches.
+    if (!function->is_compiled()) return Failure::InternalError();
+    CallStubCompiler compiler(argc, kind, extra_ic_state, cache_holder);
+    { MaybeObject* maybe_code =
+          compiler.CompileCallGlobal(receiver, holder, cell, function, name);
+      if (!maybe_code->ToObject(&code)) return maybe_code;
+    }
+    ASSERT_EQ(flags, Code::cast(code)->flags());
+    PROFILE(isolate(),
+            CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
+                            Code::cast(code), name));
+    GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
+    Object* result;
+    { MaybeObject* maybe_result =
+          map_holder->UpdateMapCodeCache(name, Code::cast(code));
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+    }
+  }
+  return code;
+}
+
+
+static Object* GetProbeValue(Isolate* isolate, Code::Flags flags) {
+  // Use raw_unchecked... so we don't get assert failures during GC.
+  UnseededNumberDictionary* dictionary =
+      isolate->heap()->raw_unchecked_non_monomorphic_cache();
+  int entry = dictionary->FindEntry(isolate, flags);
+  if (entry != -1) return dictionary->ValueAt(entry);
+  return isolate->heap()->raw_unchecked_undefined_value();
+}
+
+
+MUST_USE_RESULT static MaybeObject* ProbeCache(Isolate* isolate,
+                                               Code::Flags flags) {
+  Heap* heap = isolate->heap();
+  Object* probe = GetProbeValue(isolate, flags);
+  if (probe != heap->undefined_value()) return probe;
+  // Seed the cache with an undefined value to make sure that any
+  // generated code object can always be inserted into the cache
+  // without causing allocation failures.
+  Object* result;
+  { MaybeObject* maybe_result =
+        heap->non_monomorphic_cache()->AtNumberPut(flags,
+                                                   heap->undefined_value());
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  heap->public_set_non_monomorphic_cache(
+      UnseededNumberDictionary::cast(result));
+  return probe;
+}
+
+
+static MaybeObject* FillCache(Isolate* isolate, MaybeObject* maybe_code) {
+  Object* code;
+  if (maybe_code->ToObject(&code)) {
+    if (code->IsCode()) {
+      Heap* heap = isolate->heap();
+      int entry = heap->non_monomorphic_cache()->FindEntry(
+          Code::cast(code)->flags());
+      // The entry must be present; see the comment in ProbeCache.
+      ASSERT(entry != -1);
+      ASSERT(heap->non_monomorphic_cache()->ValueAt(entry) ==
+             heap->undefined_value());
+      heap->non_monomorphic_cache()->ValueAtPut(entry, code);
+      CHECK(GetProbeValue(isolate, Code::cast(code)->flags()) == code);
+    }
+  }
+  return maybe_code;
 }
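
ProbeCache and FillCache above form a two-phase protocol that preserves a GC-safety invariant: ProbeCache seeds the non-monomorphic cache with an undefined placeholder while allocation is still permissible, so that FillCache can later store the compiled stub with ValueAtPut, a pure overwrite that cannot fail. A minimal sketch of the same idea over a plain hash map (simplified types, not the real UnseededNumberDictionary API):

    #include <cassert>
    #include <unordered_map>

    // Placeholder sentinel standing in for heap->undefined_value().
    constexpr int kUndefined = -1;

    using Cache = std::unordered_map<int, int>;  // flags -> code id

    // Phase 1: reserve the slot while allocation is still allowed.
    int Probe(Cache& cache, int flags) {
      auto it = cache.find(flags);
      if (it != cache.end()) return it->second;
      cache.emplace(flags, kUndefined);  // may allocate; done up front
      return kUndefined;
    }

    // Phase 2: after compiling, fill the reserved slot. This is a pure
    // overwrite, so it cannot fail even if the allocator would.
    void Fill(Cache& cache, int flags, int code) {
      auto it = cache.find(flags);
      assert(it != cache.end());         // slot was reserved by Probe
      assert(it->second == kUndefined);  // and is still the placeholder
      it->second = code;
    }

    int main() {
      Cache cache;
      int flags = 42;
      if (Probe(cache, flags) == kUndefined) {
        int code = 7;  // stand-in for the freshly compiled stub
        Fill(cache, flags, code);
      }
      assert(Probe(cache, flags) == 7);
      return 0;
    }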
 
 
@@ -697,200 +913,209 @@
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags =
-      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
-
-  // Use raw_unchecked... so we don't get assert failures during GC.
-  UnseededNumberDictionary* dictionary =
-      isolate()->heap()->raw_unchecked_non_monomorphic_cache();
-  int entry = dictionary->FindEntry(isolate(), flags);
-  ASSERT(entry != -1);
-  Object* code = dictionary->ValueAt(entry);
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         UNINITIALIZED,
+                                         extra_state,
+                                         NORMAL,
+                                         argc);
+  Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
+  ASSERT(result != heap()->undefined_value());
   // This might be called during the marking phase of the collector
   // hence the unchecked cast.
-  return reinterpret_cast<Code*>(code);
+  return reinterpret_cast<Code*>(result);
 }
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc,
+MaybeObject* StubCache::ComputeCallInitialize(int argc,
                                               RelocInfo::Mode mode,
                                               Code::Kind kind) {
   Code::ExtraICState extra_state =
       CallICBase::StringStubState::encode(DEFAULT_STRING_STUB) |
       CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
-  Code::Flags flags =
-      Code::ComputeFlags(kind, UNINITIALIZED, extra_state, NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallInitialize(flags);
-  FillCache(isolate_, code);
-  return code;
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         UNINITIALIZED,
+                                         extra_state,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallInitialize(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallInitialize(int argc, RelocInfo::Mode mode) {
-  return ComputeCallInitialize(argc, mode, Code::CALL_IC);
+Handle<Code> StubCache::ComputeCallInitialize(int argc,
+                                              RelocInfo::Mode mode) {
+  CALL_HEAP_FUNCTION(isolate_,
+                     ComputeCallInitialize(argc, mode, Code::CALL_IC),
+                     Code);
 }
 
 
 Handle<Code> StubCache::ComputeKeyedCallInitialize(int argc) {
-  return ComputeCallInitialize(argc, RelocInfo::CODE_TARGET,
-                               Code::KEYED_CALL_IC);
+  CALL_HEAP_FUNCTION(
+      isolate_,
+      ComputeCallInitialize(argc, RelocInfo::CODE_TARGET, Code::KEYED_CALL_IC),
+      Code);
 }
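
The two Handle<Code> wrappers above reintroduce CALL_HEAP_FUNCTION, which bridges the handle world and the MaybeObject world: evaluate the heap call, and if it reports a retryable allocation failure, collect garbage and try again before handing back a handle. A loose sketch of that retry loop, with simplified types and a hypothetical CollectAllGarbage hook (the real macro operates on MaybeObject*, Handle<T>, and the Isolate's heap):

    #include <cstdio>
    #include <functional>

    struct Result {
      int value;            // the allocated "object" on success
      bool retry_after_gc;  // allocation failed, a GC may free space
    };

    void CollectAllGarbage() { std::puts("collecting garbage"); }

    // Sketch of the retry loop CALL_HEAP_FUNCTION wraps around a heap
    // call: attempt, GC on a retryable failure, attempt again, give up.
    int CallWithRetry(const std::function<Result()>& heap_call) {
      Result r = heap_call();
      if (!r.retry_after_gc) return r.value;
      CollectAllGarbage();
      r = heap_call();
      if (!r.retry_after_gc) return r.value;
      std::puts("out of memory");  // the real code escalates, then aborts
      return -1;
    }

    int main() {
      int attempts = 0;
      int v = CallWithRetry([&] {
        ++attempts;
        return Result{5, attempts < 2};  // fail once, succeed on retry
      });
      std::printf("value=%d after %d attempts\n", v, attempts);
      return 0;
    }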
 
 
-Handle<Code> StubCache::ComputeCallPreMonomorphic(
+MaybeObject* StubCache::ComputeCallPreMonomorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_state) {
-  Code::Flags flags =
-      Code::ComputeFlags(kind, PREMONOMORPHIC, extra_state, NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallPreMonomorphic(flags);
-  FillCache(isolate_, code);
-  return code;
+    Code::ExtraICState extra_ic_state) {
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         PREMONOMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallPreMonomorphic(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallNormal(int argc,
+MaybeObject* StubCache::ComputeCallNormal(int argc,
                                           Code::Kind kind,
-                                          Code::ExtraICState extra_state) {
-  Code::Flags flags =
-      Code::ComputeFlags(kind, MONOMORPHIC, extra_state, NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallNormal(flags);
-  FillCache(isolate_, code);
-  return code;
+                                          Code::ExtraICState extra_ic_state) {
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MONOMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallNormal(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
   ASSERT(kind == Code::KEYED_CALL_IC);
-  Code::Flags flags =
-      Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
-                         NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallArguments(flags);
-  FillCache(isolate_, code);
-  return code;
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MEGAMORPHIC,
+                                         Code::kNoExtraICState,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallArguments(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallMegamorphic(
+MaybeObject* StubCache::ComputeCallMegamorphic(
     int argc,
     Code::Kind kind,
-    Code::ExtraICState extra_state) {
-  Code::Flags flags =
-      Code::ComputeFlags(kind, MEGAMORPHIC, extra_state,
-                         NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallMegamorphic(flags);
-  FillCache(isolate_, code);
-  return code;
+    Code::ExtraICState extra_ic_state) {
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MEGAMORPHIC,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallMegamorphic(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallMiss(int argc,
+MaybeObject* StubCache::ComputeCallMiss(int argc,
                                         Code::Kind kind,
-                                        Code::ExtraICState extra_state) {
+                                        Code::ExtraICState extra_ic_state) {
   // MONOMORPHIC_PROTOTYPE_FAILURE state is used to make sure that miss stubs
   // and monomorphic stubs are not mixed up together in the stub cache.
-  Code::Flags flags =
-      Code::ComputeFlags(kind, MONOMORPHIC_PROTOTYPE_FAILURE, extra_state,
-                         NORMAL, argc, OWN_MAP);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallMiss(flags);
-  FillCache(isolate_, code);
-  return code;
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         MONOMORPHIC_PROTOTYPE_FAILURE,
+                                         extra_ic_state,
+                                         NORMAL,
+                                         argc,
+                                         OWN_MAP);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallMiss(flags));
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
-                                              Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugBreak(
+    int argc,
+    Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags =
-      Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
-                         NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallDebugBreak(flags);
-  FillCache(isolate_, code);
-  return code;
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         DEBUG_BREAK,
+                                         Code::kNoExtraICState,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallDebugBreak(flags));
 }
 
 
-Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
-                                                      Code::Kind kind) {
+MaybeObject* StubCache::ComputeCallDebugPrepareStepIn(
+    int argc,
+    Code::Kind kind) {
   // Extra IC state is irrelevant for debug break ICs. They jump to
   // the actual call ic to carry out the work.
-  Code::Flags flags =
-      Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
-                         NORMAL, argc);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code = compiler.CompileCallDebugPrepareStepIn(flags);
-  FillCache(isolate_, code);
-  return code;
+  Code::Flags flags = Code::ComputeFlags(kind,
+                                         DEBUG_PREPARE_STEP_IN,
+                                         Code::kNoExtraICState,
+                                         NORMAL,
+                                         argc);
+  Object* probe;
+  { MaybeObject* maybe_probe = ProbeCache(isolate_, flags);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  if (!probe->IsUndefined()) return probe;
+  StubCompiler compiler;
+  return FillCache(isolate_, compiler.CompileCallDebugPrepareStepIn(flags));
 }
 #endif
 
 
 void StubCache::Clear() {
-  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = heap()->empty_string();
-    primary_[i].value = empty;
+    primary_[i].value = isolate_->builtins()->builtin(
+        Builtins::kIllegal);
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = heap()->empty_string();
-    secondary_[j].value = empty;
+    secondary_[j].value = isolate_->builtins()->builtin(
+        Builtins::kIllegal);
   }
 }
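
Clear() resets both Entry tables to the kIllegal builtin because the stub cache is a two-level, direct-mapped cache: a lookup probes the primary table at a hash of the name, flags, and map, and entries displaced from a primary slot fall back to a secondary table, which is why CollectMatchingMaps below scans both. A toy version of the two-table scheme (sizes and hash functions are placeholders, not V8's):

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <string>

    struct Entry { std::string key; int value; };

    constexpr std::size_t kPrimarySize = 8;
    constexpr std::size_t kSecondarySize = 4;

    Entry primary[kPrimarySize];
    Entry secondary[kSecondarySize];

    std::size_t PrimaryOffset(const std::string& k) {
      return std::hash<std::string>{}(k) % kPrimarySize;
    }
    std::size_t SecondaryOffset(const std::string& k) {
      return (std::hash<std::string>{}(k) / kPrimarySize) % kSecondarySize;
    }

    // On insert, the entry displaced from the primary slot falls back
    // to the secondary table, so lookups probe primary, then secondary.
    void Insert(const std::string& k, int v) {
      Entry& p = primary[PrimaryOffset(k)];
      if (!p.key.empty()) secondary[SecondaryOffset(p.key)] = p;
      p = Entry{k, v};
    }

    int Lookup(const std::string& k) {
      const Entry& p = primary[PrimaryOffset(k)];
      if (p.key == k) return p.value;
      const Entry& s = secondary[SecondaryOffset(k)];
      if (s.key == k) return s.value;
      return -1;  // miss: fall through to the generic IC path
    }

    int main() {
      Insert("load_a", 1);
      Insert("load_b", 2);  // may displace load_a into the secondary table
      assert(Lookup("load_a") == 1);
      assert(Lookup("load_b") == 2);
      assert(Lookup("missing") == -1);
      return 0;
    }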
 
 
 void StubCache::CollectMatchingMaps(SmallMapList* types,
                                     String* name,
-                                    Code::Flags flags,
-                                    Handle<Context> global_context) {
+                                    Code::Flags flags) {
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == name) {
       Map* map = primary_[i].value->FindFirstMap();
@@ -899,8 +1124,7 @@
       if (map == NULL) continue;
 
       int offset = PrimaryOffset(name, flags, map);
-      if (entry(primary_, offset) == &primary_[i] &&
-          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
+      if (entry(primary_, offset) == &primary_[i]) {
         types->Add(Handle<Map>(map));
       }
     }
@@ -923,8 +1147,7 @@
 
       // Lookup in secondary table and add matches.
       int offset = SecondaryOffset(name, flags, primary_offset);
-      if (entry(secondary_, offset) == &secondary_[i] &&
-          !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
+      if (entry(secondary_, offset) == &secondary_[i]) {
         types->Add(Handle<Map>(map));
       }
     }
@@ -1119,8 +1342,8 @@
   JSObject* recv = JSObject::cast(args[0]);
   String* name = String::cast(args[1]);
   Object* value = args[2];
-  ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode);
   StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3));
+  ASSERT(strict_mode == kStrictMode || strict_mode == kNonStrictMode);
   ASSERT(recv->HasNamedInterceptor());
   PropertyAttributes attr = NONE;
   MaybeObject* result = recv->SetPropertyWithInterceptor(
@@ -1137,47 +1360,62 @@
 }
 
 
-Handle<Code> StubCompiler::CompileCallInitialize(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallInitialize(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallInitialize");
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallInitialize");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   isolate()->counters()->call_initialize_stubs()->Increment();
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_INITIALIZE_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, *code));
-  return code;
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_INITIALIZE, Code::cast(code)));
+  return result;
 }
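
The recurring pairs of Code* code = Code::cast(result); USE(code); below look redundant, but PROFILE(...) and GDBJIT(...) expand to nothing in most build configurations, which would leave code unused and trip -Wunused-variable. USE consumes the value so those builds stay warning-free; a minimal equivalent of the idiom (a sketch, not V8's exact definition):

    // Minimal analogue of V8's USE() macro: evaluate a value and
    // discard it, so builds where logging compiles away don't warn.
    template <typename T>
    inline void USE(T) {}

    int main() {
      int only_needed_for_logging = 42;
      USE(only_needed_for_logging);  // silences -Wunused-variable
      return 0;
    }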
 
 
-Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   // The code of the PreMonomorphic stub is the same as the code
   // of the Initialized stub.  They just differ on the code object flags.
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateInitialize(masm(), argc, extra_state);
+    CallIC::GenerateInitialize(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateInitialize(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   isolate()->counters()->call_premonomorphic_stubs()->Increment();
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_PRE_MONOMORPHIC_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, *code));
-  return code;
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_PRE_MONOMORPHIC, Code::cast(code)));
+  return result;
 }
 
 
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallNormal(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1188,82 +1426,116 @@
   } else {
     KeyedCallIC::GenerateNormal(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallNormal");
+  Object* result;
+  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallNormal");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   isolate()->counters()->call_normal_stubs()->Increment();
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_NORMAL_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, *code));
-  return code;
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_NORMAL, Code::cast(code)));
+  return result;
 }
 
 
-Handle<Code> StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallMegamorphic(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMegamorphic(masm(), argc, extra_state);
+    CallIC::GenerateMegamorphic(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMegamorphic(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMegamorphic");
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallMegamorphic");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   isolate()->counters()->call_megamorphic_stubs()->Increment();
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
-  return code;
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
+  return result;
 }
 
 
-Handle<Code> StubCompiler::CompileCallArguments(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallArguments(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   KeyedCallIC::GenerateNonStrictArguments(masm(), argc);
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallArguments");
+  Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallArguments");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
-                                          CALL_MEGAMORPHIC_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, *code));
-  return code;
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MEGAMORPHIC_TAG),
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MEGAMORPHIC, Code::cast(code)));
+  return result;
 }
 
 
-Handle<Code> StubCompiler::CompileCallMiss(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallMiss(Code::Flags flags) {
+  HandleScope scope(isolate());
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
-  Code::ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+  Code::ExtraICState extra_ic_state = Code::ExtractExtraICStateFromFlags(flags);
   if (kind == Code::CALL_IC) {
-    CallIC::GenerateMiss(masm(), argc, extra_state);
+    CallIC::GenerateMiss(masm(), argc, extra_ic_state);
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallMiss");
+  Object* result;
+  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "CompileCallMiss");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
   isolate()->counters()->call_megamorphic_stubs()->Increment();
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_MISS_TAG),
-                          *code, code->arguments_count()));
-  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, *code));
-  return code;
+                          code, code->arguments_count()));
+  GDBJIT(AddCode(GDBJITInterface::CALL_MISS, Code::cast(code)));
+  return result;
 }
 
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-Handle<Code> StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
+MaybeObject* StubCompiler::CompileCallDebugBreak(Code::Flags flags) {
+  HandleScope scope(isolate());
   Debug::GenerateCallICDebugBreak(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugBreak");
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallDebugBreak");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
+  Code::Kind kind = Code::ExtractKindFromFlags(flags);
+  USE(kind);
   PROFILE(isolate(),
-          CodeCreateEvent(CALL_LOGGER_TAG(Code::ExtractKindFromFlags(flags),
-                                          CALL_DEBUG_BREAK_TAG),
-                          *code, code->arguments_count()));
-  return code;
+          CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_DEBUG_BREAK_TAG),
+                          code, code->arguments_count()));
+  return result;
 }
 
 
-Handle<Code> StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
-  // Use the same code for the the step in preparations as we do for the
-  // miss case.
+MaybeObject* StubCompiler::CompileCallDebugPrepareStepIn(Code::Flags flags) {
+  HandleScope scope(isolate());
+  // Use the same code for the step-in preparations as we do for
+  // the miss case.
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
   Code::Kind kind = Code::ExtractKindFromFlags(flags);
   if (kind == Code::CALL_IC) {
@@ -1272,96 +1544,133 @@
   } else {
     KeyedCallIC::GenerateMiss(masm(), argc);
   }
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
+  Object* result;
+  { MaybeObject* maybe_result =
+        GetCodeWithFlags(flags, "CompileCallDebugPrepareStepIn");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
   PROFILE(isolate(),
           CodeCreateEvent(
               CALL_LOGGER_TAG(kind, CALL_DEBUG_PREPARE_STEP_IN_TAG),
-              *code,
+              code,
               code->arguments_count()));
-  return code;
+  return result;
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
+#endif
 
 #undef CALL_LOGGER_TAG
 
-
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
+MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags,
                                             const char* name) {
+  // Check for allocation failures during stub compilation.
+  if (failure_->IsFailure()) return failure_;
+
   // Create code object in the heap.
   CodeDesc desc;
   masm_.GetCode(&desc);
-  Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
+  MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
 #ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs) code->Disassemble(name);
+  if (FLAG_print_code_stubs && !result->IsFailure()) {
+    Code::cast(result->ToObjectUnchecked())->Disassemble(name);
+  }
 #endif
-  return code;
+  return result;
 }
 
 
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
-                                            Handle<String> name) {
-  return (FLAG_print_code_stubs && !name.is_null())
-      ? GetCodeWithFlags(flags, *name->ToCString())
-      : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
+MaybeObject* StubCompiler::GetCodeWithFlags(Code::Flags flags, String* name) {
+  if (FLAG_print_code_stubs && (name != NULL)) {
+    return GetCodeWithFlags(flags, *name->ToCString());
+  }
+  return GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL));
 }
 
 
-void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
-                                         Handle<String> name,
+void StubCompiler::LookupPostInterceptor(JSObject* holder,
+                                         String* name,
                                          LookupResult* lookup) {
-  holder->LocalLookupRealNamedProperty(*name, lookup);
-  if (lookup->IsProperty()) return;
-
-  lookup->NotFound();
-  if (holder->GetPrototype()->IsNull()) return;
-
-  holder->GetPrototype()->Lookup(*name, lookup);
+  holder->LocalLookupRealNamedProperty(name, lookup);
+  if (!lookup->IsProperty()) {
+    lookup->NotFound();
+    Object* proto = holder->GetPrototype();
+    if (!proto->IsNull()) {
+      proto->Lookup(name, lookup);
+    }
+  }
 }
 
 
-Handle<Code> LoadStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+
+MaybeObject* LoadStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  return code;
+  MaybeObject* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(isolate(),
+            CodeCreateEvent(Logger::LOAD_IC_TAG,
+                            Code::cast(result->ToObjectUnchecked()),
+                            name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
+                   name,
+                   Code::cast(result->ToObjectUnchecked())));
+  }
+  return result;
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::GetCode(PropertyType type,
-                                            Handle<String> name,
+MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type,
+                                            String* name,
                                             InlineCacheState state) {
   Code::Flags flags = Code::ComputeFlags(
       Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  return code;
+  MaybeObject* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(isolate(),
+            CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG,
+                            Code::cast(result->ToObjectUnchecked()),
+                            name));
+    GDBJIT(AddCode(GDBJITInterface::LOAD_IC,
+                   name,
+                   Code::cast(result->ToObjectUnchecked())));
+  }
+  return result;
 }
 
 
-Handle<Code> StoreStubCompiler::GetCode(PropertyType type,
-                                        Handle<String> name) {
+MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
-  return code;
+  MaybeObject* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(isolate(),
+            CodeCreateEvent(Logger::STORE_IC_TAG,
+                            Code::cast(result->ToObjectUnchecked()),
+                            name));
+    GDBJIT(AddCode(GDBJITInterface::STORE_IC,
+                   name,
+                   Code::cast(result->ToObjectUnchecked())));
+  }
+  return result;
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::GetCode(PropertyType type,
-                                             Handle<String> name,
+MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type,
+                                             String* name,
                                              InlineCacheState state) {
-  Code::ExtraICState extra_state =
-      Code::ComputeExtraICState(grow_mode_, strict_mode_);
   Code::Flags flags =
-      Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
-  GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
-  return code;
+      Code::ComputeFlags(Code::KEYED_STORE_IC, state, strict_mode_, type);
+  MaybeObject* result = GetCodeWithFlags(flags, name);
+  if (!result->IsFailure()) {
+    PROFILE(isolate(),
+            CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
+                            Code::cast(result->ToObjectUnchecked()),
+                            name));
+    GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC,
+                   name,
+                   Code::cast(result->ToObjectUnchecked())));
+  }
+  return result;
 }
 
 
@@ -1371,49 +1680,50 @@
 }
 
 
-CallStubCompiler::CallStubCompiler(Isolate* isolate,
-                                   int argc,
+CallStubCompiler::CallStubCompiler(int argc,
                                    Code::Kind kind,
-                                   Code::ExtraICState extra_state,
+                                   Code::ExtraICState extra_ic_state,
                                    InlineCacheHolderFlag cache_holder)
-    : StubCompiler(isolate),
-      arguments_(argc),
+    : arguments_(argc),
       kind_(kind),
-      extra_state_(extra_state),
+      extra_ic_state_(extra_ic_state),
       cache_holder_(cache_holder) {
 }
 
 
-bool CallStubCompiler::HasCustomCallGenerator(Handle<JSFunction> function) {
-  if (function->shared()->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = function->shared()->builtin_function_id();
+bool CallStubCompiler::HasCustomCallGenerator(JSFunction* function) {
+  SharedFunctionInfo* info = function->shared();
+  if (info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = info->builtin_function_id();
 #define CALL_GENERATOR_CASE(name) if (id == k##name) return true;
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
   }
-
   CallOptimization optimization(function);
-  return optimization.is_simple_api_call();
+  if (optimization.is_simple_api_call()) {
+    return true;
+  }
+  return false;
 }
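
HasCustomCallGenerator works by expanding CUSTOM_CALL_IC_GENERATORS, an X-macro list: each client defines its own CALL_GENERATOR_CASE and instantiates the list, so the set of builtins with specialized call stubs lives in one place. A self-contained miniature of the pattern, with a hypothetical list and names (the real list names V8 builtins such as ArrayPush):

    #include <cstdio>
    #include <cstring>

    // Hypothetical X-macro list in the style of CUSTOM_CALL_IC_GENERATORS.
    #define MY_GENERATORS(V) \
      V(ArrayPush)           \
      V(StringCharAt)

    // First expansion: build an enum of ids (cf. BuiltinFunctionId).
    #define DEFINE_ID(name) k##name,
    enum BuiltinId { MY_GENERATORS(DEFINE_ID) kUnknown };
    #undef DEFINE_ID

    // Second expansion: one 'if' per entry, the way the two
    // CALL_GENERATOR_CASE expansions above and below use the list.
    #define MATCH_CASE(name) \
      if (std::strcmp(s, #name) == 0) return k##name;
    BuiltinId LookupGenerator(const char* s) {
      MY_GENERATORS(MATCH_CASE)
      return kUnknown;
    }
    #undef MATCH_CASE

    int main() {
      std::printf("%d %d\n", LookupGenerator("ArrayPush"),
                  LookupGenerator("Nope"));
      return 0;
    }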
 
 
-Handle<Code> CallStubCompiler::CompileCustomCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> fname) {
+MaybeObject* CallStubCompiler::CompileCustomCall(Object* object,
+                                                 JSObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* fname) {
   ASSERT(HasCustomCallGenerator(function));
 
-  if (function->shared()->HasBuiltinFunctionId()) {
-    BuiltinFunctionId id = function->shared()->builtin_function_id();
-#define CALL_GENERATOR_CASE(name)                               \
-    if (id == k##name) {                                        \
-      return CallStubCompiler::Compile##name##Call(object,      \
-                                                   holder,      \
-                                                   cell,        \
-                                                   function,    \
-                                                   fname);      \
+  SharedFunctionInfo* info = function->shared();
+  if (info->HasBuiltinFunctionId()) {
+    BuiltinFunctionId id = info->builtin_function_id();
+#define CALL_GENERATOR_CASE(name)                           \
+    if (id == k##name) {                                    \
+      return CallStubCompiler::Compile##name##Call(object,  \
+                                                  holder,   \
+                                                  cell,     \
+                                                  function, \
+                                                  fname);   \
     }
     CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
 #undef CALL_GENERATOR_CASE
@@ -1429,99 +1739,100 @@
 }
 
 
-Handle<Code> CallStubCompiler::GetCode(PropertyType type, Handle<String> name) {
+MaybeObject* CallStubCompiler::GetCode(PropertyType type, String* name) {
   int argc = arguments_.immediate();
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
                                                     type,
-                                                    extra_state_,
+                                                    extra_ic_state_,
                                                     cache_holder_,
                                                     argc);
   return GetCodeWithFlags(flags, name);
 }
 
 
-Handle<Code> CallStubCompiler::GetCode(Handle<JSFunction> function) {
-  Handle<String> function_name;
+MaybeObject* CallStubCompiler::GetCode(JSFunction* function) {
+  String* function_name = NULL;
   if (function->shared()->name()->IsString()) {
-    function_name = Handle<String>(String::cast(function->shared()->name()));
+    function_name = String::cast(function->shared()->name());
   }
   return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
 
-Handle<Code> ConstructStubCompiler::GetCode() {
+MaybeObject* ConstructStubCompiler::GetCode() {
   Code::Flags flags = Code::ComputeFlags(Code::STUB);
-  Handle<Code> code = GetCodeWithFlags(flags, "ConstructStub");
-  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, "ConstructStub"));
-  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", *code));
-  return code;
+  Object* result;
+  { MaybeObject* maybe_result = GetCodeWithFlags(flags, "ConstructStub");
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+  }
+  Code* code = Code::cast(result);
+  USE(code);
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, code, "ConstructStub"));
+  GDBJIT(AddCode(GDBJITInterface::STUB, "ConstructStub", Code::cast(code)));
+  return result;
 }
 
 
 CallOptimization::CallOptimization(LookupResult* lookup) {
-  if (lookup->IsFound() &&
-      lookup->IsCacheable() &&
-      lookup->type() == CONSTANT_FUNCTION) {
-    // We only optimize constant function calls.
-    Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
+  if (!lookup->IsProperty() || !lookup->IsCacheable() ||
+      lookup->type() != CONSTANT_FUNCTION) {
+    Initialize(NULL);
   } else {
-    Initialize(Handle<JSFunction>::null());
+    // We only optimize constant function calls.
+    Initialize(lookup->GetConstantFunction());
   }
 }
 
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
+CallOptimization::CallOptimization(JSFunction* function) {
   Initialize(function);
 }
 
 
-int CallOptimization::GetPrototypeDepthOfExpectedType(
-    Handle<JSObject> object,
-    Handle<JSObject> holder) const {
-  ASSERT(is_simple_api_call());
-  if (expected_receiver_type_.is_null()) return 0;
+int CallOptimization::GetPrototypeDepthOfExpectedType(JSObject* object,
+                                                      JSObject* holder) const {
+  ASSERT(is_simple_api_call_);
+  if (expected_receiver_type_ == NULL) return 0;
   int depth = 0;
-  while (!object.is_identical_to(holder)) {
-    if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
-    object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
+  while (object != holder) {
+    if (object->IsInstanceOf(expected_receiver_type_)) return depth;
+    object = JSObject::cast(object->GetPrototype());
     ++depth;
   }
-  if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
+  if (holder->IsInstanceOf(expected_receiver_type_)) return depth;
   return kInvalidProtoDepth;
 }
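
GetPrototypeDepthOfExpectedType above counts prototype hops from the receiver toward the holder until an object matching the expected API signature type is found, returning kInvalidProtoDepth when nothing on the chain matches. The same walk in miniature, with simplified objects where type_tag stands in for IsInstanceOf:

    #include <cassert>

    constexpr int kInvalidProtoDepth = -1;

    struct Obj {
      const Obj* proto;  // next object on the prototype chain
      int type_tag;      // stand-in for IsInstanceOf(expected_type)
    };

    // Walks object -> holder, returning how many hops were taken before
    // an object of the expected type was seen; mirrors the loop above.
    int PrototypeDepth(const Obj* object, const Obj* holder, int expected) {
      int depth = 0;
      while (object != holder) {
        if (object->type_tag == expected) return depth;
        object = object->proto;
        ++depth;
      }
      if (holder->type_tag == expected) return depth;
      return kInvalidProtoDepth;
    }

    int main() {
      Obj holder{nullptr, 7};
      Obj mid{&holder, 0};
      Obj recv{&mid, 0};
      assert(PrototypeDepth(&recv, &holder, 7) == 2);
      assert(PrototypeDepth(&recv, &holder, 9) == kInvalidProtoDepth);
      return 0;
    }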
 
 
-void CallOptimization::Initialize(Handle<JSFunction> function) {
-  constant_function_ = Handle<JSFunction>::null();
+void CallOptimization::Initialize(JSFunction* function) {
+  constant_function_ = NULL;
   is_simple_api_call_ = false;
-  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
-  api_call_info_ = Handle<CallHandlerInfo>::null();
+  expected_receiver_type_ = NULL;
+  api_call_info_ = NULL;
 
-  if (function.is_null() || !function->is_compiled()) return;
+  if (function == NULL || !function->is_compiled()) return;
 
   constant_function_ = function;
   AnalyzePossibleApiFunction(function);
 }
 
 
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
-  if (!function->shared()->IsApiFunction()) return;
-  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+void CallOptimization::AnalyzePossibleApiFunction(JSFunction* function) {
+  SharedFunctionInfo* sfi = function->shared();
+  if (!sfi->IsApiFunction()) return;
+  FunctionTemplateInfo* info = sfi->get_api_func_data();
 
   // Require a C++ callback.
   if (info->call_code()->IsUndefined()) return;
-  api_call_info_ =
-      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
+  api_call_info_ = CallHandlerInfo::cast(info->call_code());
 
   // Accept signatures that either have no restrictions at all or
   // only have restrictions on the receiver.
   if (!info->signature()->IsUndefined()) {
-    Handle<SignatureInfo> signature =
-        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
+    SignatureInfo* signature = SignatureInfo::cast(info->signature());
     if (!signature->args()->IsUndefined()) return;
     if (!signature->receiver()->IsUndefined()) {
       expected_receiver_type_ =
-          Handle<FunctionTemplateInfo>(
-              FunctionTemplateInfo::cast(signature->receiver()));
+          FunctionTemplateInfo::cast(signature->receiver());
     }
   }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 29bdb61..18c157b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 
 #include "allocation.h"
 #include "arguments.h"
-#include "ic-inl.h"
 #include "macro-assembler.h"
 #include "objects.h"
 #include "zone-inl.h"
@@ -69,175 +68,214 @@
   struct Entry {
     String* key;
     Code* value;
-    Map* map;
   };
 
-  void Initialize();
+  void Initialize(bool create_heap_objects);
 
 
   // Computes the right stub matching. Inserts the result in the
   // cache before returning.  This might compile a stub if needed.
-  Handle<Code> ComputeLoadNonexistent(Handle<String> name,
-                                      Handle<JSObject> receiver);
+  MUST_USE_RESULT MaybeObject* ComputeLoadNonexistent(
+      String* name,
+      JSObject* receiver);
 
-  Handle<Code> ComputeLoadField(Handle<String> name,
-                                Handle<JSObject> receiver,
-                                Handle<JSObject> holder,
-                                int field_index);
+  MUST_USE_RESULT MaybeObject* ComputeLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int field_index);
 
-  Handle<Code> ComputeLoadCallback(Handle<String> name,
-                                   Handle<JSObject> receiver,
-                                   Handle<JSObject> holder,
-                                   Handle<AccessorInfo> callback);
+  MUST_USE_RESULT MaybeObject* ComputeLoadCallback(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder,
+      AccessorInfo* callback);
 
-  Handle<Code> ComputeLoadConstant(Handle<String> name,
-                                   Handle<JSObject> receiver,
-                                   Handle<JSObject> holder,
-                                   Handle<JSFunction> value);
+  MUST_USE_RESULT MaybeObject* ComputeLoadConstant(String* name,
+                                                   JSObject* receiver,
+                                                   JSObject* holder,
+                                                   Object* value);
 
-  Handle<Code> ComputeLoadInterceptor(Handle<String> name,
-                                      Handle<JSObject> receiver,
-                                      Handle<JSObject> holder);
+  MUST_USE_RESULT MaybeObject* ComputeLoadInterceptor(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder);
 
-  Handle<Code> ComputeLoadNormal();
+  MUST_USE_RESULT MaybeObject* ComputeLoadNormal();
 
-  Handle<Code> ComputeLoadGlobal(Handle<String> name,
-                                 Handle<JSObject> receiver,
-                                 Handle<GlobalObject> holder,
-                                 Handle<JSGlobalPropertyCell> cell,
-                                 bool is_dont_delete);
+
+  MUST_USE_RESULT MaybeObject* ComputeLoadGlobal(
+      String* name,
+      JSObject* receiver,
+      GlobalObject* holder,
+      JSGlobalPropertyCell* cell,
+      bool is_dont_delete);
+
 
   // ---
 
-  Handle<Code> ComputeKeyedLoadField(Handle<String> name,
-                                     Handle<JSObject> receiver,
-                                     Handle<JSObject> holder,
-                                     int field_index);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
+                                                     int field_index);
 
-  Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
-                                        Handle<JSObject> receiver,
-                                        Handle<JSObject> holder,
-                                        Handle<AccessorInfo> callback);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadCallback(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder,
+      AccessorInfo* callback);
 
-  Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
-                                        Handle<JSObject> receiver,
-                                        Handle<JSObject> holder,
-                                        Handle<JSFunction> value);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadConstant(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder,
+      Object* value);
 
-  Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
-                                           Handle<JSObject> receiver,
-                                           Handle<JSObject> holder);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadInterceptor(
+      String* name,
+      JSObject* receiver,
+      JSObject* holder);
 
-  Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
-                                           Handle<JSArray> receiver);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadArrayLength(
+      String* name,
+      JSArray* receiver);
 
-  Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
-                                            Handle<String> receiver);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadStringLength(
+      String* name,
+      String* receiver);
 
-  Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
-                                                 Handle<JSFunction> receiver);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadFunctionPrototype(
+      String* name,
+      JSFunction* receiver);
 
   // ---
 
-  Handle<Code> ComputeStoreField(Handle<String> name,
-                                 Handle<JSObject> receiver,
-                                 int field_index,
-                                 Handle<Map> transition,
-                                 StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeStoreField(
+      String* name,
+      JSObject* receiver,
+      int field_index,
+      Map* transition,
+      StrictModeFlag strict_mode);
 
-  Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeStoreNormal(
+      StrictModeFlag strict_mode);
 
-  Handle<Code> ComputeStoreGlobal(Handle<String> name,
-                                  Handle<GlobalObject> receiver,
-                                  Handle<JSGlobalPropertyCell> cell,
-                                  StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeStoreGlobal(
+      String* name,
+      GlobalObject* receiver,
+      JSGlobalPropertyCell* cell,
+      StrictModeFlag strict_mode);
 
-  Handle<Code> ComputeStoreCallback(Handle<String> name,
-                                    Handle<JSObject> receiver,
-                                    Handle<AccessorInfo> callback,
-                                    StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeStoreCallback(
+      String* name,
+      JSObject* receiver,
+      AccessorInfo* callback,
+      StrictModeFlag strict_mode);
 
-  Handle<Code> ComputeStoreInterceptor(Handle<String> name,
-                                       Handle<JSObject> receiver,
-                                       StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeStoreInterceptor(
+      String* name,
+      JSObject* receiver,
+      StrictModeFlag strict_mode);
 
   // ---
 
-  Handle<Code> ComputeKeyedStoreField(Handle<String> name,
-                                      Handle<JSObject> receiver,
-                                      int field_index,
-                                      Handle<Map> transition,
-                                      StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedStoreField(
+      String* name,
+      JSObject* receiver,
+      int field_index,
+      Map* transition,
+      StrictModeFlag strict_mode);
 
-  Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<JSObject> receiver,
-                                              KeyedIC::StubKind stub_kind,
-                                              StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* ComputeKeyedLoadOrStoreElement(
+      JSObject* receiver,
+      bool is_store,
+      StrictModeFlag strict_mode);
 
   // ---
 
-  Handle<Code> ComputeCallField(int argc,
-                                Code::Kind,
-                                Code::ExtraICState extra_state,
-                                Handle<String> name,
-                                Handle<Object> object,
-                                Handle<JSObject> holder,
-                                int index);
+  MUST_USE_RESULT MaybeObject* ComputeCallField(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder,
+      int index);
 
-  Handle<Code> ComputeCallConstant(int argc,
-                                   Code::Kind,
-                                   Code::ExtraICState extra_state,
-                                   Handle<String> name,
-                                   Handle<Object> object,
-                                   Handle<JSObject> holder,
-                                   Handle<JSFunction> function);
+  MUST_USE_RESULT MaybeObject* ComputeCallConstant(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder,
+      JSFunction* function);
 
-  Handle<Code> ComputeCallInterceptor(int argc,
-                                      Code::Kind,
-                                      Code::ExtraICState extra_state,
-                                      Handle<String> name,
-                                      Handle<Object> object,
-                                      Handle<JSObject> holder);
+  MUST_USE_RESULT MaybeObject* ComputeCallNormal(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      JSObject* receiver);
 
-  Handle<Code> ComputeCallGlobal(int argc,
-                                 Code::Kind,
-                                 Code::ExtraICState extra_state,
-                                 Handle<String> name,
-                                 Handle<JSObject> receiver,
-                                 Handle<GlobalObject> holder,
-                                 Handle<JSGlobalPropertyCell> cell,
-                                 Handle<JSFunction> function);
+  MUST_USE_RESULT MaybeObject* ComputeCallInterceptor(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      Object* object,
+      JSObject* holder);
+
+  MUST_USE_RESULT MaybeObject* ComputeCallGlobal(
+      int argc,
+      Code::Kind,
+      Code::ExtraICState extra_ic_state,
+      String* name,
+      JSObject* receiver,
+      GlobalObject* holder,
+      JSGlobalPropertyCell* cell,
+      JSFunction* function);
 
   // ---
 
-  Handle<Code> ComputeCallInitialize(int argc, RelocInfo::Mode mode);
+  MUST_USE_RESULT MaybeObject* ComputeCallInitialize(int argc,
+                                                     RelocInfo::Mode mode,
+                                                     Code::Kind kind);
+
+  Handle<Code> ComputeCallInitialize(int argc,
+                                     RelocInfo::Mode mode);
 
   Handle<Code> ComputeKeyedCallInitialize(int argc);
 
-  Handle<Code> ComputeCallPreMonomorphic(int argc,
-                                         Code::Kind kind,
-                                         Code::ExtraICState extra_state);
+  MUST_USE_RESULT MaybeObject* ComputeCallPreMonomorphic(
+      int argc,
+      Code::Kind kind,
+      Code::ExtraICState extra_ic_state);
 
-  Handle<Code> ComputeCallNormal(int argc,
-                                 Code::Kind kind,
-                                 Code::ExtraICState state);
+  MUST_USE_RESULT MaybeObject* ComputeCallNormal(int argc,
+                                                 Code::Kind kind,
+                                                 Code::ExtraICState state);
 
-  Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
+  MUST_USE_RESULT MaybeObject* ComputeCallArguments(int argc,
+                                                    Code::Kind kind);
 
-  Handle<Code> ComputeCallMegamorphic(int argc,
-                                      Code::Kind kind,
-                                      Code::ExtraICState state);
+  MUST_USE_RESULT MaybeObject* ComputeCallMegamorphic(int argc,
+                                                      Code::Kind kind,
+                                                      Code::ExtraICState state);
 
-  Handle<Code> ComputeCallMiss(int argc,
-                               Code::Kind kind,
-                               Code::ExtraICState state);
+  MUST_USE_RESULT MaybeObject* ComputeCallMiss(int argc,
+                                               Code::Kind kind,
+                                               Code::ExtraICState state);
 
   // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
+  MUST_USE_RESULT Code* FindCallInitialize(int argc,
+                                           RelocInfo::Mode mode,
+                                           Code::Kind kind);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind);
+  MUST_USE_RESULT MaybeObject* ComputeCallDebugBreak(int argc, Code::Kind kind);
 
-  Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind);
+  MUST_USE_RESULT MaybeObject* ComputeCallDebugPrepareStepIn(int argc,
+                                                             Code::Kind kind);
 #endif
 
   // Update cache for entry hash(name, map).
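
Every Compute* method in this hunk swaps a Handle<Code> result for a MUST_USE_RESULT MaybeObject*, so allocation failure travels in-band instead of through handles. Below is a hedged sketch of the caller-side protocol this imposes; it is a fragment written against the declarations above (CallerSketch is hypothetical, ComputeLoadNormal is the no-argument method declared in this hunk, and MaybeObject/Object/StubCache come from the surrounding V8 headers):

    // CallerSketch is hypothetical; it is not copied from V8.
    MaybeObject* CallerSketch(StubCache* cache) {
      Object* code;
      { MaybeObject* maybe_code = cache->ComputeLoadNormal();
        // A MaybeObject wraps either a real object or a retry-after-GC
        // Failure.  ToObject() returns false for a Failure, which the
        // caller propagates unchanged so the runtime can GC and retry.
        if (!maybe_code->ToObject(&code)) return maybe_code;
      }
      return code;  // safe to Code::cast(code) from here on
    }
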
@@ -249,11 +287,10 @@
   // Collect all maps that match the name and flags.
   void CollectMatchingMaps(SmallMapList* types,
                            String* name,
-                           Code::Flags flags,
-                           Handle<Context> global_context);
+                           Code::Flags flags);
 
   // Generate code for probing the stub cache table.
-  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
+  // Arguments extra and extra2 may be used to pass additional scratch
   // registers. Set to no_reg if not needed.
   void GenerateProbe(MacroAssembler* masm,
                      Code::Flags flags,
@@ -261,8 +298,7 @@
                      Register name,
                      Register scratch,
                      Register extra,
-                     Register extra2 = no_reg,
-                     Register extra3 = no_reg);
+                     Register extra2 = no_reg);
 
   enum Table {
     kPrimary,
@@ -276,12 +312,6 @@
   }
 
 
-  SCTableReference map_reference(StubCache::Table table) {
-    return SCTableReference(
-        reinterpret_cast<Address>(&first_entry(table)->map));
-  }
-
-
   SCTableReference value_reference(StubCache::Table table) {
     return SCTableReference(
         reinterpret_cast<Address>(&first_entry(table)->value));
@@ -299,25 +329,18 @@
 
   Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
-  Factory* factory() { return isolate()->factory(); }
 
  private:
   explicit StubCache(Isolate* isolate);
 
-  Handle<Code> ComputeCallInitialize(int argc,
-                                     RelocInfo::Mode mode,
-                                     Code::Kind kind);
+  friend class Isolate;
+  friend class SCTableReference;
+  static const int kPrimaryTableSize = 2048;
+  static const int kSecondaryTableSize = 512;
+  Entry primary_[kPrimaryTableSize];
+  Entry secondary_[kSecondaryTableSize];
 
-  // The stub cache has a primary and secondary level.  The two levels have
-  // different hashing algorithms in order to avoid simultaneous collisions
-  // in both caches.  Unlike a probing strategy (quadratic or otherwise) the
-  // update strategy on updates is fairly clear and simple:  Any existing entry
-  // in the primary cache is moved to the secondary cache, and secondary cache
-  // entries are overwritten.
-
-  // Hash algorithm for the primary table.  This algorithm is replicated in
-  // assembler for every architecture.  Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // Computes the hashed offsets for primary and secondary caches.
   static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
     // This works well because the heap object tag size and the hash
     // shift are equal.  Shifting down the length field to get the
@@ -341,44 +364,27 @@
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
-  // Hash algorithm for the secondary table.  This algorithm is replicated in
-  // assembler for every architecture.  Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
   static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t string_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    // We always set the in_loop bit to zero when generating the lookup code
-    // so do it here too so the hash codes match.
-    uint32_t iflags =
-        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
-    uint32_t key = (seed - string_low32bits) + iflags;
+    uint32_t key = seed - string_low32bits + flags;
     return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
   }
 
   // Compute the entry for a given offset in exactly the same way as
   // we do in generated code.  We generate a hash code that already
-  // ends in String::kHashShift 0s.  Then we multiply it so it is a multiple
+  // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
   // of sizeof(Entry).  This makes it easier to avoid making mistakes
   // in the hashed offset computations.
   static Entry* entry(Entry* table, int offset) {
-    const int multiplier = sizeof(*table) >> String::kHashShift;
+    const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
-        reinterpret_cast<Address>(table) + offset * multiplier);
+        reinterpret_cast<Address>(table) + (offset << shift_amount));
   }
 
-  static const int kPrimaryTableBits = 11;
-  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
-  static const int kSecondaryTableBits = 9;
-  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
-  Entry primary_[kPrimaryTableSize];
-  Entry secondary_[kSecondaryTableSize];
   Isolate* isolate_;
 
-  friend class Isolate;
-  friend class SCTableReference;
-
   DISALLOW_COPY_AND_ASSIGN(StubCache);
 };
 
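The secondary hash and the entry scaling above are easy to get wrong, so here is the arithmetic in isolation. A self-contained sketch with assumed 32-bit constants (per the comment above, kHeapObjectTagSize == String::kHashShift == 2; kPointerSizeLog2 == 2 on 32-bit; an Entry is two pointers):

    #include <cstdint>

    // Assumed 32-bit values; the real constants live elsewhere in v8.
    static const int kTagSizeSketch = 2;       // kHeapObjectTagSize
    static const int kHashShiftSketch = 2;     // String::kHashShift
    static const int kPtrSizeLog2Sketch = 2;   // kPointerSizeLog2 (32-bit)
    static const int kSecondarySizeSketch = 512;

    // Mirrors SecondaryOffset above: fold the primary seed, the string's
    // address bits and the code flags together, then mask to a
    // table-sized, tag-scaled index.
    static int SecondaryOffsetSketch(const void* name, uint32_t flags,
                                     int seed) {
      uint32_t string_low32bits =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
      uint32_t key = seed - string_low32bits + flags;
      return key & ((kSecondarySizeSketch - 1) << kTagSizeSketch);
    }

    // Mirrors entry() above: an Entry is two pointers (8 bytes here),
    // and offsets already end in kHashShift zero bits, so one extra
    // left shift (2 + 1 - 2 == 1) yields a byte offset that is a
    // multiple of sizeof(Entry).
    static int EntryByteOffsetSketch(int offset) {
      return offset << (kPtrSizeLog2Sketch + 1 - kHashShiftSketch);
    }
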
@@ -400,24 +406,21 @@
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
 
 
-// The stub compilers compile stubs for the stub cache.
+// The stub compiler compiles stubs for the stub cache.
 class StubCompiler BASE_EMBEDDED {
  public:
-  explicit StubCompiler(Isolate* isolate)
-      : isolate_(isolate), masm_(isolate, NULL, 256), failure_(NULL) { }
+  StubCompiler()
+      : scope_(), masm_(Isolate::Current(), NULL, 256), failure_(NULL) { }
 
-  // Functions to compile either CallIC or KeyedCallIC.  The specific kind
-  // is extracted from the code flags.
-  Handle<Code> CompileCallInitialize(Code::Flags flags);
-  Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
-  Handle<Code> CompileCallNormal(Code::Flags flags);
-  Handle<Code> CompileCallMegamorphic(Code::Flags flags);
-  Handle<Code> CompileCallArguments(Code::Flags flags);
-  Handle<Code> CompileCallMiss(Code::Flags flags);
-
+  MUST_USE_RESULT MaybeObject* CompileCallInitialize(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallPreMonomorphic(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallNormal(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallMegamorphic(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallArguments(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallMiss(Code::Flags flags);
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  Handle<Code> CompileCallDebugBreak(Code::Flags flags);
-  Handle<Code> CompileCallDebugPrepareStepIn(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallDebugBreak(Code::Flags flags);
+  MUST_USE_RESULT MaybeObject* CompileCallDebugPrepareStepIn(Code::Flags flags);
 #endif
 
   // Static functions for generating parts of stubs.
@@ -437,10 +440,8 @@
                                                         Label* miss);
 
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
-                                       Register dst,
-                                       Register src,
-                                       Handle<JSObject> holder,
-                                       int index);
+                                       Register dst, Register src,
+                                       JSObject* holder, int index);
 
   static void GenerateLoadArrayLength(MacroAssembler* masm,
                                       Register receiver,
@@ -461,9 +462,9 @@
                                             Label* miss_label);
 
   static void GenerateStoreField(MacroAssembler* masm,
-                                 Handle<JSObject> object,
+                                 JSObject* object,
                                  int index,
-                                 Handle<Map> transition,
+                                 Map* transition,
                                  Register receiver_reg,
                                  Register name_reg,
                                  Register scratch,
@@ -489,87 +490,88 @@
   // The function can optionally (when save_at_depth !=
   // kInvalidProtoDepth) save the object at the given depth by moving
   // it to [esp + kPointerSize].
-  Register CheckPrototypes(Handle<JSObject> object,
+
+  Register CheckPrototypes(JSObject* object,
                            Register object_reg,
-                           Handle<JSObject> holder,
+                           JSObject* holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           Handle<String> name,
+                           String* name,
                            Label* miss) {
     return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
                            scratch2, name, kInvalidProtoDepth, miss);
   }
 
-  Register CheckPrototypes(Handle<JSObject> object,
+  Register CheckPrototypes(JSObject* object,
                            Register object_reg,
-                           Handle<JSObject> holder,
+                           JSObject* holder,
                            Register holder_reg,
                            Register scratch1,
                            Register scratch2,
-                           Handle<String> name,
+                           String* name,
                            int save_at_depth,
                            Label* miss);
 
  protected:
-  Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
-  Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name);
+  MaybeObject* GetCodeWithFlags(Code::Flags flags, const char* name);
+  MaybeObject* GetCodeWithFlags(Code::Flags flags, String* name);
 
   MacroAssembler* masm() { return &masm_; }
   void set_failure(Failure* failure) { failure_ = failure; }
 
-  void GenerateLoadField(Handle<JSObject> object,
-                         Handle<JSObject> holder,
+  void GenerateLoadField(JSObject* object,
+                         JSObject* holder,
                          Register receiver,
                          Register scratch1,
                          Register scratch2,
                          Register scratch3,
                          int index,
-                         Handle<String> name,
+                         String* name,
                          Label* miss);
 
-  void GenerateLoadCallback(Handle<JSObject> object,
-                            Handle<JSObject> holder,
-                            Register receiver,
-                            Register name_reg,
-                            Register scratch1,
-                            Register scratch2,
-                            Register scratch3,
-                            Handle<AccessorInfo> callback,
-                            Handle<String> name,
-                            Label* miss);
+  MaybeObject* GenerateLoadCallback(JSObject* object,
+                                    JSObject* holder,
+                                    Register receiver,
+                                    Register name_reg,
+                                    Register scratch1,
+                                    Register scratch2,
+                                    Register scratch3,
+                                    AccessorInfo* callback,
+                                    String* name,
+                                    Label* miss);
 
-  void GenerateLoadConstant(Handle<JSObject> object,
-                            Handle<JSObject> holder,
+  void GenerateLoadConstant(JSObject* object,
+                            JSObject* holder,
                             Register receiver,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
-                            Handle<JSFunction> value,
-                            Handle<String> name,
+                            Object* value,
+                            String* name,
                             Label* miss);
 
-  void GenerateLoadInterceptor(Handle<JSObject> object,
-                               Handle<JSObject> holder,
+  void GenerateLoadInterceptor(JSObject* object,
+                               JSObject* holder,
                                LookupResult* lookup,
                                Register receiver,
                                Register name_reg,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
-                               Handle<String> name,
+                               String* name,
                                Label* miss);
 
-  static void LookupPostInterceptor(Handle<JSObject> holder,
-                                    Handle<String> name,
+  static void LookupPostInterceptor(JSObject* holder,
+                                    String* name,
                                     LookupResult* lookup);
 
-  Isolate* isolate() { return isolate_; }
+  Isolate* isolate() { return scope_.isolate(); }
   Heap* heap() { return isolate()->heap(); }
   Factory* factory() { return isolate()->factory(); }
 
  private:
-  Isolate* isolate_;
+  HandleScope scope_;
   MacroAssembler masm_;
   Failure* failure_;
 };
@@ -577,75 +579,70 @@
 
 class LoadStubCompiler: public StubCompiler {
  public:
-  explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+  MUST_USE_RESULT MaybeObject* CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last);
 
-  Handle<Code> CompileLoadNonexistent(Handle<String> name,
-                                      Handle<JSObject> object,
-                                      Handle<JSObject> last);
+  MUST_USE_RESULT MaybeObject* CompileLoadField(JSObject* object,
+                                                JSObject* holder,
+                                                int index,
+                                                String* name);
 
-  Handle<Code> CompileLoadField(Handle<JSObject> object,
-                                Handle<JSObject> holder,
-                                int index,
-                                Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback);
 
-  Handle<Code> CompileLoadCallback(Handle<String> name,
-                                   Handle<JSObject> object,
-                                   Handle<JSObject> holder,
-                                   Handle<AccessorInfo> callback);
+  MUST_USE_RESULT MaybeObject* CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name);
 
-  Handle<Code> CompileLoadConstant(Handle<JSObject> object,
-                                   Handle<JSObject> holder,
-                                   Handle<JSFunction> value,
-                                   Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name);
 
-  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
-                                      Handle<JSObject> holder,
-                                      Handle<String> name);
-
-  Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
-                                 Handle<GlobalObject> holder,
-                                 Handle<JSGlobalPropertyCell> cell,
-                                 Handle<String> name,
-                                 bool is_dont_delete);
+  MUST_USE_RESULT MaybeObject* CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete);
 
  private:
-  Handle<Code> GetCode(PropertyType type, Handle<String> name);
+  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
 };
 
 
 class KeyedLoadStubCompiler: public StubCompiler {
  public:
-  explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+  MUST_USE_RESULT MaybeObject* CompileLoadField(String* name,
+                                                JSObject* object,
+                                                JSObject* holder,
+                                                int index);
 
-  Handle<Code> CompileLoadField(Handle<String> name,
-                                Handle<JSObject> object,
-                                Handle<JSObject> holder,
-                                int index);
+  MUST_USE_RESULT MaybeObject* CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback);
 
-  Handle<Code> CompileLoadCallback(Handle<String> name,
-                                   Handle<JSObject> object,
-                                   Handle<JSObject> holder,
-                                   Handle<AccessorInfo> callback);
+  MUST_USE_RESULT MaybeObject* CompileLoadConstant(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value);
 
-  Handle<Code> CompileLoadConstant(Handle<String> name,
-                                   Handle<JSObject> object,
-                                   Handle<JSObject> holder,
-                                   Handle<JSFunction> value);
+  MUST_USE_RESULT MaybeObject* CompileLoadInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name);
 
-  Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
-                                      Handle<JSObject> holder,
-                                      Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileLoadArrayLength(String* name);
+  MUST_USE_RESULT MaybeObject* CompileLoadStringLength(String* name);
+  MUST_USE_RESULT MaybeObject* CompileLoadFunctionPrototype(String* name);
 
-  Handle<Code> CompileLoadArrayLength(Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileLoadElement(Map* receiver_map);
 
-  Handle<Code> CompileLoadStringLength(Handle<String> name);
-
-  Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
-
-  Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
-
-  Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
-                                      CodeHandleList* handler_ics);
+  MUST_USE_RESULT MaybeObject* CompileLoadMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
 
   static void GenerateLoadExternalArray(MacroAssembler* masm,
                                         ElementsKind elements_kind);
@@ -657,36 +654,34 @@
   static void GenerateLoadDictionaryElement(MacroAssembler* masm);
 
  private:
-  Handle<Code> GetCode(PropertyType type,
-                       Handle<String> name,
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
                        InlineCacheState state = MONOMORPHIC);
 };
 
 
 class StoreStubCompiler: public StubCompiler {
  public:
-  StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode)
-    : StubCompiler(isolate), strict_mode_(strict_mode) { }
+  explicit StoreStubCompiler(StrictModeFlag strict_mode)
+    : strict_mode_(strict_mode) { }
 
+  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
+                                                 int index,
+                                                 Map* transition,
+                                                 String* name);
 
-  Handle<Code> CompileStoreField(Handle<JSObject> object,
-                                 int index,
-                                 Handle<Map> transition,
-                                 Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileStoreCallback(JSObject* object,
+                                                    AccessorInfo* callbacks,
+                                                    String* name);
+  MUST_USE_RESULT MaybeObject* CompileStoreInterceptor(JSObject* object,
+                                                       String* name);
+  MUST_USE_RESULT MaybeObject* CompileStoreGlobal(GlobalObject* object,
+                                                  JSGlobalPropertyCell* holder,
+                                                  String* name);
 
-  Handle<Code> CompileStoreCallback(Handle<JSObject> object,
-                                    Handle<AccessorInfo> callback,
-                                    Handle<String> name);
-
-  Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
-                                       Handle<String> name);
-
-  Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object,
-                                  Handle<JSGlobalPropertyCell> holder,
-                                  Handle<String> name);
 
  private:
-  Handle<Code> GetCode(PropertyType type, Handle<String> name);
+  MaybeObject* GetCode(PropertyType type, String* name);
 
   StrictModeFlag strict_mode_;
 };
@@ -694,32 +689,25 @@
 
 class KeyedStoreStubCompiler: public StubCompiler {
  public:
-  KeyedStoreStubCompiler(Isolate* isolate,
-                         StrictModeFlag strict_mode,
-                         KeyedAccessGrowMode grow_mode)
-    : StubCompiler(isolate),
-      strict_mode_(strict_mode),
-      grow_mode_(grow_mode) { }
+  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
+    : strict_mode_(strict_mode) { }
 
-  Handle<Code> CompileStoreField(Handle<JSObject> object,
-                                 int index,
-                                 Handle<Map> transition,
-                                 Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
+                                                 int index,
+                                                 Map* transition,
+                                                 String* name);
 
-  Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
+  MUST_USE_RESULT MaybeObject* CompileStoreElement(Map* receiver_map);
 
-  Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
-                                       CodeHandleList* handler_stubs,
-                                       MapHandleList* transitioned_maps);
+  MUST_USE_RESULT MaybeObject* CompileStoreMegamorphic(
+      MapList* receiver_maps,
+      CodeList* handler_ics);
 
   static void GenerateStoreFastElement(MacroAssembler* masm,
-                                       bool is_js_array,
-                                       ElementsKind element_kind,
-                                       KeyedAccessGrowMode grow_mode);
+                                       bool is_js_array);
 
   static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
-                                             bool is_js_array,
-                                             KeyedAccessGrowMode grow_mode);
+                                             bool is_js_array);
 
   static void GenerateStoreExternalArray(MacroAssembler* masm,
                                          ElementsKind elements_kind);
@@ -727,12 +715,11 @@
   static void GenerateStoreDictionaryElement(MacroAssembler* masm);
 
  private:
-  Handle<Code> GetCode(PropertyType type,
-                       Handle<String> name,
+  MaybeObject* GetCode(PropertyType type,
+                       String* name,
                        InlineCacheState state = MONOMORPHIC);
 
   StrictModeFlag strict_mode_;
-  KeyedAccessGrowMode grow_mode_;
 };
 
 
@@ -752,97 +739,105 @@
 
 class CallStubCompiler: public StubCompiler {
  public:
-  CallStubCompiler(Isolate* isolate,
-                   int argc,
+  CallStubCompiler(int argc,
                    Code::Kind kind,
-                   Code::ExtraICState extra_state,
+                   Code::ExtraICState extra_ic_state,
                    InlineCacheHolderFlag cache_holder);
 
-  Handle<Code> CompileCallField(Handle<JSObject> object,
-                                Handle<JSObject> holder,
-                                int index,
-                                Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileCallField(
+      JSObject* object,
+      JSObject* holder,
+      int index,
+      String* name);
 
-  Handle<Code> CompileCallConstant(Handle<Object> object,
-                                   Handle<JSObject> holder,
-                                   Handle<JSFunction> function,
-                                   Handle<String> name,
-                                   CheckType check);
+  MUST_USE_RESULT MaybeObject* CompileCallConstant(
+      Object* object,
+      JSObject* holder,
+      JSFunction* function,
+      String* name,
+      CheckType check);
 
-  Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
-                                      Handle<JSObject> holder,
-                                      Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileCallInterceptor(
+      JSObject* object,
+      JSObject* holder,
+      String* name);
 
-  Handle<Code> CompileCallGlobal(Handle<JSObject> object,
-                                 Handle<GlobalObject> holder,
-                                 Handle<JSGlobalPropertyCell> cell,
-                                 Handle<JSFunction> function,
-                                 Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileCallGlobal(
+      JSObject* object,
+      GlobalObject* holder,
+      JSGlobalPropertyCell* cell,
+      JSFunction* function,
+      String* name);
 
-  static bool HasCustomCallGenerator(Handle<JSFunction> function);
+  static bool HasCustomCallGenerator(JSFunction* function);
 
  private:
-  // Compiles a custom call constant/global IC.  For constant calls cell is
-  // NULL.  Returns an empty handle if there is no custom call code for the
-  // given function.
-  Handle<Code> CompileCustomCall(Handle<Object> object,
-                                 Handle<JSObject> holder,
-                                 Handle<JSGlobalPropertyCell> cell,
-                                 Handle<JSFunction> function,
-                                 Handle<String> name);
+  // Compiles a custom call constant/global IC. For constant calls
+  // cell is NULL. Returns undefined if there is no custom call code
+  // for the given function or it can't be generated.
+  MUST_USE_RESULT MaybeObject* CompileCustomCall(Object* object,
+                                                 JSObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* name);
 
-#define DECLARE_CALL_GENERATOR(name)                                    \
-  Handle<Code> Compile##name##Call(Handle<Object> object,               \
-                                   Handle<JSObject> holder,             \
-                                   Handle<JSGlobalPropertyCell> cell,   \
-                                   Handle<JSFunction> function,         \
-                                   Handle<String> fname);
+#define DECLARE_CALL_GENERATOR(name)                                           \
+  MUST_USE_RESULT MaybeObject* Compile##name##Call(Object* object,             \
+                                                   JSObject* holder,           \
+                                                   JSGlobalPropertyCell* cell, \
+                                                   JSFunction* function,       \
+                                                   String* fname);
   CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
 #undef DECLARE_CALL_GENERATOR
 
-  Handle<Code> CompileFastApiCall(const CallOptimization& optimization,
-                                  Handle<Object> object,
-                                  Handle<JSObject> holder,
-                                  Handle<JSGlobalPropertyCell> cell,
-                                  Handle<JSFunction> function,
-                                  Handle<String> name);
+  MUST_USE_RESULT MaybeObject* CompileFastApiCall(
+      const CallOptimization& optimization,
+      Object* object,
+      JSObject* holder,
+      JSGlobalPropertyCell* cell,
+      JSFunction* function,
+      String* name);
 
-  Handle<Code> GetCode(PropertyType type, Handle<String> name);
-  Handle<Code> GetCode(Handle<JSFunction> function);
+  const ParameterCount arguments_;
+  const Code::Kind kind_;
+  const Code::ExtraICState extra_ic_state_;
+  const InlineCacheHolderFlag cache_holder_;
 
   const ParameterCount& arguments() { return arguments_; }
 
-  void GenerateNameCheck(Handle<String> name, Label* miss);
+  MUST_USE_RESULT MaybeObject* GetCode(PropertyType type, String* name);
 
-  void GenerateGlobalReceiverCheck(Handle<JSObject> object,
-                                   Handle<JSObject> holder,
-                                   Handle<String> name,
+  // Convenience function. Calls GetCode above, passing the
+  // CONSTANT_FUNCTION type and the name of the given function.
+  MUST_USE_RESULT MaybeObject* GetCode(JSFunction* function);
+
+  void GenerateNameCheck(String* name, Label* miss);
+
+  void GenerateGlobalReceiverCheck(JSObject* object,
+                                   JSObject* holder,
+                                   String* name,
                                    Label* miss);
 
   // Generates code to load the function from the cell, checking that
   // it still contains the same function.
-  void GenerateLoadFunctionFromCell(Handle<JSGlobalPropertyCell> cell,
-                                    Handle<JSFunction> function,
+  void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                    JSFunction* function,
                                     Label* miss);
 
-  // Generates a jump to CallIC miss stub.
-  void GenerateMissBranch();
-
-  const ParameterCount arguments_;
-  const Code::Kind kind_;
-  const Code::ExtraICState extra_state_;
-  const InlineCacheHolderFlag cache_holder_;
+  // Generates a jump to the CallIC miss stub.  Returns Failure if the
+  // jump cannot be generated.
+  MUST_USE_RESULT MaybeObject* GenerateMissBranch();
 };
 
 
 class ConstructStubCompiler: public StubCompiler {
  public:
-  explicit ConstructStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+  explicit ConstructStubCompiler() {}
 
-  Handle<Code> CompileConstructStub(Handle<JSFunction> function);
+  MUST_USE_RESULT MaybeObject* CompileConstructStub(JSFunction* function);
 
  private:
-  Handle<Code> GetCode();
+  MaybeObject* GetCode();
 };
 
 
@@ -851,14 +846,14 @@
  public:
   explicit CallOptimization(LookupResult* lookup);
 
-  explicit CallOptimization(Handle<JSFunction> function);
+  explicit CallOptimization(JSFunction* function);
 
   bool is_constant_call() const {
-    return !constant_function_.is_null();
+    return constant_function_ != NULL;
   }
 
-  Handle<JSFunction> constant_function() const {
-    ASSERT(is_constant_call());
+  JSFunction* constant_function() const {
+    ASSERT(constant_function_ != NULL);
     return constant_function_;
   }
 
@@ -866,32 +861,32 @@
     return is_simple_api_call_;
   }
 
-  Handle<FunctionTemplateInfo> expected_receiver_type() const {
-    ASSERT(is_simple_api_call());
+  FunctionTemplateInfo* expected_receiver_type() const {
+    ASSERT(is_simple_api_call_);
     return expected_receiver_type_;
   }
 
-  Handle<CallHandlerInfo> api_call_info() const {
-    ASSERT(is_simple_api_call());
+  CallHandlerInfo* api_call_info() const {
+    ASSERT(is_simple_api_call_);
     return api_call_info_;
   }
 
   // Returns the depth of the object having the expected type in the
   // prototype chain between the two arguments.
-  int GetPrototypeDepthOfExpectedType(Handle<JSObject> object,
-                                      Handle<JSObject> holder) const;
+  int GetPrototypeDepthOfExpectedType(JSObject* object,
+                                      JSObject* holder) const;
 
  private:
-  void Initialize(Handle<JSFunction> function);
+  void Initialize(JSFunction* function);
 
   // Determines whether the given function can be called using the
   // fast api call builtin.
-  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
+  void AnalyzePossibleApiFunction(JSFunction* function);
 
-  Handle<JSFunction> constant_function_;
+  JSFunction* constant_function_;
   bool is_simple_api_call_;
-  Handle<FunctionTemplateInfo> expected_receiver_type_;
-  Handle<CallHandlerInfo> api_call_info_;
+  FunctionTemplateInfo* expected_receiver_type_;
+  CallHandlerInfo* api_call_info_;
 };
 
 
diff --git a/src/token.h b/src/token.h
index 3036e55..eb825c1 100644
--- a/src/token.h
+++ b/src/token.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -73,7 +73,6 @@
   T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
   T(INIT_LET, "=init_let", 2)  /* AST-use only. */                      \
   T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
-  T(INIT_CONST_HARMONY, "=init_const_harmony", 2)  /* AST-use only. */  \
   T(ASSIGN, "=", 2)                                                     \
   T(ASSIGN_BIT_OR, "|=", 2)                                             \
   T(ASSIGN_BIT_XOR, "^=", 2)                                            \
@@ -170,8 +169,6 @@
   T(FUTURE_RESERVED_WORD, NULL, 0)                                      \
   T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                               \
   K(CONST, "const", 0)                                                  \
-  K(EXPORT, "export", 0)                                                \
-  K(IMPORT, "import", 0)                                                \
   K(LET, "let", 0)                                                      \
                                                                         \
   /* Illegal token - not able to scan. */                               \
@@ -215,14 +212,10 @@
     return EQ <= op && op <= IN;
   }
 
-  static bool IsOrderedRelationalCompareOp(Value op) {
+  static bool IsOrderedCompareOp(Value op) {
     return op == LT || op == LTE || op == GT || op == GTE;
   }
 
-  static bool IsEqualityOp(Value op) {
-    return op == EQ || op == EQ_STRICT;
-  }
-
   static Value NegateCompareOp(Value op) {
     ASSERT(IsCompareOp(op));
     switch (op) {
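
NegateCompareOp's switch is cut off above; for the ordered operators, logical negation pairs LT with GTE and GT with LTE (since !(a < b) is (a >= b) over totally ordered operands). A self-contained sketch of that mapping over a simplified stand-in enum, not V8's Token::Value, with the usual caveat that the equivalence breaks when a NaN can appear on either side:

    #include <cassert>

    // Simplified stand-in enum, ordered comparison operators only.
    enum OrderedOpSketch { LT_SK, GT_SK, LTE_SK, GTE_SK };

    // Logical negation of an ordered comparison; valid for totally
    // ordered operands, but not when NaN is a possible operand.
    static OrderedOpSketch NegateOrderedOpSketch(OrderedOpSketch op) {
      switch (op) {
        case LT_SK:  return GTE_SK;
        case GT_SK:  return LTE_SK;
        case LTE_SK: return GT_SK;
        case GTE_SK: return LT_SK;
      }
      assert(false);  // unreachable: all ordered operators handled
      return LT_SK;
    }
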
diff --git a/src/type-info.cc b/src/type-info.cc
index 159be6a..4df7ece 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,10 +60,8 @@
 
 
 TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
-                                       Handle<Context> global_context,
-                                       Isolate* isolate) {
+                                       Handle<Context> global_context) {
   global_context_ = global_context;
-  isolate_ = isolate;
   BuildDictionary(code);
   ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue);
 }
@@ -73,41 +71,29 @@
   int entry = dictionary_->FindEntry(ast_id);
   return entry != UnseededNumberDictionary::kNotFound
       ? Handle<Object>(dictionary_->ValueAt(entry))
-      : Handle<Object>::cast(isolate_->factory()->undefined_value());
-}
-
-
-bool TypeFeedbackOracle::LoadIsUninitialized(Property* expr) {
-  Handle<Object> map_or_code = GetInfo(expr->id());
-  if (map_or_code->IsMap()) return false;
-  if (map_or_code->IsCode()) {
-    Handle<Code> code = Handle<Code>::cast(map_or_code);
-    return code->is_inline_cache_stub() && code->ic_state() == UNINITIALIZED;
-  }
-  return false;
+      : Isolate::Current()->factory()->undefined_value();
 }
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     return code->is_keyed_load_stub() &&
         code->ic_state() == MONOMORPHIC &&
         Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
-        code->FindFirstMap() != NULL &&
-        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+        code->FindFirstMap() != NULL;
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = isolate_->builtins();
+    Builtins* builtins = Isolate::Current()->builtins();
     return code->is_keyed_load_stub() &&
         *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
         code->ic_state() == MEGAMORPHIC;
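
GetInfo above resolves an AST id through the dictionary built from the code object's relocation info, falling back to undefined, and the LoadIs* predicates then classify whatever was recorded: a Map is monomorphic by construction, while a Code object is inspected for its IC state. A condensed model of the monomorphic check (stand-in types, not V8's; each field names the real check it models):

    // FeedbackSketch summarizes what GetInfo() can return for a load site.
    enum ICStateSketch { UNINITIALIZED_SK, MONOMORPHIC_SK, MEGAMORPHIC_SK };

    struct FeedbackSketch {
      bool is_map;              // GetInfo(...)->IsMap(): a receiver map was recorded
      bool is_keyed_load_stub;  // code->is_keyed_load_stub()
      bool is_normal_type;      // Code::ExtractTypeFromFlags(code->flags()) == NORMAL
      bool has_first_map;       // code->FindFirstMap() != NULL
      ICStateSketch state;      // code->ic_state()
    };

    // Mirrors LoadIsMonomorphicNormal above: a recorded Map is
    // monomorphic by construction; otherwise the recorded stub must be
    // a monomorphic NORMAL keyed load with an embedded receiver map.
    static bool LoadIsMonomorphicNormalSketch(const FeedbackSketch& f) {
      if (f.is_map) return true;
      return f.is_keyed_load_stub &&
             f.state == MONOMORPHIC_SK &&
             f.is_normal_type &&
             f.has_first_map;
    }
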
@@ -117,34 +103,24 @@
 
 
 bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    bool allow_growth =
-        Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
-        ALLOW_JSARRAY_GROWTH;
     return code->is_keyed_store_stub() &&
-        !allow_growth &&
         code->ic_state() == MONOMORPHIC &&
-        Code::ExtractTypeFromFlags(code->flags()) == NORMAL &&
-        code->FindFirstMap() != NULL &&
-        !CanRetainOtherContext(code->FindFirstMap(), *global_context_);
+        Code::ExtractTypeFromFlags(code->flags()) == NORMAL;
   }
   return false;
 }
 
 
 bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Builtins* builtins = isolate_->builtins();
-    bool allow_growth =
-        Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
-        ALLOW_JSARRAY_GROWTH;
+    Builtins* builtins = Isolate::Current()->builtins();
     return code->is_keyed_store_stub() &&
-        !allow_growth &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
         *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
         code->ic_state() == MEGAMORPHIC;
@@ -155,40 +131,18 @@
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
   Handle<Object> value = GetInfo(expr->id());
-  return value->IsMap() || value->IsSmi() || value->IsJSFunction();
-}
-
-
-bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
-  Handle<Object> value = GetInfo(expr->id());
-  return value->IsJSFunction();
-}
-
-
-bool TypeFeedbackOracle::ObjectLiteralStoreIsMonomorphic(
-    ObjectLiteral::Property* prop) {
-  Handle<Object> map_or_code = GetInfo(prop->key()->id());
-  return map_or_code->IsMap();
-}
-
-
-bool TypeFeedbackOracle::IsForInFastCase(ForInStatement* stmt) {
-  Handle<Object> value = GetInfo(stmt->PrepareId());
-  return value->IsSmi() &&
-      Smi::cast(*value)->value() == TypeFeedbackCells::kForInFastCaseMarker;
+  return value->IsMap() || value->IsSmi();
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
   ASSERT(LoadIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Map* first_map = code->FindFirstMap();
     ASSERT(first_map != NULL);
-    return CanRetainOtherContext(first_map, *global_context_)
-        ? Handle<Map>::null()
-        : Handle<Map>(first_map);
+    return Handle<Map>(first_map);
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -196,14 +150,10 @@
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code = GetInfo(expr->id());
+  Handle<Object> map_or_code(GetInfo(expr->id()));
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
-    Map* first_map = code->FindFirstMap();
-    ASSERT(first_map != NULL);
-    return CanRetainOtherContext(first_map, *global_context_)
-        ? Handle<Map>::null()
-        : Handle<Map>(first_map);
+    return Handle<Map>(code->FindFirstMap());
   }
   return Handle<Map>::cast(map_or_code);
 }
@@ -253,7 +203,6 @@
   return check;
 }
 
-
 Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
     CheckType check) {
   JSFunction* function = NULL;
@@ -276,26 +225,9 @@
 }
 
 
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
-  return Handle<JSFunction>::cast(GetInfo(expr->id()));
-}
-
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
-  return Handle<JSFunction>::cast(GetInfo(expr->id()));
-}
-
-
-Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
-    ObjectLiteral::Property* prop) {
-  ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
-  return Handle<Map>::cast(GetInfo(prop->key()->id()));
-}
-
-
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
   return *GetInfo(expr->id()) ==
-      isolate_->builtins()->builtin(id);
+      Isolate::Current()->builtins()->builtin(id);
 }
 
 
@@ -319,7 +251,6 @@
     case CompareIC::STRINGS:
       return TypeInfo::String();
     case CompareIC::OBJECTS:
-    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
@@ -339,23 +270,6 @@
 }
 
 
-Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
-  Handle<Object> object = GetInfo(expr->id());
-  if (!object->IsCode()) return Handle<Map>::null();
-  Handle<Code> code = Handle<Code>::cast(object);
-  if (!code->is_compare_ic_stub()) return Handle<Map>::null();
-  CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
-  if (state != CompareIC::KNOWN_OBJECTS) {
-    return Handle<Map>::null();
-  }
-  Map* first_map = code->FindFirstMap();
-  ASSERT(first_map != NULL);
-  return CanRetainOtherContext(first_map, *global_context_)
-      ? Handle<Map>::null()
-      : Handle<Map>(first_map);
-}
-
-
 TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
   Handle<Object> object = GetInfo(expr->id());
   TypeInfo unknown = TypeInfo::Unknown();
@@ -393,10 +307,6 @@
       case BinaryOpIC::SMI:
         switch (result_type) {
           case BinaryOpIC::UNINITIALIZED:
-            if (expr->op() == Token::DIV) {
-              return TypeInfo::Double();
-            }
-            return TypeInfo::Smi();
           case BinaryOpIC::SMI:
             return TypeInfo::Smi();
           case BinaryOpIC::INT32:
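
The removed UNINITIALIZED branch above treated Token::DIV specially: even Smi inputs can produce a fractional quotient, so before any feedback exists Double is the safer prediction (presumably to avoid deoptimizing on the first non-integral result). A tiny standalone demonstration of the rationale:

    #include <cstdio>

    // Dividing two small integers routinely leaves the small-integer
    // (Smi) range, which is why DIV was predicted as Double up front.
    int main() {
      double q = 7.0 / 2.0;                        // 3.5: needs a double
      bool fits_smi = (q == static_cast<int>(q));  // false for 3.5
      std::printf("7 / 2 = %g, Smi-representable: %s\n",
                  q, fits_smi ? "yes" : "no");
      return 0;
    }
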
@@ -442,14 +352,9 @@
       return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
-    case CompareIC::STRINGS:
-      return TypeInfo::String();
-    case CompareIC::SYMBOLS:
-      return TypeInfo::Symbol();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
     case CompareIC::OBJECTS:
-    case CompareIC::KNOWN_OBJECTS:
       // TODO(kasperl): We really need a type for JS objects here.
       return TypeInfo::NonPrimitive();
     case CompareIC::GENERIC:
@@ -492,70 +397,24 @@
                                               Handle<String> name,
                                               Code::Flags flags,
                                               SmallMapList* types) {
+  Isolate* isolate = Isolate::Current();
   Handle<Object> object = GetInfo(ast_id);
   if (object->IsUndefined() || object->IsSmi()) return;
 
-  if (*object ==
-      isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+  if (*object == isolate->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
     // TODO(fschneider): We could collect the maps and signal that
     // we need a generic store (or load) here.
     ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
   } else if (object->IsMap()) {
     types->Add(Handle<Map>::cast(object));
-  } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
-      Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
     types->Reserve(4);
     ASSERT(object->IsCode());
-    isolate_->stub_cache()->CollectMatchingMaps(types,
-                                                *name,
-                                                flags,
-                                                global_context_);
+    isolate->stub_cache()->CollectMatchingMaps(types, *name, flags);
   }
 }
 
 
-// Check if a map originates from a given global context. We use this
-// information to filter out maps from different context to avoid
-// retaining objects from different tabs in Chrome via optimized code.
-bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
-                                               Context* global_context) {
-  Object* constructor = NULL;
-  while (!map->prototype()->IsNull()) {
-    constructor = map->constructor();
-    if (!constructor->IsNull()) {
-      // If the constructor is not null or a JSFunction, we have to
-      // conservatively assume that it may retain a global context.
-      if (!constructor->IsJSFunction()) return true;
-      // Check if the constructor directly references a foreign context.
-      if (CanRetainOtherContext(JSFunction::cast(constructor),
-                                global_context)) {
-        return true;
-      }
-    }
-    map = HeapObject::cast(map->prototype())->map();
-  }
-  constructor = map->constructor();
-  if (constructor->IsNull()) return false;
-  JSFunction* function = JSFunction::cast(constructor);
-  return CanRetainOtherContext(function, global_context);
-}
-
-
-bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
-                                               Context* global_context) {
-  return function->context()->global() != global_context->global()
-      && function->context()->global() != global_context->builtins();
-}
-
-
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list) {
-  for (int i = 0; i < list->length(); ++i) {
-    if (list->at(i).is_identical_to(map)) return;
-  }
-  list->Add(map);
-}
-
-
 void TypeFeedbackOracle::CollectKeyedReceiverTypes(unsigned ast_id,
                                                    SmallMapList* types) {
   Handle<Object> object = GetInfo(ast_id);
@@ -569,10 +428,7 @@
       RelocInfo* info = it.rinfo();
       Object* object = info->target_object();
       if (object->IsMap()) {
-        Map* map = Map::cast(object);
-        if (!CanRetainOtherContext(map, *global_context_)) {
-          AddMapIfMissing(Handle<Map>(map), types);
-        }
+        types->Add(Handle<Map>(Map::cast(object)));
       }
     }
   }
@@ -596,7 +452,6 @@
   GetRelocInfos(code, &infos);
   CreateDictionary(code, &infos);
   ProcessRelocInfos(&infos);
-  ProcessTypeFeedbackCells(code);
   // Allocate handle in the parent scope.
   dictionary_ = scope.CloseAndEscape(dictionary_);
 }
@@ -614,13 +469,8 @@
 void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
                                           ZoneList<RelocInfo>* infos) {
   DisableAssertNoAllocation allocation_allowed;
-  int cell_count = code->type_feedback_info()->IsTypeFeedbackInfo()
-      ? TypeFeedbackInfo::cast(code->type_feedback_info())->
-          type_feedback_cells()->CellCount()
-      : 0;
-  int length = infos->length() + cell_count;
   byte* old_start = code->instruction_start();
-  dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
+  dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
   byte* new_start = code->instruction_start();
   RelocateRelocInfos(infos, old_start, new_start);
 }
@@ -638,69 +488,49 @@
 
 void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
   for (int i = 0; i < infos->length(); i++) {
-    RelocInfo reloc_entry = (*infos)[i];
-    Address target_address = reloc_entry.target_address();
     unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
-    Code* target = Code::GetCodeFromTargetAddress(target_address);
-    switch (target->kind()) {
-      case Code::LOAD_IC:
-      case Code::STORE_IC:
-      case Code::CALL_IC:
-      case Code::KEYED_CALL_IC:
-        if (target->ic_state() == MONOMORPHIC) {
-          if (target->kind() == Code::CALL_IC &&
-              target->check_type() != RECEIVER_MAP_CHECK) {
-            SetInfo(ast_id, Smi::FromInt(target->check_type()));
-          } else {
-            Object* map = target->FindFirstMap();
-            if (map == NULL) {
-              SetInfo(ast_id, static_cast<Object*>(target));
-            } else if (!CanRetainOtherContext(Map::cast(map),
-                                              *global_context_)) {
-              SetInfo(ast_id, map);
-            }
-          }
-        } else {
-          SetInfo(ast_id, target);
-        }
-        break;
-
-      case Code::KEYED_LOAD_IC:
-      case Code::KEYED_STORE_IC:
-        if (target->ic_state() == MONOMORPHIC ||
-            target->ic_state() == MEGAMORPHIC) {
-          SetInfo(ast_id, target);
-        }
-        break;
-
-      case Code::UNARY_OP_IC:
-      case Code::BINARY_OP_IC:
-      case Code::COMPARE_IC:
-      case Code::TO_BOOLEAN_IC:
-        SetInfo(ast_id, target);
-        break;
-
-      default:
-        break;
-    }
+    Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
+    ProcessTarget(ast_id, target);
   }
 }
 
 
-void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
-  Object* raw_info = code->type_feedback_info();
-  if (!raw_info->IsTypeFeedbackInfo()) return;
-  Handle<TypeFeedbackCells> cache(
-      TypeFeedbackInfo::cast(raw_info)->type_feedback_cells());
-  for (int i = 0; i < cache->CellCount(); i++) {
-    unsigned ast_id = cache->AstId(i)->value();
-    Object* value = cache->Cell(i)->value();
-    if (value->IsSmi() ||
-        (value->IsJSFunction() &&
-         !CanRetainOtherContext(JSFunction::cast(value),
-                                *global_context_))) {
-      SetInfo(ast_id, value);
-    }
+void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
+  switch (target->kind()) {
+    case Code::LOAD_IC:
+    case Code::STORE_IC:
+    case Code::CALL_IC:
+    case Code::KEYED_CALL_IC:
+      if (target->ic_state() == MONOMORPHIC) {
+        if (target->kind() == Code::CALL_IC &&
+            target->check_type() != RECEIVER_MAP_CHECK) {
+          SetInfo(ast_id, Smi::FromInt(target->check_type()));
+        } else {
+          Object* map = target->FindFirstMap();
+          SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
+        }
+      } else if (target->ic_state() == MEGAMORPHIC) {
+        SetInfo(ast_id, target);
+      }
+      break;
+
+    case Code::KEYED_LOAD_IC:
+    case Code::KEYED_STORE_IC:
+      if (target->ic_state() == MONOMORPHIC ||
+          target->ic_state() == MEGAMORPHIC) {
+        SetInfo(ast_id, target);
+      }
+      break;
+
+    case Code::UNARY_OP_IC:
+    case Code::BINARY_OP_IC:
+    case Code::COMPARE_IC:
+    case Code::TO_BOOLEAN_IC:
+      SetInfo(ast_id, target);
+      break;
+
+    default:
+      break;
   }
 }
 
diff --git a/src/type-info.h b/src/type-info.h
index d461331..a031740 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_TYPE_INFO_H_
 
 #include "allocation.h"
-#include "ast.h"
 #include "globals.h"
 #include "zone-inl.h"
 
@@ -65,8 +64,6 @@
   static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
   static TypeInfo Smi() { return TypeInfo(kSmi); }
-  // We know it's a Symbol.
-  static TypeInfo Symbol() { return TypeInfo(kSymbol); }
   // We know it's a heap number.
   static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
@@ -140,16 +137,6 @@
     return ((type_ & kSmi) == kSmi);
   }
 
-  inline bool IsSymbol() {
-    ASSERT(type_ != kUninitialized);
-    return ((type_ & kSymbol) == kSymbol);
-  }
-
-  inline bool IsNonSymbol() {
-    ASSERT(type_ != kUninitialized);
-    return ((type_ & kSymbol) == kString);
-  }
-
   inline bool IsInteger32() {
     ASSERT(type_ != kUninitialized);
     return ((type_ & kInteger32) == kInteger32);
@@ -181,7 +168,6 @@
       case kNumber: return "Number";
       case kInteger32: return "Integer32";
       case kSmi: return "Smi";
-      case kSymbol: return "Symbol";
       case kDouble: return "Double";
       case kString: return "String";
       case kNonPrimitive: return "Object";
@@ -200,7 +186,6 @@
     kSmi = 0x17,           // 0010111
     kDouble = 0x19,        // 0011001
     kString = 0x30,        // 0110000
-    kSymbol = 0x32,        // 0110010
     kNonPrimitive = 0x40,  // 1000000
     kUninitialized = 0x7f  // 1111111
   };
@@ -220,34 +205,24 @@
 class Assignment;
 class BinaryOperation;
 class Call;
-class CallNew;
 class CaseClause;
 class CompareOperation;
 class CompilationInfo;
 class CountOperation;
-class Expression;
 class Property;
 class SmallMapList;
 class UnaryOperation;
-class ForInStatement;
 
 
 class TypeFeedbackOracle BASE_EMBEDDED {
  public:
-  TypeFeedbackOracle(Handle<Code> code,
-                     Handle<Context> global_context,
-                     Isolate* isolate);
+  TypeFeedbackOracle(Handle<Code> code, Handle<Context> global_context);
 
   bool LoadIsMonomorphicNormal(Property* expr);
-  bool LoadIsUninitialized(Property* expr);
   bool LoadIsMegamorphicWithTypeInfo(Property* expr);
   bool StoreIsMonomorphicNormal(Expression* expr);
   bool StoreIsMegamorphicWithTypeInfo(Expression* expr);
   bool CallIsMonomorphic(Call* expr);
-  bool CallNewIsMonomorphic(CallNew* expr);
-  bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
-
-  bool IsForInFastCase(ForInStatement* expr);
 
   Handle<Map> LoadMonomorphicReceiverType(Property* expr);
   Handle<Map> StoreMonomorphicReceiverType(Expression* expr);
@@ -265,18 +240,9 @@
   void CollectKeyedReceiverTypes(unsigned ast_id,
                                  SmallMapList* types);
 
-  static bool CanRetainOtherContext(Map* map, Context* global_context);
-  static bool CanRetainOtherContext(JSFunction* function,
-                                    Context* global_context);
-
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
 
-  Handle<JSFunction> GetCallTarget(Call* expr);
-  Handle<JSFunction> GetCallNewTarget(CallNew* expr);
-
-  Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
-
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -289,7 +255,6 @@
   TypeInfo BinaryType(BinaryOperation* expr);
   TypeInfo CompareType(CompareOperation* expr);
   bool IsSymbolCompare(CompareOperation* expr);
-  Handle<Map> GetCompareMap(CompareOperation* expr);
   TypeInfo SwitchType(CaseClause* clause);
   TypeInfo IncrementType(CountOperation* expr);
 
@@ -308,14 +273,13 @@
                           byte* old_start,
                           byte* new_start);
   void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
-  void ProcessTypeFeedbackCells(Handle<Code> code);
+  void ProcessTarget(unsigned ast_id, Code* target);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
   Handle<Object> GetInfo(unsigned ast_id);
 
   Handle<Context> global_context_;
-  Isolate* isolate_;
   Handle<UnseededNumberDictionary> dictionary_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index 9c0ebf9..c0649d7 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -78,7 +78,7 @@
 }
 
 
-unsigned Utf8::Encode(char* str, uchar c, int previous) {
+unsigned Utf8::Encode(char* str, uchar c) {
   static const int kMask = ~(1 << 6);
   if (c <= kMaxOneByteChar) {
     str[0] = c;
@@ -88,13 +88,6 @@
     str[1] = 0x80 | (c & kMask);
     return 2;
   } else if (c <= kMaxThreeByteChar) {
-    if (Utf16::IsTrailSurrogate(c) &&
-        Utf16::IsLeadSurrogate(previous)) {
-      const int kUnmatchedSize = kSizeOfUnmatchedSurrogate;
-      return Encode(str - kUnmatchedSize,
-                    Utf16::CombineSurrogatePair(previous, c),
-                    Utf16::kNoPreviousCharacter) - kUnmatchedSize;
-    }
     str[0] = 0xE0 | (c >> 12);
     str[1] = 0x80 | ((c >> 6) & kMask);
     str[2] = 0x80 | (c & kMask);
@@ -120,16 +113,12 @@
   return CalculateValue(bytes, length, cursor);
 }
 
-unsigned Utf8::Length(uchar c, int previous) {
+unsigned Utf8::Length(uchar c) {
   if (c <= kMaxOneByteChar) {
     return 1;
   } else if (c <= kMaxTwoByteChar) {
     return 2;
   } else if (c <= kMaxThreeByteChar) {
-    if (Utf16::IsTrailSurrogate(c) &&
-        Utf16::IsLeadSurrogate(previous)) {
-      return kSizeOfUnmatchedSurrogate - kBytesSavedByCombiningSurrogates;
-    }
     return 3;
   } else {
     return 4;
diff --git a/src/unicode.cc b/src/unicode.cc
index 14f3806..6e0ac1a 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// This file was generated at 2012-03-06 09:55:58.934483
+// This file was generated at 2011-01-03 10:57:02.088925
 
 #include "unicode-inl.h"
 #include <stdlib.h>
@@ -210,7 +210,7 @@
 uchar Utf8::CalculateValue(const byte* str,
                            unsigned length,
                            unsigned* cursor) {
-  // We only get called for non-ASCII characters.
+  // We only get called for non-ascii characters.
   if (length == 1) {
     *cursor += 1;
     return kBadChar;
@@ -276,7 +276,6 @@
   return kBadChar;
 }
 
-
 const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
     unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
   unsigned offset = *offset_ptr;
@@ -287,8 +286,8 @@
   }
   const byte* data = reinterpret_cast<const byte*>(str.data());
   if (data[offset] <= kMaxOneByteChar) {
-    // The next character is an ASCII char so we scan forward over
-    // the following ASCII characters and return the next pure ASCII
+    // The next character is an ascii char so we scan forward over
+    // the following ascii characters and return the next pure ascii
     // substring
     const byte* result = data + offset;
     offset++;
@@ -298,13 +297,13 @@
     *offset_ptr = offset;
     return result;
   } else {
-    // The next character is non-ASCII so we just fill the buffer
+    // The next character is non-ascii so we just fill the buffer
     unsigned cursor = 0;
     unsigned chars_read = 0;
     while (offset < str.length()) {
       uchar c = data[offset];
       if (c <= kMaxOneByteChar) {
-        // Fast case for ASCII characters
+        // Fast case for ascii characters
         if (!CharacterStream::EncodeAsciiCharacter(c,
                                                    buffer,
                                                    capacity,
@@ -339,16 +338,6 @@
   return result;
 }
 
-unsigned CharacterStream::Utf16Length() {
-  unsigned result = 0;
-  while (has_more()) {
-    uchar c = GetNext();
-    result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
-  }
-  Rewind();
-  return result;
-}
-
 void CharacterStream::Seek(unsigned position) {
   Rewind();
   for (unsigned i = 0; i < position; i++) {
@@ -358,8 +347,8 @@
 
 // Uppercase:            point.category == 'Lu'
 
-static const uint16_t kUppercaseTable0Size = 450;
-static const int32_t kUppercaseTable0[450] = {
+static const uint16_t kUppercaseTable0Size = 430;
+static const int32_t kUppercaseTable0[430] = {
   1073741889, 90, 1073742016, 214, 1073742040, 222, 256, 258,  // NOLINT
   260, 262, 264, 266, 268, 270, 272, 274,  // NOLINT
   276, 278, 280, 282, 284, 286, 288, 290,  // NOLINT
@@ -380,24 +369,22 @@
   530, 532, 534, 536, 538, 540, 542, 544,  // NOLINT
   546, 548, 550, 552, 554, 556, 558, 560,  // NOLINT
   562, 1073742394, 571, 1073742397, 574, 577, 1073742403, 582,  // NOLINT
-  584, 586, 588, 590, 880, 882, 886, 902,  // NOLINT
-  1073742728, 906, 908, 1073742734, 911, 1073742737, 929, 1073742755,  // NOLINT
-  939, 975, 1073742802, 980, 984, 986, 988, 990,  // NOLINT
-  992, 994, 996, 998, 1000, 1002, 1004, 1006,  // NOLINT
-  1012, 1015, 1073742841, 1018, 1073742845, 1071, 1120, 1122,  // NOLINT
-  1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138,  // NOLINT
-  1140, 1142, 1144, 1146, 1148, 1150, 1152, 1162,  // NOLINT
-  1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178,  // NOLINT
-  1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194,  // NOLINT
-  1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210,  // NOLINT
-  1212, 1214, 1073743040, 1217, 1219, 1221, 1223, 1225,  // NOLINT
-  1227, 1229, 1232, 1234, 1236, 1238, 1240, 1242,  // NOLINT
-  1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258,  // NOLINT
-  1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274,  // NOLINT
-  1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290,  // NOLINT
-  1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306,  // NOLINT
-  1308, 1310, 1312, 1314, 1316, 1318, 1073743153, 1366,  // NOLINT
-  1073746080, 4293, 4295, 4301, 7680, 7682, 7684, 7686,  // NOLINT
+  584, 586, 588, 590, 902, 1073742728, 906, 908,  // NOLINT
+  1073742734, 911, 1073742737, 929, 1073742755, 939, 1073742802, 980,  // NOLINT
+  984, 986, 988, 990, 992, 994, 996, 998,  // NOLINT
+  1000, 1002, 1004, 1006, 1012, 1015, 1073742841, 1018,  // NOLINT
+  1073742845, 1071, 1120, 1122, 1124, 1126, 1128, 1130,  // NOLINT
+  1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146,  // NOLINT
+  1148, 1150, 1152, 1162, 1164, 1166, 1168, 1170,  // NOLINT
+  1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186,  // NOLINT
+  1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202,  // NOLINT
+  1204, 1206, 1208, 1210, 1212, 1214, 1073743040, 1217,  // NOLINT
+  1219, 1221, 1223, 1225, 1227, 1229, 1232, 1234,  // NOLINT
+  1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250,  // NOLINT
+  1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266,  // NOLINT
+  1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282,  // NOLINT
+  1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298,  // NOLINT
+  1073743153, 1366, 1073746080, 4293, 7680, 7682, 7684, 7686,  // NOLINT
   7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702,  // NOLINT
   7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718,  // NOLINT
   7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734,  // NOLINT
@@ -406,44 +393,28 @@
   7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782,  // NOLINT
   7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798,  // NOLINT
   7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814,  // NOLINT
-  7816, 7818, 7820, 7822, 7824, 7826, 7828, 7838,  // NOLINT
-  7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854,  // NOLINT
-  7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870,  // NOLINT
-  7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886,  // NOLINT
-  7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902,  // NOLINT
-  7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918,  // NOLINT
-  7920, 7922, 7924, 7926, 7928, 7930, 7932, 7934,  // NOLINT
-  1073749768, 7951, 1073749784, 7965, 1073749800, 7983, 1073749816, 7999,  // NOLINT
-  1073749832, 8013, 8025, 8027, 8029, 8031, 1073749864, 8047,  // NOLINT
-  1073749944, 8123, 1073749960, 8139, 1073749976, 8155, 1073749992, 8172,  // NOLINT
-  1073750008, 8187 };  // NOLINT
-static const uint16_t kUppercaseTable1Size = 86;
-static const int32_t kUppercaseTable1[86] = {
+  7816, 7818, 7820, 7822, 7824, 7826, 7828, 7840,  // NOLINT
+  7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856,  // NOLINT
+  7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872,  // NOLINT
+  7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888,  // NOLINT
+  7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904,  // NOLINT
+  7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920,  // NOLINT
+  7922, 7924, 7926, 7928, 1073749768, 7951, 1073749784, 7965,  // NOLINT
+  1073749800, 7983, 1073749816, 7999, 1073749832, 8013, 8025, 8027,  // NOLINT
+  8029, 8031, 1073749864, 8047, 1073749944, 8123, 1073749960, 8139,  // NOLINT
+  1073749976, 8155, 1073749992, 8172, 1073750008, 8187 };  // NOLINT
+static const uint16_t kUppercaseTable1Size = 79;
+static const int32_t kUppercaseTable1[79] = {
   258, 263, 1073742091, 269, 1073742096, 274, 277, 1073742105,  // NOLINT
   285, 292, 294, 296, 1073742122, 301, 1073742128, 307,  // NOLINT
   1073742142, 319, 325, 387, 1073744896, 3118, 3168, 1073744994,  // NOLINT
-  3172, 3175, 3177, 3179, 1073745005, 3184, 3186, 3189,  // NOLINT
-  1073745022, 3200, 3202, 3204, 3206, 3208, 3210, 3212,  // NOLINT
-  3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228,  // NOLINT
-  3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244,  // NOLINT
-  3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260,  // NOLINT
-  3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276,  // NOLINT
-  3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292,  // NOLINT
-  3294, 3296, 3298, 3307, 3309, 3314 };  // NOLINT
-static const uint16_t kUppercaseTable5Size = 91;
-static const int32_t kUppercaseTable5[91] = {
-  1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614,  // NOLINT
-  1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630,  // NOLINT
-  1632, 1634, 1636, 1638, 1640, 1642, 1644, 1664,  // NOLINT
-  1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680,  // NOLINT
-  1682, 1684, 1686, 1826, 1828, 1830, 1832, 1834,  // NOLINT
-  1836, 1838, 1842, 1844, 1846, 1848, 1850, 1852,  // NOLINT
-  1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868,  // NOLINT
-  1870, 1872, 1874, 1876, 1878, 1880, 1882, 1884,  // NOLINT
-  1886, 1888, 1890, 1892, 1894, 1896, 1898, 1900,  // NOLINT
-  1902, 1913, 1915, 1073743741, 1918, 1920, 1922, 1924,  // NOLINT
-  1926, 1931, 1933, 1936, 1938, 1952, 1954, 1956,  // NOLINT
-  1958, 1960, 1962 };  // NOLINT
+  3172, 3175, 3177, 3179, 3189, 3200, 3202, 3204,  // NOLINT
+  3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220,  // NOLINT
+  3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236,  // NOLINT
+  3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252,  // NOLINT
+  3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268,  // NOLINT
+  3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284,  // NOLINT
+  3286, 3288, 3290, 3292, 3294, 3296, 3298 };  // NOLINT
 static const uint16_t kUppercaseTable7Size = 2;
 static const int32_t kUppercaseTable7[2] = {
   1073749793, 7994 };  // NOLINT
@@ -456,9 +427,6 @@
     case 1: return LookupPredicate(kUppercaseTable1,
                                        kUppercaseTable1Size,
                                        c);
-    case 5: return LookupPredicate(kUppercaseTable5,
-                                       kUppercaseTable5Size,
-                                       c);
     case 7: return LookupPredicate(kUppercaseTable7,
                                        kUppercaseTable7Size,
                                        c);
@@ -468,93 +436,77 @@
 
 // Lowercase:            point.category == 'Ll'
 
-static const uint16_t kLowercaseTable0Size = 463;
-static const int32_t kLowercaseTable0[463] = {
-  1073741921, 122, 181, 1073742047, 246, 1073742072, 255, 257,  // NOLINT
-  259, 261, 263, 265, 267, 269, 271, 273,  // NOLINT
-  275, 277, 279, 281, 283, 285, 287, 289,  // NOLINT
-  291, 293, 295, 297, 299, 301, 303, 305,  // NOLINT
-  307, 309, 1073742135, 312, 314, 316, 318, 320,  // NOLINT
-  322, 324, 326, 1073742152, 329, 331, 333, 335,  // NOLINT
-  337, 339, 341, 343, 345, 347, 349, 351,  // NOLINT
-  353, 355, 357, 359, 361, 363, 365, 367,  // NOLINT
-  369, 371, 373, 375, 378, 380, 1073742206, 384,  // NOLINT
-  387, 389, 392, 1073742220, 397, 402, 405, 1073742233,  // NOLINT
-  411, 414, 417, 419, 421, 424, 1073742250, 427,  // NOLINT
-  429, 432, 436, 438, 1073742265, 442, 1073742269, 447,  // NOLINT
-  454, 457, 460, 462, 464, 466, 468, 470,  // NOLINT
-  472, 474, 1073742300, 477, 479, 481, 483, 485,  // NOLINT
-  487, 489, 491, 493, 1073742319, 496, 499, 501,  // NOLINT
-  505, 507, 509, 511, 513, 515, 517, 519,  // NOLINT
-  521, 523, 525, 527, 529, 531, 533, 535,  // NOLINT
-  537, 539, 541, 543, 545, 547, 549, 551,  // NOLINT
-  553, 555, 557, 559, 561, 1073742387, 569, 572,  // NOLINT
-  1073742399, 576, 578, 583, 585, 587, 589, 1073742415,  // NOLINT
-  659, 1073742485, 687, 881, 883, 887, 1073742715, 893,  // NOLINT
-  912, 1073742764, 974, 1073742800, 977, 1073742805, 983, 985,  // NOLINT
-  987, 989, 991, 993, 995, 997, 999, 1001,  // NOLINT
-  1003, 1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020,  // NOLINT
-  1073742896, 1119, 1121, 1123, 1125, 1127, 1129, 1131,  // NOLINT
-  1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147,  // NOLINT
-  1149, 1151, 1153, 1163, 1165, 1167, 1169, 1171,  // NOLINT
-  1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187,  // NOLINT
-  1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203,  // NOLINT
-  1205, 1207, 1209, 1211, 1213, 1215, 1218, 1220,  // NOLINT
-  1222, 1224, 1226, 1228, 1073743054, 1231, 1233, 1235,  // NOLINT
-  1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251,  // NOLINT
-  1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267,  // NOLINT
-  1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283,  // NOLINT
-  1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299,  // NOLINT
-  1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315,  // NOLINT
-  1317, 1319, 1073743201, 1415, 1073749248, 7467, 1073749355, 7543,  // NOLINT
-  1073749369, 7578, 7681, 7683, 7685, 7687, 7689, 7691,  // NOLINT
-  7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707,  // NOLINT
-  7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723,  // NOLINT
-  7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739,  // NOLINT
-  7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755,  // NOLINT
-  7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771,  // NOLINT
-  7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787,  // NOLINT
-  7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803,  // NOLINT
-  7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819,  // NOLINT
-  7821, 7823, 7825, 7827, 1073749653, 7837, 7839, 7841,  // NOLINT
-  7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857,  // NOLINT
-  7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873,  // NOLINT
-  7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889,  // NOLINT
-  7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905,  // NOLINT
-  7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921,  // NOLINT
-  7923, 7925, 7927, 7929, 7931, 7933, 1073749759, 7943,  // NOLINT
-  1073749776, 7957, 1073749792, 7975, 1073749808, 7991, 1073749824, 8005,  // NOLINT
-  1073749840, 8023, 1073749856, 8039, 1073749872, 8061, 1073749888, 8071,  // NOLINT
-  1073749904, 8087, 1073749920, 8103, 1073749936, 8116, 1073749942, 8119,  // NOLINT
-  8126, 1073749954, 8132, 1073749958, 8135, 1073749968, 8147, 1073749974,  // NOLINT
-  8151, 1073749984, 8167, 1073750002, 8180, 1073750006, 8183 };  // NOLINT
-static const uint16_t kLowercaseTable1Size = 84;
-static const int32_t kLowercaseTable1[84] = {
-  266, 1073742094, 271, 275, 303, 308, 313, 1073742140,  // NOLINT
-  317, 1073742150, 329, 334, 388, 1073744944, 3166, 3169,  // NOLINT
-  1073744997, 3174, 3176, 3178, 3180, 3185, 1073745011, 3188,  // NOLINT
-  1073745014, 3195, 3201, 3203, 3205, 3207, 3209, 3211,  // NOLINT
+static const uint16_t kLowercaseTable0Size = 449;
+static const int32_t kLowercaseTable0[449] = {
+  1073741921, 122, 170, 181, 186, 1073742047, 246, 1073742072,  // NOLINT
+  255, 257, 259, 261, 263, 265, 267, 269,  // NOLINT
+  271, 273, 275, 277, 279, 281, 283, 285,  // NOLINT
+  287, 289, 291, 293, 295, 297, 299, 301,  // NOLINT
+  303, 305, 307, 309, 1073742135, 312, 314, 316,  // NOLINT
+  318, 320, 322, 324, 326, 1073742152, 329, 331,  // NOLINT
+  333, 335, 337, 339, 341, 343, 345, 347,  // NOLINT
+  349, 351, 353, 355, 357, 359, 361, 363,  // NOLINT
+  365, 367, 369, 371, 373, 375, 378, 380,  // NOLINT
+  1073742206, 384, 387, 389, 392, 1073742220, 397, 402,  // NOLINT
+  405, 1073742233, 411, 414, 417, 419, 421, 424,  // NOLINT
+  1073742250, 427, 429, 432, 436, 438, 1073742265, 442,  // NOLINT
+  1073742269, 447, 454, 457, 460, 462, 464, 466,  // NOLINT
+  468, 470, 472, 474, 1073742300, 477, 479, 481,  // NOLINT
+  483, 485, 487, 489, 491, 493, 1073742319, 496,  // NOLINT
+  499, 501, 505, 507, 509, 511, 513, 515,  // NOLINT
+  517, 519, 521, 523, 525, 527, 529, 531,  // NOLINT
+  533, 535, 537, 539, 541, 543, 545, 547,  // NOLINT
+  549, 551, 553, 555, 557, 559, 561, 1073742387,  // NOLINT
+  569, 572, 1073742399, 576, 578, 583, 585, 587,  // NOLINT
+  589, 1073742415, 659, 1073742485, 687, 1073742715, 893, 912,  // NOLINT
+  1073742764, 974, 1073742800, 977, 1073742805, 983, 985, 987,  // NOLINT
+  989, 991, 993, 995, 997, 999, 1001, 1003,  // NOLINT
+  1005, 1073742831, 1011, 1013, 1016, 1073742843, 1020, 1073742896,  // NOLINT
+  1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133,  // NOLINT
+  1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149,  // NOLINT
+  1151, 1153, 1163, 1165, 1167, 1169, 1171, 1173,  // NOLINT
+  1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189,  // NOLINT
+  1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205,  // NOLINT
+  1207, 1209, 1211, 1213, 1215, 1218, 1220, 1222,  // NOLINT
+  1224, 1226, 1228, 1073743054, 1231, 1233, 1235, 1237,  // NOLINT
+  1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253,  // NOLINT
+  1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269,  // NOLINT
+  1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285,  // NOLINT
+  1287, 1289, 1291, 1293, 1295, 1297, 1299, 1073743201,  // NOLINT
+  1415, 1073749248, 7467, 1073749346, 7543, 1073749369, 7578, 7681,  // NOLINT
+  7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697,  // NOLINT
+  7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713,  // NOLINT
+  7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729,  // NOLINT
+  7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745,  // NOLINT
+  7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761,  // NOLINT
+  7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777,  // NOLINT
+  7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793,  // NOLINT
+  7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809,  // NOLINT
+  7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825,  // NOLINT
+  7827, 1073749653, 7835, 7841, 7843, 7845, 7847, 7849,  // NOLINT
+  7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865,  // NOLINT
+  7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881,  // NOLINT
+  7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897,  // NOLINT
+  7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913,  // NOLINT
+  7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929,  // NOLINT
+  1073749760, 7943, 1073749776, 7957, 1073749792, 7975, 1073749808, 7991,  // NOLINT
+  1073749824, 8005, 1073749840, 8023, 1073749856, 8039, 1073749872, 8061,  // NOLINT
+  1073749888, 8071, 1073749904, 8087, 1073749920, 8103, 1073749936, 8116,  // NOLINT
+  1073749942, 8119, 8126, 1073749954, 8132, 1073749958, 8135, 1073749968,  // NOLINT
+  8147, 1073749974, 8151, 1073749984, 8167, 1073750002, 8180, 1073750006,  // NOLINT
+  8183 };  // NOLINT
+static const uint16_t kLowercaseTable1Size = 79;
+static const int32_t kLowercaseTable1[79] = {
+  113, 127, 266, 1073742094, 271, 275, 303, 308,  // NOLINT
+  313, 1073742140, 317, 1073742150, 329, 334, 388, 1073744944,  // NOLINT
+  3166, 3169, 1073744997, 3174, 3176, 3178, 3180, 3188,  // NOLINT
+  1073745014, 3191, 3201, 3203, 3205, 3207, 3209, 3211,  // NOLINT
   3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227,  // NOLINT
   3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243,  // NOLINT
   3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259,  // NOLINT
   3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275,  // NOLINT
   3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291,  // NOLINT
-  3293, 3295, 3297, 1073745123, 3300, 3308, 3310, 3315,  // NOLINT
-  1073745152, 3365, 3367, 3373 };  // NOLINT
-static const uint16_t kLowercaseTable5Size = 93;
-static const int32_t kLowercaseTable5[93] = {
-  1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615,  // NOLINT
-  1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631,  // NOLINT
-  1633, 1635, 1637, 1639, 1641, 1643, 1645, 1665,  // NOLINT
-  1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681,  // NOLINT
-  1683, 1685, 1687, 1827, 1829, 1831, 1833, 1835,  // NOLINT
-  1837, 1073743663, 1841, 1843, 1845, 1847, 1849, 1851,  // NOLINT
-  1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867,  // NOLINT
-  1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883,  // NOLINT
-  1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899,  // NOLINT
-  1901, 1903, 1073743729, 1912, 1914, 1916, 1919, 1921,  // NOLINT
-  1923, 1925, 1927, 1932, 1934, 1937, 1939, 1953,  // NOLINT
-  1955, 1957, 1959, 1961, 2042 };  // NOLINT
+  3293, 3295, 3297, 1073745123, 3300, 1073745152, 3365 };  // NOLINT
 static const uint16_t kLowercaseTable7Size = 6;
 static const int32_t kLowercaseTable7[6] = {
   1073748736, 6918, 1073748755, 6935, 1073749825, 8026 };  // NOLINT
@@ -567,9 +519,6 @@
     case 1: return LookupPredicate(kLowercaseTable1,
                                        kLowercaseTable1Size,
                                        c);
-    case 5: return LookupPredicate(kLowercaseTable5,
-                                       kLowercaseTable5Size,
-                                       c);
     case 7: return LookupPredicate(kLowercaseTable7,
                                        kLowercaseTable7Size,
                                        c);
@@ -579,76 +528,71 @@
 
 // Letter:               point.category in ['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl' ]
 
-static const uint16_t kLetterTable0Size = 435;
-static const int32_t kLetterTable0[435] = {
+static const uint16_t kLetterTable0Size = 394;
+static const int32_t kLetterTable0[394] = {
   1073741889, 90, 1073741921, 122, 170, 181, 186, 1073742016,  // NOLINT
   214, 1073742040, 246, 1073742072, 705, 1073742534, 721, 1073742560,  // NOLINT
-  740, 748, 750, 1073742704, 884, 1073742710, 887, 1073742714,  // NOLINT
-  893, 902, 1073742728, 906, 908, 1073742734, 929, 1073742755,  // NOLINT
-  1013, 1073742839, 1153, 1073742986, 1319, 1073743153, 1366, 1369,  // NOLINT
-  1073743201, 1415, 1073743312, 1514, 1073743344, 1522, 1073743392, 1610,  // NOLINT
-  1073743470, 1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598,  // NOLINT
-  1775, 1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693,  // NOLINT
-  1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042, 1073743872,  // NOLINT
-  2069, 2074, 2084, 2088, 1073743936, 2136, 2208, 1073744034,  // NOLINT
-  2220, 1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744241,  // NOLINT
-  2423, 1073744249, 2431, 1073744261, 2444, 1073744271, 2448, 1073744275,  // NOLINT
-  2472, 1073744298, 2480, 2482, 1073744310, 2489, 2493, 2510,  // NOLINT
-  1073744348, 2525, 1073744351, 2529, 1073744368, 2545, 1073744389, 2570,  // NOLINT
-  1073744399, 2576, 1073744403, 2600, 1073744426, 2608, 1073744434, 2611,  // NOLINT
-  1073744437, 2614, 1073744440, 2617, 1073744473, 2652, 2654, 1073744498,  // NOLINT
-  2676, 1073744517, 2701, 1073744527, 2705, 1073744531, 2728, 1073744554,  // NOLINT
-  2736, 1073744562, 2739, 1073744565, 2745, 2749, 2768, 1073744608,  // NOLINT
-  2785, 1073744645, 2828, 1073744655, 2832, 1073744659, 2856, 1073744682,  // NOLINT
-  2864, 1073744690, 2867, 1073744693, 2873, 2877, 1073744732, 2909,  // NOLINT
-  1073744735, 2913, 2929, 2947, 1073744773, 2954, 1073744782, 2960,  // NOLINT
-  1073744786, 2965, 1073744793, 2970, 2972, 1073744798, 2975, 1073744803,  // NOLINT
-  2980, 1073744808, 2986, 1073744814, 3001, 3024, 1073744901, 3084,  // NOLINT
-  1073744910, 3088, 1073744914, 3112, 1073744938, 3123, 1073744949, 3129,  // NOLINT
-  3133, 1073744984, 3161, 1073744992, 3169, 1073745029, 3212, 1073745038,  // NOLINT
-  3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257, 3261,  // NOLINT
-  3294, 1073745120, 3297, 1073745137, 3314, 1073745157, 3340, 1073745166,  // NOLINT
-  3344, 1073745170, 3386, 3389, 3406, 1073745248, 3425, 1073745274,  // NOLINT
-  3455, 1073745285, 3478, 1073745306, 3505, 1073745331, 3515, 3517,  // NOLINT
-  1073745344, 3526, 1073745409, 3632, 1073745458, 3635, 1073745472, 3654,  // NOLINT
-  1073745537, 3714, 3716, 1073745543, 3720, 3722, 3725, 1073745556,  // NOLINT
-  3735, 1073745561, 3743, 1073745569, 3747, 3749, 3751, 1073745578,  // NOLINT
-  3755, 1073745581, 3760, 1073745586, 3763, 3773, 1073745600, 3780,  // NOLINT
-  3782, 1073745628, 3807, 3840, 1073745728, 3911, 1073745737, 3948,  // NOLINT
-  1073745800, 3980, 1073745920, 4138, 4159, 1073746000, 4181, 1073746010,  // NOLINT
-  4189, 4193, 1073746021, 4198, 1073746030, 4208, 1073746037, 4225,  // NOLINT
-  4238, 1073746080, 4293, 4295, 4301, 1073746128, 4346, 1073746172,  // NOLINT
-  4680, 1073746506, 4685, 1073746512, 4694, 4696, 1073746522, 4701,  // NOLINT
-  1073746528, 4744, 1073746570, 4749, 1073746576, 4784, 1073746610, 4789,  // NOLINT
-  1073746616, 4798, 4800, 1073746626, 4805, 1073746632, 4822, 1073746648,  // NOLINT
-  4880, 1073746706, 4885, 1073746712, 4954, 1073746816, 5007, 1073746848,  // NOLINT
-  5108, 1073746945, 5740, 1073747567, 5759, 1073747585, 5786, 1073747616,  // NOLINT
-  5866, 1073747694, 5872, 1073747712, 5900, 1073747726, 5905, 1073747744,  // NOLINT
-  5937, 1073747776, 5969, 1073747808, 5996, 1073747822, 6000, 1073747840,  // NOLINT
-  6067, 6103, 6108, 1073748000, 6263, 1073748096, 6312, 6314,  // NOLINT
-  1073748144, 6389, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516,  // NOLINT
-  1073748352, 6571, 1073748417, 6599, 1073748480, 6678, 1073748512, 6740,  // NOLINT
-  6823, 1073748741, 6963, 1073748805, 6987, 1073748867, 7072, 1073748910,  // NOLINT
-  7087, 1073748922, 7141, 1073748992, 7203, 1073749069, 7247, 1073749082,  // NOLINT
-  7293, 1073749225, 7404, 1073749230, 7409, 1073749237, 7414, 1073749248,  // NOLINT
-  7615, 1073749504, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832,  // NOLINT
-  8013, 1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061,  // NOLINT
-  1073749888, 8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958,  // NOLINT
-  8140, 1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002,  // NOLINT
-  8180, 1073750006, 8188 };  // NOLINT
-static const uint16_t kLetterTable1Size = 87;
-static const int32_t kLetterTable1[87] = {
-  113, 127, 1073741968, 156, 258, 263, 1073742090, 275,  // NOLINT
+  740, 750, 1073742714, 893, 902, 1073742728, 906, 908,  // NOLINT
+  1073742734, 929, 1073742755, 974, 1073742800, 1013, 1073742839, 1153,  // NOLINT
+  1073742986, 1299, 1073743153, 1366, 1369, 1073743201, 1415, 1073743312,  // NOLINT
+  1514, 1073743344, 1522, 1073743393, 1594, 1073743424, 1610, 1073743470,  // NOLINT
+  1647, 1073743473, 1747, 1749, 1073743589, 1766, 1073743598, 1775,  // NOLINT
+  1073743610, 1788, 1791, 1808, 1073743634, 1839, 1073743693, 1901,  // NOLINT
+  1073743744, 1957, 1969, 1073743818, 2026, 1073743860, 2037, 2042,  // NOLINT
+  1073744132, 2361, 2365, 2384, 1073744216, 2401, 1073744251, 2431,  // NOLINT
+  1073744261, 2444, 1073744271, 2448, 1073744275, 2472, 1073744298, 2480,  // NOLINT
+  2482, 1073744310, 2489, 2493, 2510, 1073744348, 2525, 1073744351,  // NOLINT
+  2529, 1073744368, 2545, 1073744389, 2570, 1073744399, 2576, 1073744403,  // NOLINT
+  2600, 1073744426, 2608, 1073744434, 2611, 1073744437, 2614, 1073744440,  // NOLINT
+  2617, 1073744473, 2652, 2654, 1073744498, 2676, 1073744517, 2701,  // NOLINT
+  1073744527, 2705, 1073744531, 2728, 1073744554, 2736, 1073744562, 2739,  // NOLINT
+  1073744565, 2745, 2749, 2768, 1073744608, 2785, 1073744645, 2828,  // NOLINT
+  1073744655, 2832, 1073744659, 2856, 1073744682, 2864, 1073744690, 2867,  // NOLINT
+  1073744693, 2873, 2877, 1073744732, 2909, 1073744735, 2913, 2929,  // NOLINT
+  2947, 1073744773, 2954, 1073744782, 2960, 1073744786, 2965, 1073744793,  // NOLINT
+  2970, 2972, 1073744798, 2975, 1073744803, 2980, 1073744808, 2986,  // NOLINT
+  1073744814, 3001, 1073744901, 3084, 1073744910, 3088, 1073744914, 3112,  // NOLINT
+  1073744938, 3123, 1073744949, 3129, 1073744992, 3169, 1073745029, 3212,  // NOLINT
+  1073745038, 3216, 1073745042, 3240, 1073745066, 3251, 1073745077, 3257,  // NOLINT
+  3261, 3294, 1073745120, 3297, 1073745157, 3340, 1073745166, 3344,  // NOLINT
+  1073745170, 3368, 1073745194, 3385, 1073745248, 3425, 1073745285, 3478,  // NOLINT
+  1073745306, 3505, 1073745331, 3515, 3517, 1073745344, 3526, 1073745409,  // NOLINT
+  3632, 1073745458, 3635, 1073745472, 3654, 1073745537, 3714, 3716,  // NOLINT
+  1073745543, 3720, 3722, 3725, 1073745556, 3735, 1073745561, 3743,  // NOLINT
+  1073745569, 3747, 3749, 3751, 1073745578, 3755, 1073745581, 3760,  // NOLINT
+  1073745586, 3763, 3773, 1073745600, 3780, 3782, 1073745628, 3805,  // NOLINT
+  3840, 1073745728, 3911, 1073745737, 3946, 1073745800, 3979, 1073745920,  // NOLINT
+  4129, 1073745955, 4135, 1073745961, 4138, 1073746000, 4181, 1073746080,  // NOLINT
+  4293, 1073746128, 4346, 4348, 1073746176, 4441, 1073746271, 4514,  // NOLINT
+  1073746344, 4601, 1073746432, 4680, 1073746506, 4685, 1073746512, 4694,  // NOLINT
+  4696, 1073746522, 4701, 1073746528, 4744, 1073746570, 4749, 1073746576,  // NOLINT
+  4784, 1073746610, 4789, 1073746616, 4798, 4800, 1073746626, 4805,  // NOLINT
+  1073746632, 4822, 1073746648, 4880, 1073746706, 4885, 1073746712, 4954,  // NOLINT
+  1073746816, 5007, 1073746848, 5108, 1073746945, 5740, 1073747567, 5750,  // NOLINT
+  1073747585, 5786, 1073747616, 5866, 1073747694, 5872, 1073747712, 5900,  // NOLINT
+  1073747726, 5905, 1073747744, 5937, 1073747776, 5969, 1073747808, 5996,  // NOLINT
+  1073747822, 6000, 1073747840, 6067, 6103, 6108, 1073748000, 6263,  // NOLINT
+  1073748096, 6312, 1073748224, 6428, 1073748304, 6509, 1073748336, 6516,  // NOLINT
+  1073748352, 6569, 1073748417, 6599, 1073748480, 6678, 1073748741, 6963,  // NOLINT
+  1073748805, 6987, 1073749248, 7615, 1073749504, 7835, 1073749664, 7929,  // NOLINT
+  1073749760, 7957, 1073749784, 7965, 1073749792, 8005, 1073749832, 8013,  // NOLINT
+  1073749840, 8023, 8025, 8027, 8029, 1073749855, 8061, 1073749888,  // NOLINT
+  8116, 1073749942, 8124, 8126, 1073749954, 8132, 1073749958, 8140,  // NOLINT
+  1073749968, 8147, 1073749974, 8155, 1073749984, 8172, 1073750002, 8180,  // NOLINT
+  1073750006, 8188 };  // NOLINT
+static const uint16_t kLetterTable1Size = 84;
+static const int32_t kLetterTable1[84] = {
+  113, 127, 1073741968, 148, 258, 263, 1073742090, 275,  // NOLINT
   277, 1073742105, 285, 292, 294, 296, 1073742122, 301,  // NOLINT
   1073742127, 313, 1073742140, 319, 1073742149, 329, 334, 1073742176,  // NOLINT
-  392, 1073744896, 3118, 1073744944, 3166, 1073744992, 3300, 1073745131,  // NOLINT
-  3310, 1073745138, 3315, 1073745152, 3365, 3367, 3373, 1073745200,  // NOLINT
-  3431, 3439, 1073745280, 3478, 1073745312, 3494, 1073745320, 3502,  // NOLINT
-  1073745328, 3510, 1073745336, 3518, 1073745344, 3526, 1073745352, 3534,  // NOLINT
-  1073745360, 3542, 1073745368, 3550, 3631, 1073745925, 4103, 1073745953,  // NOLINT
-  4137, 1073745969, 4149, 1073745976, 4156, 1073745985, 4246, 1073746077,  // NOLINT
-  4255, 1073746081, 4346, 1073746172, 4351, 1073746181, 4397, 1073746225,  // NOLINT
-  4494, 1073746336, 4538, 1073746416, 4607, 1073746944, 8191 };  // NOLINT
+  388, 1073744896, 3118, 1073744944, 3166, 1073744992, 3180, 1073745012,  // NOLINT
+  3191, 1073745024, 3300, 1073745152, 3365, 1073745200, 3429, 3439,  // NOLINT
+  1073745280, 3478, 1073745312, 3494, 1073745320, 3502, 1073745328, 3510,  // NOLINT
+  1073745336, 3518, 1073745344, 3526, 1073745352, 3534, 1073745360, 3542,  // NOLINT
+  1073745368, 3550, 1073745925, 4103, 1073745953, 4137, 1073745969, 4149,  // NOLINT
+  1073745976, 4156, 1073745985, 4246, 1073746077, 4255, 1073746081, 4346,  // NOLINT
+  1073746172, 4351, 1073746181, 4396, 1073746225, 4494, 1073746336, 4535,  // NOLINT
+  1073746416, 4607, 1073746944, 8191 };  // NOLINT
 static const uint16_t kLetterTable2Size = 4;
 static const int32_t kLetterTable2[4] = {
   1073741824, 3509, 1073745408, 8191 };  // NOLINT
@@ -657,31 +601,23 @@
   1073741824, 8191 };  // NOLINT
 static const uint16_t kLetterTable4Size = 2;
 static const int32_t kLetterTable4[2] = {
-  1073741824, 8140 };  // NOLINT
-static const uint16_t kLetterTable5Size = 88;
-static const int32_t kLetterTable5[88] = {
-  1073741824, 1164, 1073743056, 1277, 1073743104, 1548, 1073743376, 1567,  // NOLINT
-  1073743402, 1579, 1073743424, 1646, 1073743487, 1687, 1073743520, 1775,  // NOLINT
-  1073743639, 1823, 1073743650, 1928, 1073743755, 1934, 1073743760, 1939,  // NOLINT
-  1073743776, 1962, 1073743864, 2049, 1073743875, 2053, 1073743879, 2058,  // NOLINT
-  1073743884, 2082, 1073743936, 2163, 1073744002, 2227, 1073744114, 2295,  // NOLINT
-  2299, 1073744138, 2341, 1073744176, 2374, 1073744224, 2428, 1073744260,  // NOLINT
-  2482, 2511, 1073744384, 2600, 1073744448, 2626, 1073744452, 2635,  // NOLINT
-  1073744480, 2678, 2682, 1073744512, 2735, 2737, 1073744565, 2742,  // NOLINT
-  1073744569, 2749, 2752, 2754, 1073744603, 2781, 1073744608, 2794,  // NOLINT
-  1073744626, 2804, 1073744641, 2822, 1073744649, 2830, 1073744657, 2838,  // NOLINT
-  1073744672, 2854, 1073744680, 2862, 1073744832, 3042, 1073744896, 8191 };  // NOLINT
-static const uint16_t kLetterTable6Size = 6;
-static const int32_t kLetterTable6[6] = {
-  1073741824, 6051, 1073747888, 6086, 1073747915, 6139 };  // NOLINT
-static const uint16_t kLetterTable7Size = 48;
-static const int32_t kLetterTable7[48] = {
-  1073748224, 6765, 1073748592, 6873, 1073748736, 6918, 1073748755, 6935,  // NOLINT
-  6941, 1073748767, 6952, 1073748778, 6966, 1073748792, 6972, 6974,  // NOLINT
-  1073748800, 6977, 1073748803, 6980, 1073748806, 7089, 1073748947, 7485,  // NOLINT
-  1073749328, 7567, 1073749394, 7623, 1073749488, 7675, 1073749616, 7796,  // NOLINT
-  1073749622, 7932, 1073749793, 7994, 1073749825, 8026, 1073749862, 8126,  // NOLINT
-  1073749954, 8135, 1073749962, 8143, 1073749970, 8151, 1073749978, 8156 };  // NOLINT
+  1073741824, 8123 };  // NOLINT
+static const uint16_t kLetterTable5Size = 16;
+static const int32_t kLetterTable5[16] = {
+  1073741824, 1164, 1073743639, 1818, 1073743872, 2049, 1073743875, 2053,  // NOLINT
+  1073743879, 2058, 1073743884, 2082, 1073743936, 2163, 1073744896, 8191 };  // NOLINT
+static const uint16_t kLetterTable6Size = 2;
+static const int32_t kLetterTable6[2] = {
+  1073741824, 6051 };  // NOLINT
+static const uint16_t kLetterTable7Size = 50;
+static const int32_t kLetterTable7[50] = {
+  1073748224, 6701, 1073748528, 6762, 1073748592, 6873, 1073748736, 6918,  // NOLINT
+  1073748755, 6935, 6941, 1073748767, 6952, 1073748778, 6966, 1073748792,  // NOLINT
+  6972, 6974, 1073748800, 6977, 1073748803, 6980, 1073748806, 7089,  // NOLINT
+  1073748947, 7485, 1073749328, 7567, 1073749394, 7623, 1073749488, 7675,  // NOLINT
+  1073749616, 7796, 1073749622, 7932, 1073749793, 7994, 1073749825, 8026,  // NOLINT
+  1073749862, 8126, 1073749954, 8135, 1073749962, 8143, 1073749970, 8151,  // NOLINT
+  1073749978, 8156 };  // NOLINT
 bool Letter::Is(uchar c) {
   int chunk_index = c >> 13;
   switch (chunk_index) {
@@ -736,19 +672,14 @@
 
 // Number:               point.category == 'Nd'
 
-static const uint16_t kNumberTable0Size = 56;
-static const int32_t kNumberTable0[56] = {
+static const uint16_t kNumberTable0Size = 44;
+static const int32_t kNumberTable0[44] = {
   1073741872, 57, 1073743456, 1641, 1073743600, 1785, 1073743808, 1993,  // NOLINT
   1073744230, 2415, 1073744358, 2543, 1073744486, 2671, 1073744614, 2799,  // NOLINT
   1073744742, 2927, 1073744870, 3055, 1073744998, 3183, 1073745126, 3311,  // NOLINT
   1073745254, 3439, 1073745488, 3673, 1073745616, 3801, 1073745696, 3881,  // NOLINT
-  1073745984, 4169, 1073746064, 4249, 1073747936, 6121, 1073747984, 6169,  // NOLINT
-  1073748294, 6479, 1073748432, 6617, 1073748608, 6793, 1073748624, 6809,  // NOLINT
-  1073748816, 7001, 1073748912, 7097, 1073749056, 7241, 1073749072, 7257 };  // NOLINT
-static const uint16_t kNumberTable5Size = 12;
-static const int32_t kNumberTable5[12] = {
-  1073743392, 1577, 1073744080, 2265, 1073744128, 2313, 1073744336, 2521,  // NOLINT
-  1073744464, 2649, 1073744880, 3065 };  // NOLINT
+  1073745984, 4169, 1073747936, 6121, 1073747984, 6169, 1073748294, 6479,  // NOLINT
+  1073748432, 6617, 1073748816, 7001 };  // NOLINT
 static const uint16_t kNumberTable7Size = 2;
 static const int32_t kNumberTable7[2] = {
   1073749776, 7961 };  // NOLINT
@@ -758,9 +689,6 @@
     case 0: return LookupPredicate(kNumberTable0,
                                        kNumberTable0Size,
                                        c);
-    case 5: return LookupPredicate(kNumberTable5,
-                                       kNumberTable5Size,
-                                       c);
     case 7: return LookupPredicate(kNumberTable7,
                                        kNumberTable7Size,
                                        c);
@@ -812,56 +740,44 @@
 
 // CombiningMark:        point.category in ['Mn', 'Mc']
 
-static const uint16_t kCombiningMarkTable0Size = 258;
-static const int32_t kCombiningMarkTable0[258] = {
-  1073742592, 879, 1073742979, 1159, 1073743249, 1469, 1471, 1073743297,  // NOLINT
-  1474, 1073743300, 1477, 1479, 1073743376, 1562, 1073743435, 1631,  // NOLINT
+static const uint16_t kCombiningMarkTable0Size = 205;
+static const int32_t kCombiningMarkTable0[205] = {
+  1073742592, 879, 1073742979, 1158, 1073743249, 1469, 1471, 1073743297,  // NOLINT
+  1474, 1073743300, 1477, 1479, 1073743376, 1557, 1073743435, 1630,  // NOLINT
   1648, 1073743574, 1756, 1073743583, 1764, 1073743591, 1768, 1073743594,  // NOLINT
   1773, 1809, 1073743664, 1866, 1073743782, 1968, 1073743851, 2035,  // NOLINT
-  1073743894, 2073, 1073743899, 2083, 1073743909, 2087, 1073743913, 2093,  // NOLINT
-  1073743961, 2139, 1073744100, 2302, 1073744128, 2307, 1073744186, 2364,  // NOLINT
-  1073744190, 2383, 1073744209, 2391, 1073744226, 2403, 1073744257, 2435,  // NOLINT
-  2492, 1073744318, 2500, 1073744327, 2504, 1073744331, 2509, 2519,  // NOLINT
-  1073744354, 2531, 1073744385, 2563, 2620, 1073744446, 2626, 1073744455,  // NOLINT
-  2632, 1073744459, 2637, 2641, 1073744496, 2673, 2677, 1073744513,  // NOLINT
-  2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587, 2765,  // NOLINT
-  1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2884, 1073744711,  // NOLINT
-  2888, 1073744715, 2893, 1073744726, 2903, 1073744738, 2915, 2946,  // NOLINT
-  1073744830, 3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897,  // NOLINT
-  3075, 1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981,  // NOLINT
-  3158, 1073744994, 3171, 1073745026, 3203, 3260, 1073745086, 3268,  // NOLINT
-  1073745094, 3272, 1073745098, 3277, 1073745109, 3286, 1073745122, 3299,  // NOLINT
-  1073745154, 3331, 1073745214, 3396, 1073745222, 3400, 1073745226, 3405,  // NOLINT
-  3415, 1073745250, 3427, 1073745282, 3459, 3530, 1073745359, 3540,  // NOLINT
-  3542, 1073745368, 3551, 1073745394, 3571, 3633, 1073745460, 3642,  // NOLINT
-  1073745479, 3662, 3761, 1073745588, 3769, 1073745595, 3772, 1073745608,  // NOLINT
-  3789, 1073745688, 3865, 3893, 3895, 3897, 1073745726, 3903,  // NOLINT
-  1073745777, 3972, 1073745798, 3975, 1073745805, 3991, 1073745817, 4028,  // NOLINT
-  4038, 1073745963, 4158, 1073746006, 4185, 1073746014, 4192, 1073746018,  // NOLINT
-  4196, 1073746023, 4205, 1073746033, 4212, 1073746050, 4237, 4239,  // NOLINT
-  1073746074, 4253, 1073746781, 4959, 1073747730, 5908, 1073747762, 5940,  // NOLINT
-  1073747794, 5971, 1073747826, 6003, 1073747892, 6099, 6109, 1073747979,  // NOLINT
-  6157, 6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592,  // NOLINT
-  1073748424, 6601, 1073748503, 6683, 1073748565, 6750, 1073748576, 6780,  // NOLINT
-  6783, 1073748736, 6916, 1073748788, 6980, 1073748843, 7027, 1073748864,  // NOLINT
-  7042, 1073748897, 7085, 1073748966, 7155, 1073749028, 7223, 1073749200,  // NOLINT
-  7378, 1073749204, 7400, 7405, 1073749234, 7412, 1073749440, 7654,  // NOLINT
-  1073749500, 7679 };  // NOLINT
-static const uint16_t kCombiningMarkTable1Size = 14;
-static const int32_t kCombiningMarkTable1[14] = {
-  1073742032, 220, 225, 1073742053, 240, 1073745135, 3313, 3455,  // NOLINT
-  1073745376, 3583, 1073745962, 4143, 1073746073, 4250 };  // NOLINT
-static const uint16_t kCombiningMarkTable5Size = 47;
-static const int32_t kCombiningMarkTable5[47] = {
-  1647, 1073743476, 1661, 1695, 1073743600, 1777, 2050, 2054,  // NOLINT
-  2059, 1073743907, 2087, 1073744000, 2177, 1073744052, 2244, 1073744096,  // NOLINT
-  2289, 1073744166, 2349, 1073744199, 2387, 1073744256, 2435, 1073744307,  // NOLINT
-  2496, 1073744425, 2614, 2627, 1073744460, 2637, 2683, 2736,  // NOLINT
-  1073744562, 2740, 1073744567, 2744, 1073744574, 2751, 2753, 1073744619,  // NOLINT
-  2799, 1073744629, 2806, 1073744867, 3050, 1073744876, 3053 };  // NOLINT
+  1073744129, 2307, 2364, 1073744190, 2381, 1073744209, 2388, 1073744226,  // NOLINT
+  2403, 1073744257, 2435, 2492, 1073744318, 2500, 1073744327, 2504,  // NOLINT
+  1073744331, 2509, 2519, 1073744354, 2531, 1073744385, 2563, 2620,  // NOLINT
+  1073744446, 2626, 1073744455, 2632, 1073744459, 2637, 1073744496, 2673,  // NOLINT
+  1073744513, 2691, 2748, 1073744574, 2757, 1073744583, 2761, 1073744587,  // NOLINT
+  2765, 1073744610, 2787, 1073744641, 2819, 2876, 1073744702, 2883,  // NOLINT
+  1073744711, 2888, 1073744715, 2893, 1073744726, 2903, 2946, 1073744830,  // NOLINT
+  3010, 1073744838, 3016, 1073744842, 3021, 3031, 1073744897, 3075,  // NOLINT
+  1073744958, 3140, 1073744966, 3144, 1073744970, 3149, 1073744981, 3158,  // NOLINT
+  1073745026, 3203, 3260, 1073745086, 3268, 1073745094, 3272, 1073745098,  // NOLINT
+  3277, 1073745109, 3286, 1073745122, 3299, 1073745154, 3331, 1073745214,  // NOLINT
+  3395, 1073745222, 3400, 1073745226, 3405, 3415, 1073745282, 3459,  // NOLINT
+  3530, 1073745359, 3540, 3542, 1073745368, 3551, 1073745394, 3571,  // NOLINT
+  3633, 1073745460, 3642, 1073745479, 3662, 3761, 1073745588, 3769,  // NOLINT
+  1073745595, 3772, 1073745608, 3789, 1073745688, 3865, 3893, 3895,  // NOLINT
+  3897, 1073745726, 3903, 1073745777, 3972, 1073745798, 3975, 1073745808,  // NOLINT
+  3991, 1073745817, 4028, 4038, 1073745964, 4146, 1073745974, 4153,  // NOLINT
+  1073746006, 4185, 4959, 1073747730, 5908, 1073747762, 5940, 1073747794,  // NOLINT
+  5971, 1073747826, 6003, 1073747894, 6099, 6109, 1073747979, 6157,  // NOLINT
+  6313, 1073748256, 6443, 1073748272, 6459, 1073748400, 6592, 1073748424,  // NOLINT
+  6601, 1073748503, 6683, 1073748736, 6916, 1073748788, 6980, 1073748843,  // NOLINT
+  7027, 1073749440, 7626, 1073749502, 7679 };  // NOLINT
+static const uint16_t kCombiningMarkTable1Size = 9;
+static const int32_t kCombiningMarkTable1[9] = {
+  1073742032, 220, 225, 1073742053, 239, 1073745962, 4143, 1073746073,  // NOLINT
+  4250 };  // NOLINT
+static const uint16_t kCombiningMarkTable5Size = 5;
+static const int32_t kCombiningMarkTable5[5] = {
+  2050, 2054, 2059, 1073743907, 2087 };  // NOLINT
 static const uint16_t kCombiningMarkTable7Size = 5;
 static const int32_t kCombiningMarkTable7[5] = {
-  6942, 1073749504, 7695, 1073749536, 7718 };  // NOLINT
+  6942, 1073749504, 7695, 1073749536, 7715 };  // NOLINT
 bool CombiningMark::Is(uchar c) {
   int chunk_index = c >> 13;
   switch (chunk_index) {
@@ -910,8 +826,8 @@
 
 static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = {  // NOLINT
   {{105, 775}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable0Size = 483;  // NOLINT
-static const int32_t kToLowercaseTable0[966] = {
+static const uint16_t kToLowercaseTable0Size = 463;  // NOLINT
+static const int32_t kToLowercaseTable0[926] = {
   1073741889, 128, 90, 128, 1073742016, 128, 214, 128, 1073742040, 128, 222, 128, 256, 4, 258, 4,  // NOLINT
   260, 4, 262, 4, 264, 4, 266, 4, 268, 4, 270, 4, 272, 4, 274, 4,  // NOLINT
   276, 4, 278, 4, 280, 4, 282, 4, 284, 4, 286, 4, 288, 4, 290, 4,  // NOLINT
@@ -934,24 +850,22 @@
   542, 4, 544, -520, 546, 4, 548, 4, 550, 4, 552, 4, 554, 4, 556, 4,  // NOLINT
   558, 4, 560, 4, 562, 4, 570, 43180, 571, 4, 573, -652, 574, 43168, 577, 4,  // NOLINT
   579, -780, 580, 276, 581, 284, 582, 4, 584, 4, 586, 4, 588, 4, 590, 4,  // NOLINT
-  880, 4, 882, 4, 886, 4, 902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252,  // NOLINT
-  911, 252, 1073742737, 128, 929, 128, 931, 6, 1073742756, 128, 939, 128, 975, 32, 984, 4,  // NOLINT
-  986, 4, 988, 4, 990, 4, 992, 4, 994, 4, 996, 4, 998, 4, 1000, 4,  // NOLINT
-  1002, 4, 1004, 4, 1006, 4, 1012, -240, 1015, 4, 1017, -28, 1018, 4, 1073742845, -520,  // NOLINT
-  1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128, 1071, 128, 1120, 4, 1122, 4, 1124, 4,  // NOLINT
-  1126, 4, 1128, 4, 1130, 4, 1132, 4, 1134, 4, 1136, 4, 1138, 4, 1140, 4,  // NOLINT
-  1142, 4, 1144, 4, 1146, 4, 1148, 4, 1150, 4, 1152, 4, 1162, 4, 1164, 4,  // NOLINT
-  1166, 4, 1168, 4, 1170, 4, 1172, 4, 1174, 4, 1176, 4, 1178, 4, 1180, 4,  // NOLINT
-  1182, 4, 1184, 4, 1186, 4, 1188, 4, 1190, 4, 1192, 4, 1194, 4, 1196, 4,  // NOLINT
-  1198, 4, 1200, 4, 1202, 4, 1204, 4, 1206, 4, 1208, 4, 1210, 4, 1212, 4,  // NOLINT
-  1214, 4, 1216, 60, 1217, 4, 1219, 4, 1221, 4, 1223, 4, 1225, 4, 1227, 4,  // NOLINT
-  1229, 4, 1232, 4, 1234, 4, 1236, 4, 1238, 4, 1240, 4, 1242, 4, 1244, 4,  // NOLINT
-  1246, 4, 1248, 4, 1250, 4, 1252, 4, 1254, 4, 1256, 4, 1258, 4, 1260, 4,  // NOLINT
-  1262, 4, 1264, 4, 1266, 4, 1268, 4, 1270, 4, 1272, 4, 1274, 4, 1276, 4,  // NOLINT
-  1278, 4, 1280, 4, 1282, 4, 1284, 4, 1286, 4, 1288, 4, 1290, 4, 1292, 4,  // NOLINT
-  1294, 4, 1296, 4, 1298, 4, 1300, 4, 1302, 4, 1304, 4, 1306, 4, 1308, 4,  // NOLINT
-  1310, 4, 1312, 4, 1314, 4, 1316, 4, 1318, 4, 1073743153, 192, 1366, 192, 1073746080, 29056,  // NOLINT
-  4293, 29056, 4295, 29056, 4301, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4,  // NOLINT
+  902, 152, 1073742728, 148, 906, 148, 908, 256, 1073742734, 252, 911, 252, 1073742737, 128, 929, 128,  // NOLINT
+  931, 6, 1073742756, 128, 939, 128, 984, 4, 986, 4, 988, 4, 990, 4, 992, 4,  // NOLINT
+  994, 4, 996, 4, 998, 4, 1000, 4, 1002, 4, 1004, 4, 1006, 4, 1012, -240,  // NOLINT
+  1015, 4, 1017, -28, 1018, 4, 1073742845, -520, 1023, -520, 1073742848, 320, 1039, 320, 1073742864, 128,  // NOLINT
+  1071, 128, 1120, 4, 1122, 4, 1124, 4, 1126, 4, 1128, 4, 1130, 4, 1132, 4,  // NOLINT
+  1134, 4, 1136, 4, 1138, 4, 1140, 4, 1142, 4, 1144, 4, 1146, 4, 1148, 4,  // NOLINT
+  1150, 4, 1152, 4, 1162, 4, 1164, 4, 1166, 4, 1168, 4, 1170, 4, 1172, 4,  // NOLINT
+  1174, 4, 1176, 4, 1178, 4, 1180, 4, 1182, 4, 1184, 4, 1186, 4, 1188, 4,  // NOLINT
+  1190, 4, 1192, 4, 1194, 4, 1196, 4, 1198, 4, 1200, 4, 1202, 4, 1204, 4,  // NOLINT
+  1206, 4, 1208, 4, 1210, 4, 1212, 4, 1214, 4, 1216, 60, 1217, 4, 1219, 4,  // NOLINT
+  1221, 4, 1223, 4, 1225, 4, 1227, 4, 1229, 4, 1232, 4, 1234, 4, 1236, 4,  // NOLINT
+  1238, 4, 1240, 4, 1242, 4, 1244, 4, 1246, 4, 1248, 4, 1250, 4, 1252, 4,  // NOLINT
+  1254, 4, 1256, 4, 1258, 4, 1260, 4, 1262, 4, 1264, 4, 1266, 4, 1268, 4,  // NOLINT
+  1270, 4, 1272, 4, 1274, 4, 1276, 4, 1278, 4, 1280, 4, 1282, 4, 1284, 4,  // NOLINT
+  1286, 4, 1288, 4, 1290, 4, 1292, 4, 1294, 4, 1296, 4, 1298, 4, 1073743153, 192,  // NOLINT
+  1366, 192, 1073746080, 29056, 4293, 29056, 7680, 4, 7682, 4, 7684, 4, 7686, 4, 7688, 4,  // NOLINT
   7690, 4, 7692, 4, 7694, 4, 7696, 4, 7698, 4, 7700, 4, 7702, 4, 7704, 4,  // NOLINT
   7706, 4, 7708, 4, 7710, 4, 7712, 4, 7714, 4, 7716, 4, 7718, 4, 7720, 4,  // NOLINT
   7722, 4, 7724, 4, 7726, 4, 7728, 4, 7730, 4, 7732, 4, 7734, 4, 7736, 4,  // NOLINT
@@ -960,52 +874,33 @@
   7770, 4, 7772, 4, 7774, 4, 7776, 4, 7778, 4, 7780, 4, 7782, 4, 7784, 4,  // NOLINT
   7786, 4, 7788, 4, 7790, 4, 7792, 4, 7794, 4, 7796, 4, 7798, 4, 7800, 4,  // NOLINT
   7802, 4, 7804, 4, 7806, 4, 7808, 4, 7810, 4, 7812, 4, 7814, 4, 7816, 4,  // NOLINT
-  7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7838, -30460, 7840, 4,  // NOLINT
-  7842, 4, 7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4,  // NOLINT
-  7858, 4, 7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4,  // NOLINT
-  7874, 4, 7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4,  // NOLINT
-  7890, 4, 7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4,  // NOLINT
-  7906, 4, 7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4,  // NOLINT
-  7922, 4, 7924, 4, 7926, 4, 7928, 4, 7930, 4, 7932, 4, 7934, 4, 1073749768, -32,  // NOLINT
-  7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32, 7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32,  // NOLINT
-  8013, -32, 8025, -32, 8027, -32, 8029, -32, 8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32,  // NOLINT
-  8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32, 8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296,  // NOLINT
-  8123, -296, 8124, -36, 1073749960, -344, 8139, -344, 8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400,  // NOLINT
-  8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448, 8171, -448, 8172, -28, 1073750008, -512, 8185, -512,  // NOLINT
-  1073750010, -504, 8187, -504, 8188, -36 };  // NOLINT
+  7818, 4, 7820, 4, 7822, 4, 7824, 4, 7826, 4, 7828, 4, 7840, 4, 7842, 4,  // NOLINT
+  7844, 4, 7846, 4, 7848, 4, 7850, 4, 7852, 4, 7854, 4, 7856, 4, 7858, 4,  // NOLINT
+  7860, 4, 7862, 4, 7864, 4, 7866, 4, 7868, 4, 7870, 4, 7872, 4, 7874, 4,  // NOLINT
+  7876, 4, 7878, 4, 7880, 4, 7882, 4, 7884, 4, 7886, 4, 7888, 4, 7890, 4,  // NOLINT
+  7892, 4, 7894, 4, 7896, 4, 7898, 4, 7900, 4, 7902, 4, 7904, 4, 7906, 4,  // NOLINT
+  7908, 4, 7910, 4, 7912, 4, 7914, 4, 7916, 4, 7918, 4, 7920, 4, 7922, 4,  // NOLINT
+  7924, 4, 7926, 4, 7928, 4, 1073749768, -32, 7951, -32, 1073749784, -32, 7965, -32, 1073749800, -32,  // NOLINT
+  7983, -32, 1073749816, -32, 7999, -32, 1073749832, -32, 8013, -32, 8025, -32, 8027, -32, 8029, -32,  // NOLINT
+  8031, -32, 1073749864, -32, 8047, -32, 1073749896, -32, 8079, -32, 1073749912, -32, 8095, -32, 1073749928, -32,  // NOLINT
+  8111, -32, 1073749944, -32, 8121, -32, 1073749946, -296, 8123, -296, 8124, -36, 1073749960, -344, 8139, -344,  // NOLINT
+  8140, -36, 1073749976, -32, 8153, -32, 1073749978, -400, 8155, -400, 1073749992, -32, 8169, -32, 1073749994, -448,  // NOLINT
+  8171, -448, 8172, -28, 1073750008, -512, 8185, -512, 1073750010, -504, 8187, -504, 8188, -36 };  // NOLINT
 static const uint16_t kToLowercaseMultiStrings0Size = 2;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable1Size = 79;  // NOLINT
-static const int32_t kToLowercaseTable1[158] = {
+static const uint16_t kToLowercaseTable1Size = 69;  // NOLINT
+static const int32_t kToLowercaseTable1[138] = {
   294, -30068, 298, -33532, 299, -33048, 306, 112, 1073742176, 64, 367, 64, 387, 4, 1073743030, 104,  // NOLINT
   1231, 104, 1073744896, 192, 3118, 192, 3168, 4, 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4,  // NOLINT
-  3177, 4, 3179, 4, 3181, -43120, 3182, -42996, 3183, -43132, 3184, -43128, 3186, 4, 3189, 4,  // NOLINT
-  1073745022, -43260, 3199, -43260, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4, 3210, 4,  // NOLINT
-  3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4, 3226, 4,  // NOLINT
-  3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4, 3242, 4,  // NOLINT
-  3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4, 3258, 4,  // NOLINT
-  3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4, 3274, 4,  // NOLINT
-  3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4, 3290, 4,  // NOLINT
-  3292, 4, 3294, 4, 3296, 4, 3298, 4, 3307, 4, 3309, 4, 3314, 4 };  // NOLINT
+  3177, 4, 3179, 4, 3189, 4, 3200, 4, 3202, 4, 3204, 4, 3206, 4, 3208, 4,  // NOLINT
+  3210, 4, 3212, 4, 3214, 4, 3216, 4, 3218, 4, 3220, 4, 3222, 4, 3224, 4,  // NOLINT
+  3226, 4, 3228, 4, 3230, 4, 3232, 4, 3234, 4, 3236, 4, 3238, 4, 3240, 4,  // NOLINT
+  3242, 4, 3244, 4, 3246, 4, 3248, 4, 3250, 4, 3252, 4, 3254, 4, 3256, 4,  // NOLINT
+  3258, 4, 3260, 4, 3262, 4, 3264, 4, 3266, 4, 3268, 4, 3270, 4, 3272, 4,  // NOLINT
+  3274, 4, 3276, 4, 3278, 4, 3280, 4, 3282, 4, 3284, 4, 3286, 4, 3288, 4,  // NOLINT
+  3290, 4, 3292, 4, 3294, 4, 3296, 4, 3298, 4 };  // NOLINT
 static const uint16_t kToLowercaseMultiStrings1Size = 1;  // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] = {  // NOLINT
-  {{kSentinel}} }; // NOLINT
-static const uint16_t kToLowercaseTable5Size = 91;  // NOLINT
-static const int32_t kToLowercaseTable5[182] = {
-  1600, 4, 1602, 4, 1604, 4, 1606, 4, 1608, 4, 1610, 4, 1612, 4, 1614, 4,  // NOLINT
-  1616, 4, 1618, 4, 1620, 4, 1622, 4, 1624, 4, 1626, 4, 1628, 4, 1630, 4,  // NOLINT
-  1632, 4, 1634, 4, 1636, 4, 1638, 4, 1640, 4, 1642, 4, 1644, 4, 1664, 4,  // NOLINT
-  1666, 4, 1668, 4, 1670, 4, 1672, 4, 1674, 4, 1676, 4, 1678, 4, 1680, 4,  // NOLINT
-  1682, 4, 1684, 4, 1686, 4, 1826, 4, 1828, 4, 1830, 4, 1832, 4, 1834, 4,  // NOLINT
-  1836, 4, 1838, 4, 1842, 4, 1844, 4, 1846, 4, 1848, 4, 1850, 4, 1852, 4,  // NOLINT
-  1854, 4, 1856, 4, 1858, 4, 1860, 4, 1862, 4, 1864, 4, 1866, 4, 1868, 4,  // NOLINT
-  1870, 4, 1872, 4, 1874, 4, 1876, 4, 1878, 4, 1880, 4, 1882, 4, 1884, 4,  // NOLINT
-  1886, 4, 1888, 4, 1890, 4, 1892, 4, 1894, 4, 1896, 4, 1898, 4, 1900, 4,  // NOLINT
-  1902, 4, 1913, 4, 1915, 4, 1917, -141328, 1918, 4, 1920, 4, 1922, 4, 1924, 4,  // NOLINT
-  1926, 4, 1931, 4, 1933, -169120, 1936, 4, 1938, 4, 1952, 4, 1954, 4, 1956, 4,  // NOLINT
-  1958, 4, 1960, 4, 1962, -169232 };  // NOLINT
-static const uint16_t kToLowercaseMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
 static const uint16_t kToLowercaseTable7Size = 2;  // NOLINT
@@ -1032,13 +927,6 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
-    case 5: return LookupMapping<true>(kToLowercaseTable5,
-                                           kToLowercaseTable5Size,
-                                           kToLowercaseMultiStrings5,
-                                           c,
-                                           n,
-                                           result,
-                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kToLowercaseTable7,
                                            kToLowercaseTable7Size,
                                            kToLowercaseMultiStrings7,
@@ -1067,8 +955,8 @@
   {{933, 776, 768}}, {{929, 787, kSentinel}}, {{933, 834, kSentinel}}, {{933, 776, 834}},  // NOLINT
   {{8186, 921, kSentinel}}, {{937, 921, kSentinel}}, {{911, 921, kSentinel}}, {{937, 834, kSentinel}},  // NOLINT
   {{937, 834, 921}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 580;  // NOLINT
-static const int32_t kToUppercaseTable0[1160] = {
+static const uint16_t kToUppercaseTable0Size = 554;  // NOLINT
+static const int32_t kToUppercaseTable0[1108] = {
   1073741921, -128, 122, -128, 181, 2972, 223, 1, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128,  // NOLINT
   255, 484, 257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4,  // NOLINT
   271, -4, 273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4,  // NOLINT
@@ -1088,92 +976,72 @@
   517, -4, 519, -4, 521, -4, 523, -4, 525, -4, 527, -4, 529, -4, 531, -4,  // NOLINT
   533, -4, 535, -4, 537, -4, 539, -4, 541, -4, 543, -4, 547, -4, 549, -4,  // NOLINT
   551, -4, 553, -4, 555, -4, 557, -4, 559, -4, 561, -4, 563, -4, 572, -4,  // NOLINT
-  1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4,  // NOLINT
-  592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808,  // NOLINT
-  603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232, 616, -836, 617, -844, 619, 42972,  // NOLINT
-  623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872,  // NOLINT
-  649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 881, -4, 883, -4,  // NOLINT
-  887, -4, 1073742715, 520, 893, 520, 912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17,  // NOLINT
-  1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252,  // NOLINT
-  976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4, 989, -4,  // NOLINT
-  991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4,  // NOLINT
-  1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128,  // NOLINT
-  1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4,  // NOLINT
-  1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4,  // NOLINT
-  1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4,  // NOLINT
-  1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4,  // NOLINT
-  1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4,  // NOLINT
-  1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4,  // NOLINT
-  1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4,  // NOLINT
-  1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4,  // NOLINT
-  1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4,  // NOLINT
-  1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4,  // NOLINT
-  1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4,  // NOLINT
-  1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4, 1313, -4,  // NOLINT
-  1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 1415, 21, 7545, 141328, 7549, 15256,  // NOLINT
-  7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4,  // NOLINT
-  7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4,  // NOLINT
-  7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4,  // NOLINT
-  7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4,  // NOLINT
-  7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4,  // NOLINT
-  7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4,  // NOLINT
-  7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4,  // NOLINT
-  7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4,  // NOLINT
-  7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4,  // NOLINT
-  7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37, 7834, 41,  // NOLINT
-  7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4, 7853, -4,  // NOLINT
-  7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4, 7869, -4,  // NOLINT
-  7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4, 7885, -4,  // NOLINT
-  7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4, 7901, -4,  // NOLINT
-  7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4, 7917, -4,  // NOLINT
-  7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 7931, -4, 7933, -4,  // NOLINT
-  7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32,  // NOLINT
-  7991, 32, 1073749824, 32, 8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53,  // NOLINT
-  8021, 32, 8022, 57, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344,  // NOLINT
-  8053, 344, 1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504,  // NOLINT
-  8061, 504, 8064, 61, 8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85,  // NOLINT
-  8071, 89, 8072, 61, 8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85,  // NOLINT
-  8079, 89, 8080, 93, 8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117,  // NOLINT
-  8087, 121, 8088, 93, 8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117,  // NOLINT
-  8095, 121, 8096, 125, 8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149,  // NOLINT
-  8103, 153, 8104, 125, 8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149,  // NOLINT
-  8111, 153, 1073749936, 32, 8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173,  // NOLINT
-  8124, 161, 8126, -28820, 8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181,  // NOLINT
-  1073749968, 32, 8145, 32, 8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32,  // NOLINT
-  8162, 209, 8163, 17, 8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229,  // NOLINT
-  8180, 233, 8182, 237, 8183, 241, 8188, 229 };  // NOLINT
+  578, -4, 583, -4, 585, -4, 587, -4, 589, -4, 591, -4, 595, -840, 596, -824,  // NOLINT
+  1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 616, -836, 617, -844,  // NOLINT
+  619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908, 640, -872, 643, -872, 648, -872,  // NOLINT
+  649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876, 837, 336, 1073742715, 520, 893, 520,  // NOLINT
+  912, 13, 940, -152, 1073742765, -148, 943, -148, 944, 17, 1073742769, -128, 961, -128, 962, -124,  // NOLINT
+  1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228, 981, -188,  // NOLINT
+  982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4, 997, -4,  // NOLINT
+  999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28,  // NOLINT
+  1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4,  // NOLINT
+  1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4,  // NOLINT
+  1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4,  // NOLINT
+  1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4,  // NOLINT
+  1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4,  // NOLINT
+  1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4,  // NOLINT
+  1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4,  // NOLINT
+  1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4,  // NOLINT
+  1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4,  // NOLINT
+  1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4,  // NOLINT
+  1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4,  // NOLINT
+  1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192, 1415, 21,  // NOLINT
+  7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4,  // NOLINT
+  7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4,  // NOLINT
+  7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4,  // NOLINT
+  7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4,  // NOLINT
+  7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4,  // NOLINT
+  7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4,  // NOLINT
+  7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4,  // NOLINT
+  7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4,  // NOLINT
+  7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4,  // NOLINT
+  7823, -4, 7825, -4, 7827, -4, 7829, -4, 7830, 25, 7831, 29, 7832, 33, 7833, 37,  // NOLINT
+  7834, 41, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4, 7849, -4, 7851, -4,  // NOLINT
+  7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4, 7865, -4, 7867, -4,  // NOLINT
+  7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4, 7881, -4, 7883, -4,  // NOLINT
+  7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4, 7897, -4, 7899, -4,  // NOLINT
+  7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4, 7913, -4, 7915, -4,  // NOLINT
+  7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4, 7929, -4, 1073749760, 32,  // NOLINT
+  7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32,  // NOLINT
+  8005, 32, 8016, 45, 8017, 32, 8018, 49, 8019, 32, 8020, 53, 8021, 32, 8022, 57,  // NOLINT
+  8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400,  // NOLINT
+  8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 8064, 61,  // NOLINT
+  8065, 65, 8066, 69, 8067, 73, 8068, 77, 8069, 81, 8070, 85, 8071, 89, 8072, 61,  // NOLINT
+  8073, 65, 8074, 69, 8075, 73, 8076, 77, 8077, 81, 8078, 85, 8079, 89, 8080, 93,  // NOLINT
+  8081, 97, 8082, 101, 8083, 105, 8084, 109, 8085, 113, 8086, 117, 8087, 121, 8088, 93,  // NOLINT
+  8089, 97, 8090, 101, 8091, 105, 8092, 109, 8093, 113, 8094, 117, 8095, 121, 8096, 125,  // NOLINT
+  8097, 129, 8098, 133, 8099, 137, 8100, 141, 8101, 145, 8102, 149, 8103, 153, 8104, 125,  // NOLINT
+  8105, 129, 8106, 133, 8107, 137, 8108, 141, 8109, 145, 8110, 149, 8111, 153, 1073749936, 32,  // NOLINT
+  8113, 32, 8114, 157, 8115, 161, 8116, 165, 8118, 169, 8119, 173, 8124, 161, 8126, -28820,  // NOLINT
+  8130, 177, 8131, 181, 8132, 185, 8134, 189, 8135, 193, 8140, 181, 1073749968, 32, 8145, 32,  // NOLINT
+  8146, 197, 8147, 13, 8150, 201, 8151, 205, 1073749984, 32, 8161, 32, 8162, 209, 8163, 17,  // NOLINT
+  8164, 213, 8165, 28, 8166, 217, 8167, 221, 8178, 225, 8179, 229, 8180, 233, 8182, 237,  // NOLINT
+  8183, 241, 8188, 229 };  // NOLINT
 static const uint16_t kToUppercaseMultiStrings0Size = 62;  // NOLINT
 static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable1Size = 73;  // NOLINT
-static const int32_t kToUppercaseTable1[146] = {
+static const uint16_t kToUppercaseTable1Size = 67;  // NOLINT
+static const int32_t kToUppercaseTable1[134] = {
   334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192,  // NOLINT
-  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4,  // NOLINT
-  3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4,  // NOLINT
-  3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4,  // NOLINT
-  3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,  // NOLINT
-  3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4,  // NOLINT
-  3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4,  // NOLINT
-  3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4,  // NOLINT
-  3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056,  // NOLINT
-  3373, -29056 };  // NOLINT
+  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4,  // NOLINT
+  3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,  // NOLINT
+  3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4,  // NOLINT
+  3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4,  // NOLINT
+  3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4,  // NOLINT
+  3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4,  // NOLINT
+  3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,  // NOLINT
+  3299, -4, 1073745152, -29056, 3365, -29056 };  // NOLINT
 static const uint16_t kToUppercaseMultiStrings1Size = 1;  // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] = {  // NOLINT
-  {{kSentinel}} }; // NOLINT
-static const uint16_t kToUppercaseTable5Size = 88;  // NOLINT
-static const int32_t kToUppercaseTable5[176] = {
-  1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4,  // NOLINT
-  1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4,  // NOLINT
-  1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4,  // NOLINT
-  1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4,  // NOLINT
-  1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,  // NOLINT
-  1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4,  // NOLINT
-  1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4,  // NOLINT
-  1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4,  // NOLINT
-  1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4,  // NOLINT
-  1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4,  // NOLINT
-  1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 };  // NOLINT
-static const uint16_t kToUppercaseMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = {  // NOLINT
   {{70, 70, kSentinel}}, {{70, 73, kSentinel}}, {{70, 76, kSentinel}}, {{70, 70, 73}},  // NOLINT
   {{70, 70, 76}}, {{83, 84, kSentinel}}, {{1348, 1350, kSentinel}}, {{1348, 1333, kSentinel}},  // NOLINT
@@ -1203,13 +1071,6 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
-    case 5: return LookupMapping<true>(kToUppercaseTable5,
-                                           kToUppercaseTable5Size,
-                                           kToUppercaseMultiStrings5,
-                                           c,
-                                           n,
-                                           result,
-                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kToUppercaseTable7,
                                            kToUppercaseTable7Size,
                                            kToUppercaseMultiStrings7,
@@ -1223,8 +1084,8 @@
 
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable0Size = 488;  // NOLINT
-static const int32_t kEcma262CanonicalizeTable0[976] = {
+static const uint16_t kEcma262CanonicalizeTable0Size = 462;  // NOLINT
+static const int32_t kEcma262CanonicalizeTable0[924] = {
   1073741921, -128, 122, -128, 181, 2972, 1073742048, -128, 246, -128, 1073742072, -128, 254, -128, 255, 484,  // NOLINT
   257, -4, 259, -4, 261, -4, 263, -4, 265, -4, 267, -4, 269, -4, 271, -4,  // NOLINT
   273, -4, 275, -4, 277, -4, 279, -4, 281, -4, 283, -4, 285, -4, 287, -4,  // NOLINT
@@ -1243,81 +1104,61 @@
   511, -4, 513, -4, 515, -4, 517, -4, 519, -4, 521, -4, 523, -4, 525, -4,  // NOLINT
   527, -4, 529, -4, 531, -4, 533, -4, 535, -4, 537, -4, 539, -4, 541, -4,  // NOLINT
   543, -4, 547, -4, 549, -4, 551, -4, 553, -4, 555, -4, 557, -4, 559, -4,  // NOLINT
-  561, -4, 563, -4, 572, -4, 1073742399, 43260, 576, 43260, 578, -4, 583, -4, 585, -4,  // NOLINT
-  587, -4, 589, -4, 591, -4, 592, 43132, 593, 43120, 594, 43128, 595, -840, 596, -824,  // NOLINT
-  1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820, 611, -828, 613, 169120, 614, 169232,  // NOLINT
-  616, -836, 617, -844, 619, 42972, 623, -844, 625, 42996, 626, -852, 629, -856, 637, 42908,  // NOLINT
+  561, -4, 563, -4, 572, -4, 578, -4, 583, -4, 585, -4, 587, -4, 589, -4,  // NOLINT
+  591, -4, 595, -840, 596, -824, 1073742422, -820, 599, -820, 601, -808, 603, -812, 608, -820,  // NOLINT
+  611, -828, 616, -836, 617, -844, 619, 42972, 623, -844, 626, -852, 629, -856, 637, 42908,  // NOLINT
   640, -872, 643, -872, 648, -872, 649, -276, 1073742474, -868, 651, -868, 652, -284, 658, -876,  // NOLINT
-  837, 336, 881, -4, 883, -4, 887, -4, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148,  // NOLINT
-  943, -148, 1073742769, -128, 961, -128, 962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252,  // NOLINT
-  974, -252, 976, -248, 977, -228, 981, -188, 982, -216, 983, -32, 985, -4, 987, -4,  // NOLINT
-  989, -4, 991, -4, 993, -4, 995, -4, 997, -4, 999, -4, 1001, -4, 1003, -4,  // NOLINT
-  1005, -4, 1007, -4, 1008, -344, 1009, -320, 1010, 28, 1013, -384, 1016, -4, 1019, -4,  // NOLINT
-  1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320, 1121, -4, 1123, -4, 1125, -4, 1127, -4,  // NOLINT
-  1129, -4, 1131, -4, 1133, -4, 1135, -4, 1137, -4, 1139, -4, 1141, -4, 1143, -4,  // NOLINT
-  1145, -4, 1147, -4, 1149, -4, 1151, -4, 1153, -4, 1163, -4, 1165, -4, 1167, -4,  // NOLINT
-  1169, -4, 1171, -4, 1173, -4, 1175, -4, 1177, -4, 1179, -4, 1181, -4, 1183, -4,  // NOLINT
-  1185, -4, 1187, -4, 1189, -4, 1191, -4, 1193, -4, 1195, -4, 1197, -4, 1199, -4,  // NOLINT
-  1201, -4, 1203, -4, 1205, -4, 1207, -4, 1209, -4, 1211, -4, 1213, -4, 1215, -4,  // NOLINT
-  1218, -4, 1220, -4, 1222, -4, 1224, -4, 1226, -4, 1228, -4, 1230, -4, 1231, -60,  // NOLINT
-  1233, -4, 1235, -4, 1237, -4, 1239, -4, 1241, -4, 1243, -4, 1245, -4, 1247, -4,  // NOLINT
-  1249, -4, 1251, -4, 1253, -4, 1255, -4, 1257, -4, 1259, -4, 1261, -4, 1263, -4,  // NOLINT
-  1265, -4, 1267, -4, 1269, -4, 1271, -4, 1273, -4, 1275, -4, 1277, -4, 1279, -4,  // NOLINT
-  1281, -4, 1283, -4, 1285, -4, 1287, -4, 1289, -4, 1291, -4, 1293, -4, 1295, -4,  // NOLINT
-  1297, -4, 1299, -4, 1301, -4, 1303, -4, 1305, -4, 1307, -4, 1309, -4, 1311, -4,  // NOLINT
-  1313, -4, 1315, -4, 1317, -4, 1319, -4, 1073743201, -192, 1414, -192, 7545, 141328, 7549, 15256,  // NOLINT
-  7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4, 7695, -4,  // NOLINT
-  7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4, 7711, -4,  // NOLINT
-  7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4, 7727, -4,  // NOLINT
-  7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4, 7743, -4,  // NOLINT
-  7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4, 7759, -4,  // NOLINT
-  7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4, 7775, -4,  // NOLINT
-  7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4, 7791, -4,  // NOLINT
-  7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4, 7807, -4,  // NOLINT
-  7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4, 7823, -4,  // NOLINT
-  7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4, 7847, -4,  // NOLINT
-  7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4, 7863, -4,  // NOLINT
-  7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4, 7879, -4,  // NOLINT
-  7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4, 7895, -4,  // NOLINT
-  7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4, 7911, -4,  // NOLINT
-  7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4, 7927, -4,  // NOLINT
-  7929, -4, 7931, -4, 7933, -4, 7935, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32,  // NOLINT
-  1073749792, 32, 7975, 32, 1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32,  // NOLINT
-  8021, 32, 8023, 32, 1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344,  // NOLINT
-  1073749878, 400, 8055, 400, 1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504,  // NOLINT
-  1073749936, 32, 8113, 32, 8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 };  // NOLINT
+  837, 336, 1073742715, 520, 893, 520, 940, -152, 1073742765, -148, 943, -148, 1073742769, -128, 961, -128,  // NOLINT
+  962, -124, 1073742787, -128, 971, -128, 972, -256, 1073742797, -252, 974, -252, 976, -248, 977, -228,  // NOLINT
+  981, -188, 982, -216, 985, -4, 987, -4, 989, -4, 991, -4, 993, -4, 995, -4,  // NOLINT
+  997, -4, 999, -4, 1001, -4, 1003, -4, 1005, -4, 1007, -4, 1008, -344, 1009, -320,  // NOLINT
+  1010, 28, 1013, -384, 1016, -4, 1019, -4, 1073742896, -128, 1103, -128, 1073742928, -320, 1119, -320,  // NOLINT
+  1121, -4, 1123, -4, 1125, -4, 1127, -4, 1129, -4, 1131, -4, 1133, -4, 1135, -4,  // NOLINT
+  1137, -4, 1139, -4, 1141, -4, 1143, -4, 1145, -4, 1147, -4, 1149, -4, 1151, -4,  // NOLINT
+  1153, -4, 1163, -4, 1165, -4, 1167, -4, 1169, -4, 1171, -4, 1173, -4, 1175, -4,  // NOLINT
+  1177, -4, 1179, -4, 1181, -4, 1183, -4, 1185, -4, 1187, -4, 1189, -4, 1191, -4,  // NOLINT
+  1193, -4, 1195, -4, 1197, -4, 1199, -4, 1201, -4, 1203, -4, 1205, -4, 1207, -4,  // NOLINT
+  1209, -4, 1211, -4, 1213, -4, 1215, -4, 1218, -4, 1220, -4, 1222, -4, 1224, -4,  // NOLINT
+  1226, -4, 1228, -4, 1230, -4, 1231, -60, 1233, -4, 1235, -4, 1237, -4, 1239, -4,  // NOLINT
+  1241, -4, 1243, -4, 1245, -4, 1247, -4, 1249, -4, 1251, -4, 1253, -4, 1255, -4,  // NOLINT
+  1257, -4, 1259, -4, 1261, -4, 1263, -4, 1265, -4, 1267, -4, 1269, -4, 1271, -4,  // NOLINT
+  1273, -4, 1275, -4, 1277, -4, 1279, -4, 1281, -4, 1283, -4, 1285, -4, 1287, -4,  // NOLINT
+  1289, -4, 1291, -4, 1293, -4, 1295, -4, 1297, -4, 1299, -4, 1073743201, -192, 1414, -192,  // NOLINT
+  7549, 15256, 7681, -4, 7683, -4, 7685, -4, 7687, -4, 7689, -4, 7691, -4, 7693, -4,  // NOLINT
+  7695, -4, 7697, -4, 7699, -4, 7701, -4, 7703, -4, 7705, -4, 7707, -4, 7709, -4,  // NOLINT
+  7711, -4, 7713, -4, 7715, -4, 7717, -4, 7719, -4, 7721, -4, 7723, -4, 7725, -4,  // NOLINT
+  7727, -4, 7729, -4, 7731, -4, 7733, -4, 7735, -4, 7737, -4, 7739, -4, 7741, -4,  // NOLINT
+  7743, -4, 7745, -4, 7747, -4, 7749, -4, 7751, -4, 7753, -4, 7755, -4, 7757, -4,  // NOLINT
+  7759, -4, 7761, -4, 7763, -4, 7765, -4, 7767, -4, 7769, -4, 7771, -4, 7773, -4,  // NOLINT
+  7775, -4, 7777, -4, 7779, -4, 7781, -4, 7783, -4, 7785, -4, 7787, -4, 7789, -4,  // NOLINT
+  7791, -4, 7793, -4, 7795, -4, 7797, -4, 7799, -4, 7801, -4, 7803, -4, 7805, -4,  // NOLINT
+  7807, -4, 7809, -4, 7811, -4, 7813, -4, 7815, -4, 7817, -4, 7819, -4, 7821, -4,  // NOLINT
+  7823, -4, 7825, -4, 7827, -4, 7829, -4, 7835, -236, 7841, -4, 7843, -4, 7845, -4,  // NOLINT
+  7847, -4, 7849, -4, 7851, -4, 7853, -4, 7855, -4, 7857, -4, 7859, -4, 7861, -4,  // NOLINT
+  7863, -4, 7865, -4, 7867, -4, 7869, -4, 7871, -4, 7873, -4, 7875, -4, 7877, -4,  // NOLINT
+  7879, -4, 7881, -4, 7883, -4, 7885, -4, 7887, -4, 7889, -4, 7891, -4, 7893, -4,  // NOLINT
+  7895, -4, 7897, -4, 7899, -4, 7901, -4, 7903, -4, 7905, -4, 7907, -4, 7909, -4,  // NOLINT
+  7911, -4, 7913, -4, 7915, -4, 7917, -4, 7919, -4, 7921, -4, 7923, -4, 7925, -4,  // NOLINT
+  7927, -4, 7929, -4, 1073749760, 32, 7943, 32, 1073749776, 32, 7957, 32, 1073749792, 32, 7975, 32,  // NOLINT
+  1073749808, 32, 7991, 32, 1073749824, 32, 8005, 32, 8017, 32, 8019, 32, 8021, 32, 8023, 32,  // NOLINT
+  1073749856, 32, 8039, 32, 1073749872, 296, 8049, 296, 1073749874, 344, 8053, 344, 1073749878, 400, 8055, 400,  // NOLINT
+  1073749880, 512, 8057, 512, 1073749882, 448, 8059, 448, 1073749884, 504, 8061, 504, 1073749936, 32, 8113, 32,  // NOLINT
+  8126, -28820, 1073749968, 32, 8145, 32, 1073749984, 32, 8161, 32, 8165, 28 };  // NOLINT
 static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable1Size = 73;  // NOLINT
-static const int32_t kEcma262CanonicalizeTable1[146] = {
+static const uint16_t kEcma262CanonicalizeTable1Size = 67;  // NOLINT
+static const int32_t kEcma262CanonicalizeTable1[134] = {
   334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104, 1257, -104, 1073744944, -192, 3166, -192,  // NOLINT
-  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4, 3190, -4,  // NOLINT
-  3201, -4, 3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4,  // NOLINT
-  3217, -4, 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4,  // NOLINT
-  3233, -4, 3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,  // NOLINT
-  3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4,  // NOLINT
-  3265, -4, 3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4,  // NOLINT
-  3281, -4, 3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4,  // NOLINT
-  3297, -4, 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056, 3365, -29056, 3367, -29056,  // NOLINT
-  3373, -29056 };  // NOLINT
+  3169, -4, 3173, -43180, 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3190, -4, 3201, -4,  // NOLINT
+  3203, -4, 3205, -4, 3207, -4, 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,  // NOLINT
+  3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4, 3229, -4, 3231, -4, 3233, -4,  // NOLINT
+  3235, -4, 3237, -4, 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4, 3249, -4,  // NOLINT
+  3251, -4, 3253, -4, 3255, -4, 3257, -4, 3259, -4, 3261, -4, 3263, -4, 3265, -4,  // NOLINT
+  3267, -4, 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4, 3279, -4, 3281, -4,  // NOLINT
+  3283, -4, 3285, -4, 3287, -4, 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,  // NOLINT
+  3299, -4, 1073745152, -29056, 3365, -29056 };  // NOLINT
 static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1;  // NOLINT
-static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] = {  // NOLINT
-  {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable5Size = 88;  // NOLINT
-static const int32_t kEcma262CanonicalizeTable5[176] = {
-  1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4, 1615, -4,  // NOLINT
-  1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4, 1629, -4, 1631, -4,  // NOLINT
-  1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4, 1643, -4, 1645, -4, 1665, -4,  // NOLINT
-  1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4, 1677, -4, 1679, -4, 1681, -4,  // NOLINT
-  1683, -4, 1685, -4, 1687, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,  // NOLINT
-  1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4, 1853, -4,  // NOLINT
-  1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4, 1867, -4, 1869, -4,  // NOLINT
-  1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4, 1881, -4, 1883, -4, 1885, -4,  // NOLINT
-  1887, -4, 1889, -4, 1891, -4, 1893, -4, 1895, -4, 1897, -4, 1899, -4, 1901, -4,  // NOLINT
-  1903, -4, 1914, -4, 1916, -4, 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4,  // NOLINT
-  1932, -4, 1937, -4, 1939, -4, 1953, -4, 1955, -4, 1957, -4, 1959, -4, 1961, -4 };  // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1;  // NOLINT
 static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] = {  // NOLINT
   {{kSentinel}} }; // NOLINT
 static const uint16_t kEcma262CanonicalizeTable7Size = 2;  // NOLINT
@@ -1344,13 +1185,6 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
-    case 5: return LookupMapping<true>(kEcma262CanonicalizeTable5,
-                                           kEcma262CanonicalizeTable5Size,
-                                           kEcma262CanonicalizeMultiStrings5,
-                                           c,
-                                           n,
-                                           result,
-                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kEcma262CanonicalizeTable7,
                                            kEcma262CanonicalizeTable7Size,
                                            kEcma262CanonicalizeMultiStrings7,
@@ -1362,7 +1196,7 @@
   }
 }
 
-static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[497] = {  // NOLINT
+static const MultiCharacterSpecialCase<4> kEcma262UnCanonicalizeMultiStrings0[469] = {  // NOLINT
   {{65, 97, kSentinel}}, {{90, 122, kSentinel}}, {{181, 924, 956, kSentinel}}, {{192, 224, kSentinel}},  // NOLINT
   {{214, 246, kSentinel}}, {{216, 248, kSentinel}}, {{222, 254, kSentinel}}, {{255, 376, kSentinel}},  // NOLINT
   {{256, 257, kSentinel}}, {{258, 259, kSentinel}}, {{260, 261, kSentinel}}, {{262, 263, kSentinel}},  // NOLINT
@@ -1404,19 +1238,16 @@
   {{546, 547, kSentinel}}, {{548, 549, kSentinel}}, {{550, 551, kSentinel}}, {{552, 553, kSentinel}},  // NOLINT
   {{554, 555, kSentinel}}, {{556, 557, kSentinel}}, {{558, 559, kSentinel}}, {{560, 561, kSentinel}},  // NOLINT
   {{562, 563, kSentinel}}, {{570, 11365, kSentinel}}, {{571, 572, kSentinel}}, {{574, 11366, kSentinel}},  // NOLINT
-  {{575, 11390, kSentinel}}, {{576, 11391, kSentinel}}, {{577, 578, kSentinel}}, {{580, 649, kSentinel}},  // NOLINT
-  {{581, 652, kSentinel}}, {{582, 583, kSentinel}}, {{584, 585, kSentinel}}, {{586, 587, kSentinel}},  // NOLINT
-  {{588, 589, kSentinel}}, {{590, 591, kSentinel}}, {{592, 11375, kSentinel}}, {{593, 11373, kSentinel}},  // NOLINT
-  {{594, 11376, kSentinel}}, {{613, 42893, kSentinel}}, {{614, 42922, kSentinel}}, {{619, 11362, kSentinel}},  // NOLINT
-  {{625, 11374, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{880, 881, kSentinel}},  // NOLINT
-  {{882, 883, kSentinel}}, {{886, 887, kSentinel}}, {{891, 1021, kSentinel}}, {{893, 1023, kSentinel}},  // NOLINT
-  {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}}, {{908, 972, kSentinel}},  // NOLINT
-  {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}}, {{914, 946, 976, kSentinel}},  // NOLINT
-  {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}}, {{918, 950, kSentinel}},  // NOLINT
-  {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}}, {{923, 955, kSentinel}},  // NOLINT
-  {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}}, {{929, 961, 1009, kSentinel}},  // NOLINT
-  {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}}, {{934, 966, 981, kSentinel}},  // NOLINT
-  {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{975, 983, kSentinel}}, {{984, 985, kSentinel}},  // NOLINT
+  {{577, 578, kSentinel}}, {{580, 649, kSentinel}}, {{581, 652, kSentinel}}, {{582, 583, kSentinel}},  // NOLINT
+  {{584, 585, kSentinel}}, {{586, 587, kSentinel}}, {{588, 589, kSentinel}}, {{590, 591, kSentinel}},  // NOLINT
+  {{619, 11362, kSentinel}}, {{637, 11364, kSentinel}}, {{837, 921, 953, 8126}}, {{891, 1021, kSentinel}},  // NOLINT
+  {{893, 1023, kSentinel}}, {{902, 940, kSentinel}}, {{904, 941, kSentinel}}, {{906, 943, kSentinel}},  // NOLINT
+  {{908, 972, kSentinel}}, {{910, 973, kSentinel}}, {{911, 974, kSentinel}}, {{913, 945, kSentinel}},  // NOLINT
+  {{914, 946, 976, kSentinel}}, {{915, 947, kSentinel}}, {{916, 948, kSentinel}}, {{917, 949, 1013, kSentinel}},  // NOLINT
+  {{918, 950, kSentinel}}, {{919, 951, kSentinel}}, {{920, 952, 977, kSentinel}}, {{922, 954, 1008, kSentinel}},  // NOLINT
+  {{923, 955, kSentinel}}, {{925, 957, kSentinel}}, {{927, 959, kSentinel}}, {{928, 960, 982, kSentinel}},  // NOLINT
+  {{929, 961, 1009, kSentinel}}, {{931, 962, 963, kSentinel}}, {{932, 964, kSentinel}}, {{933, 965, kSentinel}},  // NOLINT
+  {{934, 966, 981, kSentinel}}, {{935, 967, kSentinel}}, {{939, 971, kSentinel}}, {{984, 985, kSentinel}},  // NOLINT
   {{986, 987, kSentinel}}, {{988, 989, kSentinel}}, {{990, 991, kSentinel}}, {{992, 993, kSentinel}},  // NOLINT
   {{994, 995, kSentinel}}, {{996, 997, kSentinel}}, {{998, 999, kSentinel}}, {{1000, 1001, kSentinel}},  // NOLINT
   {{1002, 1003, kSentinel}}, {{1004, 1005, kSentinel}}, {{1006, 1007, kSentinel}}, {{1010, 1017, kSentinel}},  // NOLINT
@@ -1443,42 +1274,38 @@
   {{1276, 1277, kSentinel}}, {{1278, 1279, kSentinel}}, {{1280, 1281, kSentinel}}, {{1282, 1283, kSentinel}},  // NOLINT
   {{1284, 1285, kSentinel}}, {{1286, 1287, kSentinel}}, {{1288, 1289, kSentinel}}, {{1290, 1291, kSentinel}},  // NOLINT
   {{1292, 1293, kSentinel}}, {{1294, 1295, kSentinel}}, {{1296, 1297, kSentinel}}, {{1298, 1299, kSentinel}},  // NOLINT
-  {{1300, 1301, kSentinel}}, {{1302, 1303, kSentinel}}, {{1304, 1305, kSentinel}}, {{1306, 1307, kSentinel}},  // NOLINT
-  {{1308, 1309, kSentinel}}, {{1310, 1311, kSentinel}}, {{1312, 1313, kSentinel}}, {{1314, 1315, kSentinel}},  // NOLINT
-  {{1316, 1317, kSentinel}}, {{1318, 1319, kSentinel}}, {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}},  // NOLINT
-  {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}}, {{4295, 11559, kSentinel}}, {{4301, 11565, kSentinel}},  // NOLINT
-  {{7545, 42877, kSentinel}}, {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}},  // NOLINT
-  {{7684, 7685, kSentinel}}, {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}},  // NOLINT
-  {{7692, 7693, kSentinel}}, {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}},  // NOLINT
-  {{7700, 7701, kSentinel}}, {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}},  // NOLINT
-  {{7708, 7709, kSentinel}}, {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}},  // NOLINT
-  {{7716, 7717, kSentinel}}, {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}},  // NOLINT
-  {{7724, 7725, kSentinel}}, {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}},  // NOLINT
-  {{7732, 7733, kSentinel}}, {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}},  // NOLINT
-  {{7740, 7741, kSentinel}}, {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}},  // NOLINT
-  {{7748, 7749, kSentinel}}, {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}},  // NOLINT
-  {{7756, 7757, kSentinel}}, {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}},  // NOLINT
-  {{7764, 7765, kSentinel}}, {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}},  // NOLINT
-  {{7772, 7773, kSentinel}}, {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}},  // NOLINT
-  {{7780, 7781, kSentinel}}, {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}},  // NOLINT
-  {{7788, 7789, kSentinel}}, {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}},  // NOLINT
-  {{7796, 7797, kSentinel}}, {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}},  // NOLINT
-  {{7804, 7805, kSentinel}}, {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}},  // NOLINT
-  {{7812, 7813, kSentinel}}, {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}},  // NOLINT
-  {{7820, 7821, kSentinel}}, {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}},  // NOLINT
-  {{7828, 7829, kSentinel}}, {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}},  // NOLINT
-  {{7846, 7847, kSentinel}}, {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}},  // NOLINT
-  {{7854, 7855, kSentinel}}, {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}},  // NOLINT
-  {{7862, 7863, kSentinel}}, {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}},  // NOLINT
-  {{7870, 7871, kSentinel}}, {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}},  // NOLINT
-  {{7878, 7879, kSentinel}}, {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}},  // NOLINT
-  {{7886, 7887, kSentinel}}, {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}},  // NOLINT
-  {{7894, 7895, kSentinel}}, {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}},  // NOLINT
-  {{7902, 7903, kSentinel}}, {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}},  // NOLINT
-  {{7910, 7911, kSentinel}}, {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}},  // NOLINT
-  {{7918, 7919, kSentinel}}, {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}},  // NOLINT
-  {{7926, 7927, kSentinel}}, {{7928, 7929, kSentinel}}, {{7930, 7931, kSentinel}}, {{7932, 7933, kSentinel}},  // NOLINT
-  {{7934, 7935, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}},  // NOLINT
+  {{1329, 1377, kSentinel}}, {{1366, 1414, kSentinel}}, {{4256, 11520, kSentinel}}, {{4293, 11557, kSentinel}},  // NOLINT
+  {{7549, 11363, kSentinel}}, {{7680, 7681, kSentinel}}, {{7682, 7683, kSentinel}}, {{7684, 7685, kSentinel}},  // NOLINT
+  {{7686, 7687, kSentinel}}, {{7688, 7689, kSentinel}}, {{7690, 7691, kSentinel}}, {{7692, 7693, kSentinel}},  // NOLINT
+  {{7694, 7695, kSentinel}}, {{7696, 7697, kSentinel}}, {{7698, 7699, kSentinel}}, {{7700, 7701, kSentinel}},  // NOLINT
+  {{7702, 7703, kSentinel}}, {{7704, 7705, kSentinel}}, {{7706, 7707, kSentinel}}, {{7708, 7709, kSentinel}},  // NOLINT
+  {{7710, 7711, kSentinel}}, {{7712, 7713, kSentinel}}, {{7714, 7715, kSentinel}}, {{7716, 7717, kSentinel}},  // NOLINT
+  {{7718, 7719, kSentinel}}, {{7720, 7721, kSentinel}}, {{7722, 7723, kSentinel}}, {{7724, 7725, kSentinel}},  // NOLINT
+  {{7726, 7727, kSentinel}}, {{7728, 7729, kSentinel}}, {{7730, 7731, kSentinel}}, {{7732, 7733, kSentinel}},  // NOLINT
+  {{7734, 7735, kSentinel}}, {{7736, 7737, kSentinel}}, {{7738, 7739, kSentinel}}, {{7740, 7741, kSentinel}},  // NOLINT
+  {{7742, 7743, kSentinel}}, {{7744, 7745, kSentinel}}, {{7746, 7747, kSentinel}}, {{7748, 7749, kSentinel}},  // NOLINT
+  {{7750, 7751, kSentinel}}, {{7752, 7753, kSentinel}}, {{7754, 7755, kSentinel}}, {{7756, 7757, kSentinel}},  // NOLINT
+  {{7758, 7759, kSentinel}}, {{7760, 7761, kSentinel}}, {{7762, 7763, kSentinel}}, {{7764, 7765, kSentinel}},  // NOLINT
+  {{7766, 7767, kSentinel}}, {{7768, 7769, kSentinel}}, {{7770, 7771, kSentinel}}, {{7772, 7773, kSentinel}},  // NOLINT
+  {{7774, 7775, kSentinel}}, {{7776, 7777, 7835, kSentinel}}, {{7778, 7779, kSentinel}}, {{7780, 7781, kSentinel}},  // NOLINT
+  {{7782, 7783, kSentinel}}, {{7784, 7785, kSentinel}}, {{7786, 7787, kSentinel}}, {{7788, 7789, kSentinel}},  // NOLINT
+  {{7790, 7791, kSentinel}}, {{7792, 7793, kSentinel}}, {{7794, 7795, kSentinel}}, {{7796, 7797, kSentinel}},  // NOLINT
+  {{7798, 7799, kSentinel}}, {{7800, 7801, kSentinel}}, {{7802, 7803, kSentinel}}, {{7804, 7805, kSentinel}},  // NOLINT
+  {{7806, 7807, kSentinel}}, {{7808, 7809, kSentinel}}, {{7810, 7811, kSentinel}}, {{7812, 7813, kSentinel}},  // NOLINT
+  {{7814, 7815, kSentinel}}, {{7816, 7817, kSentinel}}, {{7818, 7819, kSentinel}}, {{7820, 7821, kSentinel}},  // NOLINT
+  {{7822, 7823, kSentinel}}, {{7824, 7825, kSentinel}}, {{7826, 7827, kSentinel}}, {{7828, 7829, kSentinel}},  // NOLINT
+  {{7840, 7841, kSentinel}}, {{7842, 7843, kSentinel}}, {{7844, 7845, kSentinel}}, {{7846, 7847, kSentinel}},  // NOLINT
+  {{7848, 7849, kSentinel}}, {{7850, 7851, kSentinel}}, {{7852, 7853, kSentinel}}, {{7854, 7855, kSentinel}},  // NOLINT
+  {{7856, 7857, kSentinel}}, {{7858, 7859, kSentinel}}, {{7860, 7861, kSentinel}}, {{7862, 7863, kSentinel}},  // NOLINT
+  {{7864, 7865, kSentinel}}, {{7866, 7867, kSentinel}}, {{7868, 7869, kSentinel}}, {{7870, 7871, kSentinel}},  // NOLINT
+  {{7872, 7873, kSentinel}}, {{7874, 7875, kSentinel}}, {{7876, 7877, kSentinel}}, {{7878, 7879, kSentinel}},  // NOLINT
+  {{7880, 7881, kSentinel}}, {{7882, 7883, kSentinel}}, {{7884, 7885, kSentinel}}, {{7886, 7887, kSentinel}},  // NOLINT
+  {{7888, 7889, kSentinel}}, {{7890, 7891, kSentinel}}, {{7892, 7893, kSentinel}}, {{7894, 7895, kSentinel}},  // NOLINT
+  {{7896, 7897, kSentinel}}, {{7898, 7899, kSentinel}}, {{7900, 7901, kSentinel}}, {{7902, 7903, kSentinel}},  // NOLINT
+  {{7904, 7905, kSentinel}}, {{7906, 7907, kSentinel}}, {{7908, 7909, kSentinel}}, {{7910, 7911, kSentinel}},  // NOLINT
+  {{7912, 7913, kSentinel}}, {{7914, 7915, kSentinel}}, {{7916, 7917, kSentinel}}, {{7918, 7919, kSentinel}},  // NOLINT
+  {{7920, 7921, kSentinel}}, {{7922, 7923, kSentinel}}, {{7924, 7925, kSentinel}}, {{7926, 7927, kSentinel}},  // NOLINT
+  {{7928, 7929, kSentinel}}, {{7936, 7944, kSentinel}}, {{7943, 7951, kSentinel}}, {{7952, 7960, kSentinel}},  // NOLINT
   {{7957, 7965, kSentinel}}, {{7968, 7976, kSentinel}}, {{7975, 7983, kSentinel}}, {{7984, 7992, kSentinel}},  // NOLINT
   {{7991, 7999, kSentinel}}, {{8000, 8008, kSentinel}}, {{8005, 8013, kSentinel}}, {{8017, 8025, kSentinel}},  // NOLINT
   {{8019, 8027, kSentinel}}, {{8021, 8029, kSentinel}}, {{8023, 8031, kSentinel}}, {{8032, 8040, kSentinel}},  // NOLINT
@@ -1488,8 +1315,8 @@
   {{8061, 8187, kSentinel}}, {{8112, 8120, kSentinel}}, {{8113, 8121, kSentinel}}, {{8144, 8152, kSentinel}},  // NOLINT
   {{8145, 8153, kSentinel}}, {{8160, 8168, kSentinel}}, {{8161, 8169, kSentinel}}, {{8165, 8172, kSentinel}},  // NOLINT
   {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable0Size = 990;  // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable0[1980] = {
+static const uint16_t kEcma262UnCanonicalizeTable0Size = 945;  // NOLINT
+static const int32_t kEcma262UnCanonicalizeTable0[1890] = {
   1073741889, 1, 90, 5, 1073741921, 1, 122, 5, 181, 9, 1073742016, 13, 214, 17, 1073742040, 21,  // NOLINT
   222, 25, 1073742048, 13, 246, 17, 1073742072, 21, 254, 25, 255, 29, 256, 33, 257, 33,  // NOLINT
   258, 37, 259, 37, 260, 41, 261, 41, 262, 45, 263, 45, 264, 49, 265, 49,  // NOLINT
@@ -1528,187 +1355,127 @@
   539, 597, 540, 601, 541, 601, 542, 605, 543, 605, 544, 365, 546, 609, 547, 609,  // NOLINT
   548, 613, 549, 613, 550, 617, 551, 617, 552, 621, 553, 621, 554, 625, 555, 625,  // NOLINT
   556, 629, 557, 629, 558, 633, 559, 633, 560, 637, 561, 637, 562, 641, 563, 641,  // NOLINT
-  570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 1073742399, 657, 576, 661, 577, 665,  // NOLINT
-  578, 665, 579, 277, 580, 669, 581, 673, 582, 677, 583, 677, 584, 681, 585, 681,  // NOLINT
-  586, 685, 587, 685, 588, 689, 589, 689, 590, 693, 591, 693, 592, 697, 593, 701,  // NOLINT
-  594, 705, 595, 281, 596, 293, 1073742422, 301, 599, 305, 601, 317, 603, 321, 608, 329,  // NOLINT
-  611, 333, 613, 709, 614, 713, 616, 345, 617, 341, 619, 717, 623, 357, 625, 721,  // NOLINT
-  626, 361, 629, 369, 637, 725, 640, 385, 643, 393, 648, 401, 649, 669, 1073742474, 409,  // NOLINT
-  651, 413, 652, 673, 658, 425, 837, 729, 880, 733, 881, 733, 882, 737, 883, 737,  // NOLINT
-  886, 741, 887, 741, 1073742715, 745, 893, 749, 902, 753, 1073742728, 757, 906, 761, 908, 765,  // NOLINT
-  1073742734, 769, 911, 773, 913, 777, 914, 781, 1073742739, 785, 916, 789, 917, 793, 1073742742, 797,  // NOLINT
-  919, 801, 920, 805, 921, 729, 922, 809, 923, 813, 924, 9, 1073742749, 817, 927, 821,  // NOLINT
-  928, 825, 929, 829, 931, 833, 1073742756, 837, 933, 841, 934, 845, 1073742759, 849, 939, 853,  // NOLINT
-  940, 753, 1073742765, 757, 943, 761, 945, 777, 946, 781, 1073742771, 785, 948, 789, 949, 793,  // NOLINT
-  1073742774, 797, 951, 801, 952, 805, 953, 729, 954, 809, 955, 813, 956, 9, 1073742781, 817,  // NOLINT
-  959, 821, 960, 825, 961, 829, 962, 833, 963, 833, 1073742788, 837, 965, 841, 966, 845,  // NOLINT
-  1073742791, 849, 971, 853, 972, 765, 1073742797, 769, 974, 773, 975, 857, 976, 781, 977, 805,  // NOLINT
-  981, 845, 982, 825, 983, 857, 984, 861, 985, 861, 986, 865, 987, 865, 988, 869,  // NOLINT
-  989, 869, 990, 873, 991, 873, 992, 877, 993, 877, 994, 881, 995, 881, 996, 885,  // NOLINT
-  997, 885, 998, 889, 999, 889, 1000, 893, 1001, 893, 1002, 897, 1003, 897, 1004, 901,  // NOLINT
-  1005, 901, 1006, 905, 1007, 905, 1008, 809, 1009, 829, 1010, 909, 1013, 793, 1015, 913,  // NOLINT
-  1016, 913, 1017, 909, 1018, 917, 1019, 917, 1073742845, 745, 1023, 749, 1073742848, 921, 1039, 925,  // NOLINT
-  1073742864, 929, 1071, 933, 1073742896, 929, 1103, 933, 1073742928, 921, 1119, 925, 1120, 937, 1121, 937,  // NOLINT
-  1122, 941, 1123, 941, 1124, 945, 1125, 945, 1126, 949, 1127, 949, 1128, 953, 1129, 953,  // NOLINT
-  1130, 957, 1131, 957, 1132, 961, 1133, 961, 1134, 965, 1135, 965, 1136, 969, 1137, 969,  // NOLINT
-  1138, 973, 1139, 973, 1140, 977, 1141, 977, 1142, 981, 1143, 981, 1144, 985, 1145, 985,  // NOLINT
-  1146, 989, 1147, 989, 1148, 993, 1149, 993, 1150, 997, 1151, 997, 1152, 1001, 1153, 1001,  // NOLINT
-  1162, 1005, 1163, 1005, 1164, 1009, 1165, 1009, 1166, 1013, 1167, 1013, 1168, 1017, 1169, 1017,  // NOLINT
-  1170, 1021, 1171, 1021, 1172, 1025, 1173, 1025, 1174, 1029, 1175, 1029, 1176, 1033, 1177, 1033,  // NOLINT
-  1178, 1037, 1179, 1037, 1180, 1041, 1181, 1041, 1182, 1045, 1183, 1045, 1184, 1049, 1185, 1049,  // NOLINT
-  1186, 1053, 1187, 1053, 1188, 1057, 1189, 1057, 1190, 1061, 1191, 1061, 1192, 1065, 1193, 1065,  // NOLINT
-  1194, 1069, 1195, 1069, 1196, 1073, 1197, 1073, 1198, 1077, 1199, 1077, 1200, 1081, 1201, 1081,  // NOLINT
-  1202, 1085, 1203, 1085, 1204, 1089, 1205, 1089, 1206, 1093, 1207, 1093, 1208, 1097, 1209, 1097,  // NOLINT
-  1210, 1101, 1211, 1101, 1212, 1105, 1213, 1105, 1214, 1109, 1215, 1109, 1216, 1113, 1217, 1117,  // NOLINT
-  1218, 1117, 1219, 1121, 1220, 1121, 1221, 1125, 1222, 1125, 1223, 1129, 1224, 1129, 1225, 1133,  // NOLINT
-  1226, 1133, 1227, 1137, 1228, 1137, 1229, 1141, 1230, 1141, 1231, 1113, 1232, 1145, 1233, 1145,  // NOLINT
-  1234, 1149, 1235, 1149, 1236, 1153, 1237, 1153, 1238, 1157, 1239, 1157, 1240, 1161, 1241, 1161,  // NOLINT
-  1242, 1165, 1243, 1165, 1244, 1169, 1245, 1169, 1246, 1173, 1247, 1173, 1248, 1177, 1249, 1177,  // NOLINT
-  1250, 1181, 1251, 1181, 1252, 1185, 1253, 1185, 1254, 1189, 1255, 1189, 1256, 1193, 1257, 1193,  // NOLINT
-  1258, 1197, 1259, 1197, 1260, 1201, 1261, 1201, 1262, 1205, 1263, 1205, 1264, 1209, 1265, 1209,  // NOLINT
-  1266, 1213, 1267, 1213, 1268, 1217, 1269, 1217, 1270, 1221, 1271, 1221, 1272, 1225, 1273, 1225,  // NOLINT
-  1274, 1229, 1275, 1229, 1276, 1233, 1277, 1233, 1278, 1237, 1279, 1237, 1280, 1241, 1281, 1241,  // NOLINT
-  1282, 1245, 1283, 1245, 1284, 1249, 1285, 1249, 1286, 1253, 1287, 1253, 1288, 1257, 1289, 1257,  // NOLINT
-  1290, 1261, 1291, 1261, 1292, 1265, 1293, 1265, 1294, 1269, 1295, 1269, 1296, 1273, 1297, 1273,  // NOLINT
-  1298, 1277, 1299, 1277, 1300, 1281, 1301, 1281, 1302, 1285, 1303, 1285, 1304, 1289, 1305, 1289,  // NOLINT
-  1306, 1293, 1307, 1293, 1308, 1297, 1309, 1297, 1310, 1301, 1311, 1301, 1312, 1305, 1313, 1305,  // NOLINT
-  1314, 1309, 1315, 1309, 1316, 1313, 1317, 1313, 1318, 1317, 1319, 1317, 1073743153, 1321, 1366, 1325,  // NOLINT
-  1073743201, 1321, 1414, 1325, 1073746080, 1329, 4293, 1333, 4295, 1337, 4301, 1341, 7545, 1345, 7549, 1349,  // NOLINT
-  7680, 1353, 7681, 1353, 7682, 1357, 7683, 1357, 7684, 1361, 7685, 1361, 7686, 1365, 7687, 1365,  // NOLINT
-  7688, 1369, 7689, 1369, 7690, 1373, 7691, 1373, 7692, 1377, 7693, 1377, 7694, 1381, 7695, 1381,  // NOLINT
-  7696, 1385, 7697, 1385, 7698, 1389, 7699, 1389, 7700, 1393, 7701, 1393, 7702, 1397, 7703, 1397,  // NOLINT
-  7704, 1401, 7705, 1401, 7706, 1405, 7707, 1405, 7708, 1409, 7709, 1409, 7710, 1413, 7711, 1413,  // NOLINT
-  7712, 1417, 7713, 1417, 7714, 1421, 7715, 1421, 7716, 1425, 7717, 1425, 7718, 1429, 7719, 1429,  // NOLINT
-  7720, 1433, 7721, 1433, 7722, 1437, 7723, 1437, 7724, 1441, 7725, 1441, 7726, 1445, 7727, 1445,  // NOLINT
-  7728, 1449, 7729, 1449, 7730, 1453, 7731, 1453, 7732, 1457, 7733, 1457, 7734, 1461, 7735, 1461,  // NOLINT
-  7736, 1465, 7737, 1465, 7738, 1469, 7739, 1469, 7740, 1473, 7741, 1473, 7742, 1477, 7743, 1477,  // NOLINT
-  7744, 1481, 7745, 1481, 7746, 1485, 7747, 1485, 7748, 1489, 7749, 1489, 7750, 1493, 7751, 1493,  // NOLINT
-  7752, 1497, 7753, 1497, 7754, 1501, 7755, 1501, 7756, 1505, 7757, 1505, 7758, 1509, 7759, 1509,  // NOLINT
-  7760, 1513, 7761, 1513, 7762, 1517, 7763, 1517, 7764, 1521, 7765, 1521, 7766, 1525, 7767, 1525,  // NOLINT
-  7768, 1529, 7769, 1529, 7770, 1533, 7771, 1533, 7772, 1537, 7773, 1537, 7774, 1541, 7775, 1541,  // NOLINT
-  7776, 1545, 7777, 1545, 7778, 1549, 7779, 1549, 7780, 1553, 7781, 1553, 7782, 1557, 7783, 1557,  // NOLINT
-  7784, 1561, 7785, 1561, 7786, 1565, 7787, 1565, 7788, 1569, 7789, 1569, 7790, 1573, 7791, 1573,  // NOLINT
-  7792, 1577, 7793, 1577, 7794, 1581, 7795, 1581, 7796, 1585, 7797, 1585, 7798, 1589, 7799, 1589,  // NOLINT
-  7800, 1593, 7801, 1593, 7802, 1597, 7803, 1597, 7804, 1601, 7805, 1601, 7806, 1605, 7807, 1605,  // NOLINT
-  7808, 1609, 7809, 1609, 7810, 1613, 7811, 1613, 7812, 1617, 7813, 1617, 7814, 1621, 7815, 1621,  // NOLINT
-  7816, 1625, 7817, 1625, 7818, 1629, 7819, 1629, 7820, 1633, 7821, 1633, 7822, 1637, 7823, 1637,  // NOLINT
-  7824, 1641, 7825, 1641, 7826, 1645, 7827, 1645, 7828, 1649, 7829, 1649, 7835, 1545, 7840, 1653,  // NOLINT
-  7841, 1653, 7842, 1657, 7843, 1657, 7844, 1661, 7845, 1661, 7846, 1665, 7847, 1665, 7848, 1669,  // NOLINT
-  7849, 1669, 7850, 1673, 7851, 1673, 7852, 1677, 7853, 1677, 7854, 1681, 7855, 1681, 7856, 1685,  // NOLINT
-  7857, 1685, 7858, 1689, 7859, 1689, 7860, 1693, 7861, 1693, 7862, 1697, 7863, 1697, 7864, 1701,  // NOLINT
-  7865, 1701, 7866, 1705, 7867, 1705, 7868, 1709, 7869, 1709, 7870, 1713, 7871, 1713, 7872, 1717,  // NOLINT
-  7873, 1717, 7874, 1721, 7875, 1721, 7876, 1725, 7877, 1725, 7878, 1729, 7879, 1729, 7880, 1733,  // NOLINT
-  7881, 1733, 7882, 1737, 7883, 1737, 7884, 1741, 7885, 1741, 7886, 1745, 7887, 1745, 7888, 1749,  // NOLINT
-  7889, 1749, 7890, 1753, 7891, 1753, 7892, 1757, 7893, 1757, 7894, 1761, 7895, 1761, 7896, 1765,  // NOLINT
-  7897, 1765, 7898, 1769, 7899, 1769, 7900, 1773, 7901, 1773, 7902, 1777, 7903, 1777, 7904, 1781,  // NOLINT
-  7905, 1781, 7906, 1785, 7907, 1785, 7908, 1789, 7909, 1789, 7910, 1793, 7911, 1793, 7912, 1797,  // NOLINT
-  7913, 1797, 7914, 1801, 7915, 1801, 7916, 1805, 7917, 1805, 7918, 1809, 7919, 1809, 7920, 1813,  // NOLINT
-  7921, 1813, 7922, 1817, 7923, 1817, 7924, 1821, 7925, 1821, 7926, 1825, 7927, 1825, 7928, 1829,  // NOLINT
-  7929, 1829, 7930, 1833, 7931, 1833, 7932, 1837, 7933, 1837, 7934, 1841, 7935, 1841, 1073749760, 1845,  // NOLINT
-  7943, 1849, 1073749768, 1845, 7951, 1849, 1073749776, 1853, 7957, 1857, 1073749784, 1853, 7965, 1857, 1073749792, 1861,  // NOLINT
-  7975, 1865, 1073749800, 1861, 7983, 1865, 1073749808, 1869, 7991, 1873, 1073749816, 1869, 7999, 1873, 1073749824, 1877,  // NOLINT
-  8005, 1881, 1073749832, 1877, 8013, 1881, 8017, 1885, 8019, 1889, 8021, 1893, 8023, 1897, 8025, 1885,  // NOLINT
-  8027, 1889, 8029, 1893, 8031, 1897, 1073749856, 1901, 8039, 1905, 1073749864, 1901, 8047, 1905, 1073749872, 1909,  // NOLINT
-  8049, 1913, 1073749874, 1917, 8053, 1921, 1073749878, 1925, 8055, 1929, 1073749880, 1933, 8057, 1937, 1073749882, 1941,  // NOLINT
-  8059, 1945, 1073749884, 1949, 8061, 1953, 1073749936, 1957, 8113, 1961, 1073749944, 1957, 8121, 1961, 1073749946, 1909,  // NOLINT
-  8123, 1913, 8126, 729, 1073749960, 1917, 8139, 1921, 1073749968, 1965, 8145, 1969, 1073749976, 1965, 8153, 1969,  // NOLINT
-  1073749978, 1925, 8155, 1929, 1073749984, 1973, 8161, 1977, 8165, 1981, 1073749992, 1973, 8169, 1977, 1073749994, 1941,  // NOLINT
-  8171, 1945, 8172, 1981, 1073750008, 1933, 8185, 1937, 1073750010, 1949, 8187, 1953 };  // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 497;  // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[83] = {  // NOLINT
+  570, 645, 571, 649, 572, 649, 573, 353, 574, 653, 577, 657, 578, 657, 579, 277,  // NOLINT
+  580, 661, 581, 665, 582, 669, 583, 669, 584, 673, 585, 673, 586, 677, 587, 677,  // NOLINT
+  588, 681, 589, 681, 590, 685, 591, 685, 595, 281, 596, 293, 1073742422, 301, 599, 305,  // NOLINT
+  601, 317, 603, 321, 608, 329, 611, 333, 616, 345, 617, 341, 619, 689, 623, 357,  // NOLINT
+  626, 361, 629, 369, 637, 693, 640, 385, 643, 393, 648, 401, 649, 661, 1073742474, 409,  // NOLINT
+  651, 413, 652, 665, 658, 425, 837, 697, 1073742715, 701, 893, 705, 902, 709, 1073742728, 713,  // NOLINT
+  906, 717, 908, 721, 1073742734, 725, 911, 729, 913, 733, 914, 737, 1073742739, 741, 916, 745,  // NOLINT
+  917, 749, 1073742742, 753, 919, 757, 920, 761, 921, 697, 922, 765, 923, 769, 924, 9,  // NOLINT
+  1073742749, 773, 927, 777, 928, 781, 929, 785, 931, 789, 1073742756, 793, 933, 797, 934, 801,  // NOLINT
+  1073742759, 805, 939, 809, 940, 709, 1073742765, 713, 943, 717, 945, 733, 946, 737, 1073742771, 741,  // NOLINT
+  948, 745, 949, 749, 1073742774, 753, 951, 757, 952, 761, 953, 697, 954, 765, 955, 769,  // NOLINT
+  956, 9, 1073742781, 773, 959, 777, 960, 781, 961, 785, 962, 789, 963, 789, 1073742788, 793,  // NOLINT
+  965, 797, 966, 801, 1073742791, 805, 971, 809, 972, 721, 1073742797, 725, 974, 729, 976, 737,  // NOLINT
+  977, 761, 981, 801, 982, 781, 984, 813, 985, 813, 986, 817, 987, 817, 988, 821,  // NOLINT
+  989, 821, 990, 825, 991, 825, 992, 829, 993, 829, 994, 833, 995, 833, 996, 837,  // NOLINT
+  997, 837, 998, 841, 999, 841, 1000, 845, 1001, 845, 1002, 849, 1003, 849, 1004, 853,  // NOLINT
+  1005, 853, 1006, 857, 1007, 857, 1008, 765, 1009, 785, 1010, 861, 1013, 749, 1015, 865,  // NOLINT
+  1016, 865, 1017, 861, 1018, 869, 1019, 869, 1073742845, 701, 1023, 705, 1073742848, 873, 1039, 877,  // NOLINT
+  1073742864, 881, 1071, 885, 1073742896, 881, 1103, 885, 1073742928, 873, 1119, 877, 1120, 889, 1121, 889,  // NOLINT
+  1122, 893, 1123, 893, 1124, 897, 1125, 897, 1126, 901, 1127, 901, 1128, 905, 1129, 905,  // NOLINT
+  1130, 909, 1131, 909, 1132, 913, 1133, 913, 1134, 917, 1135, 917, 1136, 921, 1137, 921,  // NOLINT
+  1138, 925, 1139, 925, 1140, 929, 1141, 929, 1142, 933, 1143, 933, 1144, 937, 1145, 937,  // NOLINT
+  1146, 941, 1147, 941, 1148, 945, 1149, 945, 1150, 949, 1151, 949, 1152, 953, 1153, 953,  // NOLINT
+  1162, 957, 1163, 957, 1164, 961, 1165, 961, 1166, 965, 1167, 965, 1168, 969, 1169, 969,  // NOLINT
+  1170, 973, 1171, 973, 1172, 977, 1173, 977, 1174, 981, 1175, 981, 1176, 985, 1177, 985,  // NOLINT
+  1178, 989, 1179, 989, 1180, 993, 1181, 993, 1182, 997, 1183, 997, 1184, 1001, 1185, 1001,  // NOLINT
+  1186, 1005, 1187, 1005, 1188, 1009, 1189, 1009, 1190, 1013, 1191, 1013, 1192, 1017, 1193, 1017,  // NOLINT
+  1194, 1021, 1195, 1021, 1196, 1025, 1197, 1025, 1198, 1029, 1199, 1029, 1200, 1033, 1201, 1033,  // NOLINT
+  1202, 1037, 1203, 1037, 1204, 1041, 1205, 1041, 1206, 1045, 1207, 1045, 1208, 1049, 1209, 1049,  // NOLINT
+  1210, 1053, 1211, 1053, 1212, 1057, 1213, 1057, 1214, 1061, 1215, 1061, 1216, 1065, 1217, 1069,  // NOLINT
+  1218, 1069, 1219, 1073, 1220, 1073, 1221, 1077, 1222, 1077, 1223, 1081, 1224, 1081, 1225, 1085,  // NOLINT
+  1226, 1085, 1227, 1089, 1228, 1089, 1229, 1093, 1230, 1093, 1231, 1065, 1232, 1097, 1233, 1097,  // NOLINT
+  1234, 1101, 1235, 1101, 1236, 1105, 1237, 1105, 1238, 1109, 1239, 1109, 1240, 1113, 1241, 1113,  // NOLINT
+  1242, 1117, 1243, 1117, 1244, 1121, 1245, 1121, 1246, 1125, 1247, 1125, 1248, 1129, 1249, 1129,  // NOLINT
+  1250, 1133, 1251, 1133, 1252, 1137, 1253, 1137, 1254, 1141, 1255, 1141, 1256, 1145, 1257, 1145,  // NOLINT
+  1258, 1149, 1259, 1149, 1260, 1153, 1261, 1153, 1262, 1157, 1263, 1157, 1264, 1161, 1265, 1161,  // NOLINT
+  1266, 1165, 1267, 1165, 1268, 1169, 1269, 1169, 1270, 1173, 1271, 1173, 1272, 1177, 1273, 1177,  // NOLINT
+  1274, 1181, 1275, 1181, 1276, 1185, 1277, 1185, 1278, 1189, 1279, 1189, 1280, 1193, 1281, 1193,  // NOLINT
+  1282, 1197, 1283, 1197, 1284, 1201, 1285, 1201, 1286, 1205, 1287, 1205, 1288, 1209, 1289, 1209,  // NOLINT
+  1290, 1213, 1291, 1213, 1292, 1217, 1293, 1217, 1294, 1221, 1295, 1221, 1296, 1225, 1297, 1225,  // NOLINT
+  1298, 1229, 1299, 1229, 1073743153, 1233, 1366, 1237, 1073743201, 1233, 1414, 1237, 1073746080, 1241, 4293, 1245,  // NOLINT
+  7549, 1249, 7680, 1253, 7681, 1253, 7682, 1257, 7683, 1257, 7684, 1261, 7685, 1261, 7686, 1265,  // NOLINT
+  7687, 1265, 7688, 1269, 7689, 1269, 7690, 1273, 7691, 1273, 7692, 1277, 7693, 1277, 7694, 1281,  // NOLINT
+  7695, 1281, 7696, 1285, 7697, 1285, 7698, 1289, 7699, 1289, 7700, 1293, 7701, 1293, 7702, 1297,  // NOLINT
+  7703, 1297, 7704, 1301, 7705, 1301, 7706, 1305, 7707, 1305, 7708, 1309, 7709, 1309, 7710, 1313,  // NOLINT
+  7711, 1313, 7712, 1317, 7713, 1317, 7714, 1321, 7715, 1321, 7716, 1325, 7717, 1325, 7718, 1329,  // NOLINT
+  7719, 1329, 7720, 1333, 7721, 1333, 7722, 1337, 7723, 1337, 7724, 1341, 7725, 1341, 7726, 1345,  // NOLINT
+  7727, 1345, 7728, 1349, 7729, 1349, 7730, 1353, 7731, 1353, 7732, 1357, 7733, 1357, 7734, 1361,  // NOLINT
+  7735, 1361, 7736, 1365, 7737, 1365, 7738, 1369, 7739, 1369, 7740, 1373, 7741, 1373, 7742, 1377,  // NOLINT
+  7743, 1377, 7744, 1381, 7745, 1381, 7746, 1385, 7747, 1385, 7748, 1389, 7749, 1389, 7750, 1393,  // NOLINT
+  7751, 1393, 7752, 1397, 7753, 1397, 7754, 1401, 7755, 1401, 7756, 1405, 7757, 1405, 7758, 1409,  // NOLINT
+  7759, 1409, 7760, 1413, 7761, 1413, 7762, 1417, 7763, 1417, 7764, 1421, 7765, 1421, 7766, 1425,  // NOLINT
+  7767, 1425, 7768, 1429, 7769, 1429, 7770, 1433, 7771, 1433, 7772, 1437, 7773, 1437, 7774, 1441,  // NOLINT
+  7775, 1441, 7776, 1445, 7777, 1445, 7778, 1449, 7779, 1449, 7780, 1453, 7781, 1453, 7782, 1457,  // NOLINT
+  7783, 1457, 7784, 1461, 7785, 1461, 7786, 1465, 7787, 1465, 7788, 1469, 7789, 1469, 7790, 1473,  // NOLINT
+  7791, 1473, 7792, 1477, 7793, 1477, 7794, 1481, 7795, 1481, 7796, 1485, 7797, 1485, 7798, 1489,  // NOLINT
+  7799, 1489, 7800, 1493, 7801, 1493, 7802, 1497, 7803, 1497, 7804, 1501, 7805, 1501, 7806, 1505,  // NOLINT
+  7807, 1505, 7808, 1509, 7809, 1509, 7810, 1513, 7811, 1513, 7812, 1517, 7813, 1517, 7814, 1521,  // NOLINT
+  7815, 1521, 7816, 1525, 7817, 1525, 7818, 1529, 7819, 1529, 7820, 1533, 7821, 1533, 7822, 1537,  // NOLINT
+  7823, 1537, 7824, 1541, 7825, 1541, 7826, 1545, 7827, 1545, 7828, 1549, 7829, 1549, 7835, 1445,  // NOLINT
+  7840, 1553, 7841, 1553, 7842, 1557, 7843, 1557, 7844, 1561, 7845, 1561, 7846, 1565, 7847, 1565,  // NOLINT
+  7848, 1569, 7849, 1569, 7850, 1573, 7851, 1573, 7852, 1577, 7853, 1577, 7854, 1581, 7855, 1581,  // NOLINT
+  7856, 1585, 7857, 1585, 7858, 1589, 7859, 1589, 7860, 1593, 7861, 1593, 7862, 1597, 7863, 1597,  // NOLINT
+  7864, 1601, 7865, 1601, 7866, 1605, 7867, 1605, 7868, 1609, 7869, 1609, 7870, 1613, 7871, 1613,  // NOLINT
+  7872, 1617, 7873, 1617, 7874, 1621, 7875, 1621, 7876, 1625, 7877, 1625, 7878, 1629, 7879, 1629,  // NOLINT
+  7880, 1633, 7881, 1633, 7882, 1637, 7883, 1637, 7884, 1641, 7885, 1641, 7886, 1645, 7887, 1645,  // NOLINT
+  7888, 1649, 7889, 1649, 7890, 1653, 7891, 1653, 7892, 1657, 7893, 1657, 7894, 1661, 7895, 1661,  // NOLINT
+  7896, 1665, 7897, 1665, 7898, 1669, 7899, 1669, 7900, 1673, 7901, 1673, 7902, 1677, 7903, 1677,  // NOLINT
+  7904, 1681, 7905, 1681, 7906, 1685, 7907, 1685, 7908, 1689, 7909, 1689, 7910, 1693, 7911, 1693,  // NOLINT
+  7912, 1697, 7913, 1697, 7914, 1701, 7915, 1701, 7916, 1705, 7917, 1705, 7918, 1709, 7919, 1709,  // NOLINT
+  7920, 1713, 7921, 1713, 7922, 1717, 7923, 1717, 7924, 1721, 7925, 1721, 7926, 1725, 7927, 1725,  // NOLINT
+  7928, 1729, 7929, 1729, 1073749760, 1733, 7943, 1737, 1073749768, 1733, 7951, 1737, 1073749776, 1741, 7957, 1745,  // NOLINT
+  1073749784, 1741, 7965, 1745, 1073749792, 1749, 7975, 1753, 1073749800, 1749, 7983, 1753, 1073749808, 1757, 7991, 1761,  // NOLINT
+  1073749816, 1757, 7999, 1761, 1073749824, 1765, 8005, 1769, 1073749832, 1765, 8013, 1769, 8017, 1773, 8019, 1777,  // NOLINT
+  8021, 1781, 8023, 1785, 8025, 1773, 8027, 1777, 8029, 1781, 8031, 1785, 1073749856, 1789, 8039, 1793,  // NOLINT
+  1073749864, 1789, 8047, 1793, 1073749872, 1797, 8049, 1801, 1073749874, 1805, 8053, 1809, 1073749878, 1813, 8055, 1817,  // NOLINT
+  1073749880, 1821, 8057, 1825, 1073749882, 1829, 8059, 1833, 1073749884, 1837, 8061, 1841, 1073749936, 1845, 8113, 1849,  // NOLINT
+  1073749944, 1845, 8121, 1849, 1073749946, 1797, 8123, 1801, 8126, 697, 1073749960, 1805, 8139, 1809, 1073749968, 1853,  // NOLINT
+  8145, 1857, 1073749976, 1853, 8153, 1857, 1073749978, 1813, 8155, 1817, 1073749984, 1861, 8161, 1865, 8165, 1869,  // NOLINT
+  1073749992, 1861, 8169, 1865, 1073749994, 1829, 8171, 1833, 8172, 1869, 1073750008, 1821, 8185, 1825, 1073750010, 1837,  // NOLINT
+  8187, 1841 };  // NOLINT
+static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 469;  // NOLINT
+static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings1[71] = {  // NOLINT
   {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}},  // NOLINT
   {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}},  // NOLINT
   {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}},  // NOLINT
   {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}},  // NOLINT
-  {{11371, 11372}}, {{593, 11373}}, {{625, 11374}}, {{592, 11375}},  // NOLINT
-  {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}}, {{575, 11390}},  // NOLINT
-  {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}}, {{11396, 11397}},  // NOLINT
-  {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, {{11404, 11405}},  // NOLINT
-  {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, {{11412, 11413}},  // NOLINT
-  {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, {{11420, 11421}},  // NOLINT
-  {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, {{11428, 11429}},  // NOLINT
-  {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, {{11436, 11437}},  // NOLINT
-  {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, {{11444, 11445}},  // NOLINT
-  {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, {{11452, 11453}},  // NOLINT
-  {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, {{11460, 11461}},  // NOLINT
-  {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, {{11468, 11469}},  // NOLINT
-  {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, {{11476, 11477}},  // NOLINT
-  {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, {{11484, 11485}},  // NOLINT
-  {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, {{11499, 11500}},  // NOLINT
-  {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}}, {{4293, 11557}},  // NOLINT
-  {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable1Size = 149;  // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable1[298] = {
+  {{11371, 11372}}, {{11381, 11382}}, {{11392, 11393}}, {{11394, 11395}},  // NOLINT
+  {{11396, 11397}}, {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}},  // NOLINT
+  {{11404, 11405}}, {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}},  // NOLINT
+  {{11412, 11413}}, {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}},  // NOLINT
+  {{11420, 11421}}, {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}},  // NOLINT
+  {{11428, 11429}}, {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}},  // NOLINT
+  {{11436, 11437}}, {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}},  // NOLINT
+  {{11444, 11445}}, {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}},  // NOLINT
+  {{11452, 11453}}, {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}},  // NOLINT
+  {{11460, 11461}}, {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}},  // NOLINT
+  {{11468, 11469}}, {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}},  // NOLINT
+  {{11476, 11477}}, {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}},  // NOLINT
+  {{11484, 11485}}, {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}},  // NOLINT
+  {{4256, 11520}}, {{4293, 11557}}, {{kSentinel}} }; // NOLINT
+static const uint16_t kEcma262UnCanonicalizeTable1Size = 133;  // NOLINT
+static const int32_t kEcma262UnCanonicalizeTable1[266] = {
   306, 1, 334, 1, 1073742176, 5, 367, 9, 1073742192, 5, 383, 9, 387, 13, 388, 13,  // NOLINT
   1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21, 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29,  // NOLINT
   3168, 33, 3169, 33, 3170, 37, 3171, 41, 3172, 45, 3173, 49, 3174, 53, 3175, 57,  // NOLINT
-  3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3181, 69, 3182, 73, 3183, 77,  // NOLINT
-  3184, 81, 3186, 85, 3187, 85, 3189, 89, 3190, 89, 1073745022, 93, 3199, 97, 3200, 101,  // NOLINT
-  3201, 101, 3202, 105, 3203, 105, 3204, 109, 3205, 109, 3206, 113, 3207, 113, 3208, 117,  // NOLINT
-  3209, 117, 3210, 121, 3211, 121, 3212, 125, 3213, 125, 3214, 129, 3215, 129, 3216, 133,  // NOLINT
-  3217, 133, 3218, 137, 3219, 137, 3220, 141, 3221, 141, 3222, 145, 3223, 145, 3224, 149,  // NOLINT
-  3225, 149, 3226, 153, 3227, 153, 3228, 157, 3229, 157, 3230, 161, 3231, 161, 3232, 165,  // NOLINT
-  3233, 165, 3234, 169, 3235, 169, 3236, 173, 3237, 173, 3238, 177, 3239, 177, 3240, 181,  // NOLINT
-  3241, 181, 3242, 185, 3243, 185, 3244, 189, 3245, 189, 3246, 193, 3247, 193, 3248, 197,  // NOLINT
-  3249, 197, 3250, 201, 3251, 201, 3252, 205, 3253, 205, 3254, 209, 3255, 209, 3256, 213,  // NOLINT
-  3257, 213, 3258, 217, 3259, 217, 3260, 221, 3261, 221, 3262, 225, 3263, 225, 3264, 229,  // NOLINT
-  3265, 229, 3266, 233, 3267, 233, 3268, 237, 3269, 237, 3270, 241, 3271, 241, 3272, 245,  // NOLINT
-  3273, 245, 3274, 249, 3275, 249, 3276, 253, 3277, 253, 3278, 257, 3279, 257, 3280, 261,  // NOLINT
-  3281, 261, 3282, 265, 3283, 265, 3284, 269, 3285, 269, 3286, 273, 3287, 273, 3288, 277,  // NOLINT
-  3289, 277, 3290, 281, 3291, 281, 3292, 285, 3293, 285, 3294, 289, 3295, 289, 3296, 293,  // NOLINT
-  3297, 293, 3298, 297, 3299, 297, 3307, 301, 3308, 301, 3309, 305, 3310, 305, 3314, 309,  // NOLINT
-  3315, 309, 1073745152, 313, 3365, 317, 3367, 321, 3373, 325 };  // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83;  // NOLINT
-static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings5[92] = {  // NOLINT
-  {{42560, 42561}}, {{42562, 42563}}, {{42564, 42565}}, {{42566, 42567}},  // NOLINT
-  {{42568, 42569}}, {{42570, 42571}}, {{42572, 42573}}, {{42574, 42575}},  // NOLINT
-  {{42576, 42577}}, {{42578, 42579}}, {{42580, 42581}}, {{42582, 42583}},  // NOLINT
-  {{42584, 42585}}, {{42586, 42587}}, {{42588, 42589}}, {{42590, 42591}},  // NOLINT
-  {{42592, 42593}}, {{42594, 42595}}, {{42596, 42597}}, {{42598, 42599}},  // NOLINT
-  {{42600, 42601}}, {{42602, 42603}}, {{42604, 42605}}, {{42624, 42625}},  // NOLINT
-  {{42626, 42627}}, {{42628, 42629}}, {{42630, 42631}}, {{42632, 42633}},  // NOLINT
-  {{42634, 42635}}, {{42636, 42637}}, {{42638, 42639}}, {{42640, 42641}},  // NOLINT
-  {{42642, 42643}}, {{42644, 42645}}, {{42646, 42647}}, {{42786, 42787}},  // NOLINT
-  {{42788, 42789}}, {{42790, 42791}}, {{42792, 42793}}, {{42794, 42795}},  // NOLINT
-  {{42796, 42797}}, {{42798, 42799}}, {{42802, 42803}}, {{42804, 42805}},  // NOLINT
-  {{42806, 42807}}, {{42808, 42809}}, {{42810, 42811}}, {{42812, 42813}},  // NOLINT
-  {{42814, 42815}}, {{42816, 42817}}, {{42818, 42819}}, {{42820, 42821}},  // NOLINT
-  {{42822, 42823}}, {{42824, 42825}}, {{42826, 42827}}, {{42828, 42829}},  // NOLINT
-  {{42830, 42831}}, {{42832, 42833}}, {{42834, 42835}}, {{42836, 42837}},  // NOLINT
-  {{42838, 42839}}, {{42840, 42841}}, {{42842, 42843}}, {{42844, 42845}},  // NOLINT
-  {{42846, 42847}}, {{42848, 42849}}, {{42850, 42851}}, {{42852, 42853}},  // NOLINT
-  {{42854, 42855}}, {{42856, 42857}}, {{42858, 42859}}, {{42860, 42861}},  // NOLINT
-  {{42862, 42863}}, {{42873, 42874}}, {{42875, 42876}}, {{7545, 42877}},  // NOLINT
-  {{42878, 42879}}, {{42880, 42881}}, {{42882, 42883}}, {{42884, 42885}},  // NOLINT
-  {{42886, 42887}}, {{42891, 42892}}, {{613, 42893}}, {{42896, 42897}},  // NOLINT
-  {{42898, 42899}}, {{42912, 42913}}, {{42914, 42915}}, {{42916, 42917}},  // NOLINT
-  {{42918, 42919}}, {{42920, 42921}}, {{614, 42922}}, {{kSentinel}} }; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable5Size = 179;  // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable5[358] = {
-  1600, 1, 1601, 1, 1602, 5, 1603, 5, 1604, 9, 1605, 9, 1606, 13, 1607, 13,  // NOLINT
-  1608, 17, 1609, 17, 1610, 21, 1611, 21, 1612, 25, 1613, 25, 1614, 29, 1615, 29,  // NOLINT
-  1616, 33, 1617, 33, 1618, 37, 1619, 37, 1620, 41, 1621, 41, 1622, 45, 1623, 45,  // NOLINT
-  1624, 49, 1625, 49, 1626, 53, 1627, 53, 1628, 57, 1629, 57, 1630, 61, 1631, 61,  // NOLINT
-  1632, 65, 1633, 65, 1634, 69, 1635, 69, 1636, 73, 1637, 73, 1638, 77, 1639, 77,  // NOLINT
-  1640, 81, 1641, 81, 1642, 85, 1643, 85, 1644, 89, 1645, 89, 1664, 93, 1665, 93,  // NOLINT
-  1666, 97, 1667, 97, 1668, 101, 1669, 101, 1670, 105, 1671, 105, 1672, 109, 1673, 109,  // NOLINT
-  1674, 113, 1675, 113, 1676, 117, 1677, 117, 1678, 121, 1679, 121, 1680, 125, 1681, 125,  // NOLINT
-  1682, 129, 1683, 129, 1684, 133, 1685, 133, 1686, 137, 1687, 137, 1826, 141, 1827, 141,  // NOLINT
-  1828, 145, 1829, 145, 1830, 149, 1831, 149, 1832, 153, 1833, 153, 1834, 157, 1835, 157,  // NOLINT
-  1836, 161, 1837, 161, 1838, 165, 1839, 165, 1842, 169, 1843, 169, 1844, 173, 1845, 173,  // NOLINT
-  1846, 177, 1847, 177, 1848, 181, 1849, 181, 1850, 185, 1851, 185, 1852, 189, 1853, 189,  // NOLINT
-  1854, 193, 1855, 193, 1856, 197, 1857, 197, 1858, 201, 1859, 201, 1860, 205, 1861, 205,  // NOLINT
-  1862, 209, 1863, 209, 1864, 213, 1865, 213, 1866, 217, 1867, 217, 1868, 221, 1869, 221,  // NOLINT
-  1870, 225, 1871, 225, 1872, 229, 1873, 229, 1874, 233, 1875, 233, 1876, 237, 1877, 237,  // NOLINT
-  1878, 241, 1879, 241, 1880, 245, 1881, 245, 1882, 249, 1883, 249, 1884, 253, 1885, 253,  // NOLINT
-  1886, 257, 1887, 257, 1888, 261, 1889, 261, 1890, 265, 1891, 265, 1892, 269, 1893, 269,  // NOLINT
-  1894, 273, 1895, 273, 1896, 277, 1897, 277, 1898, 281, 1899, 281, 1900, 285, 1901, 285,  // NOLINT
-  1902, 289, 1903, 289, 1913, 293, 1914, 293, 1915, 297, 1916, 297, 1917, 301, 1918, 305,  // NOLINT
-  1919, 305, 1920, 309, 1921, 309, 1922, 313, 1923, 313, 1924, 317, 1925, 317, 1926, 321,  // NOLINT
-  1927, 321, 1931, 325, 1932, 325, 1933, 329, 1936, 333, 1937, 333, 1938, 337, 1939, 337,  // NOLINT
-  1952, 341, 1953, 341, 1954, 345, 1955, 345, 1956, 349, 1957, 349, 1958, 353, 1959, 353,  // NOLINT
-  1960, 357, 1961, 357, 1962, 361 };  // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 92;  // NOLINT
+  3176, 57, 3177, 61, 3178, 61, 3179, 65, 3180, 65, 3189, 69, 3190, 69, 3200, 73,  // NOLINT
+  3201, 73, 3202, 77, 3203, 77, 3204, 81, 3205, 81, 3206, 85, 3207, 85, 3208, 89,  // NOLINT
+  3209, 89, 3210, 93, 3211, 93, 3212, 97, 3213, 97, 3214, 101, 3215, 101, 3216, 105,  // NOLINT
+  3217, 105, 3218, 109, 3219, 109, 3220, 113, 3221, 113, 3222, 117, 3223, 117, 3224, 121,  // NOLINT
+  3225, 121, 3226, 125, 3227, 125, 3228, 129, 3229, 129, 3230, 133, 3231, 133, 3232, 137,  // NOLINT
+  3233, 137, 3234, 141, 3235, 141, 3236, 145, 3237, 145, 3238, 149, 3239, 149, 3240, 153,  // NOLINT
+  3241, 153, 3242, 157, 3243, 157, 3244, 161, 3245, 161, 3246, 165, 3247, 165, 3248, 169,  // NOLINT
+  3249, 169, 3250, 173, 3251, 173, 3252, 177, 3253, 177, 3254, 181, 3255, 181, 3256, 185,  // NOLINT
+  3257, 185, 3258, 189, 3259, 189, 3260, 193, 3261, 193, 3262, 197, 3263, 197, 3264, 201,  // NOLINT
+  3265, 201, 3266, 205, 3267, 205, 3268, 209, 3269, 209, 3270, 213, 3271, 213, 3272, 217,  // NOLINT
+  3273, 217, 3274, 221, 3275, 221, 3276, 225, 3277, 225, 3278, 229, 3279, 229, 3280, 233,  // NOLINT
+  3281, 233, 3282, 237, 3283, 237, 3284, 241, 3285, 241, 3286, 245, 3287, 245, 3288, 249,  // NOLINT
+  3289, 249, 3290, 253, 3291, 253, 3292, 257, 3293, 257, 3294, 261, 3295, 261, 3296, 265,  // NOLINT
+  3297, 265, 3298, 269, 3299, 269, 1073745152, 273, 3365, 277 };  // NOLINT
+static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 71;  // NOLINT
 static const MultiCharacterSpecialCase<2> kEcma262UnCanonicalizeMultiStrings7[3] = {  // NOLINT
   {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}} }; // NOLINT
 static const uint16_t kEcma262UnCanonicalizeTable7Size = 4;  // NOLINT
@@ -1735,13 +1502,6 @@
                                            n,
                                            result,
                                            allow_caching_ptr);
-    case 5: return LookupMapping<true>(kEcma262UnCanonicalizeTable5,
-                                           kEcma262UnCanonicalizeTable5Size,
-                                           kEcma262UnCanonicalizeMultiStrings5,
-                                           c,
-                                           n,
-                                           result,
-                                           allow_caching_ptr);
     case 7: return LookupMapping<true>(kEcma262UnCanonicalizeTable7,
                                            kEcma262UnCanonicalizeTable7Size,
                                            kEcma262UnCanonicalizeMultiStrings7,
@@ -1817,11 +1577,9 @@
 int UnicodeData::GetByteCount() {
   return kUppercaseTable0Size * sizeof(int32_t)  // NOLINT
       + kUppercaseTable1Size * sizeof(int32_t)  // NOLINT
-      + kUppercaseTable5Size * sizeof(int32_t)  // NOLINT
       + kUppercaseTable7Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable0Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable1Size * sizeof(int32_t)  // NOLINT
-      + kLowercaseTable5Size * sizeof(int32_t)  // NOLINT
       + kLowercaseTable7Size * sizeof(int32_t)  // NOLINT
       + kLetterTable0Size * sizeof(int32_t)  // NOLINT
       + kLetterTable1Size * sizeof(int32_t)  // NOLINT
@@ -1834,7 +1592,6 @@
       + kSpaceTable0Size * sizeof(int32_t)  // NOLINT
       + kSpaceTable1Size * sizeof(int32_t)  // NOLINT
       + kNumberTable0Size * sizeof(int32_t)  // NOLINT
-      + kNumberTable5Size * sizeof(int32_t)  // NOLINT
       + kNumberTable7Size * sizeof(int32_t)  // NOLINT
       + kWhiteSpaceTable0Size * sizeof(int32_t)  // NOLINT
       + kWhiteSpaceTable1Size * sizeof(int32_t)  // NOLINT
@@ -1849,19 +1606,15 @@
       + kConnectorPunctuationTable7Size * sizeof(int32_t)  // NOLINT
       + kToLowercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kToLowercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
-      + kToLowercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToLowercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToUppercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<3>)  // NOLINT
       + kToUppercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
-      + kToUppercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kToUppercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<3>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
-      + kEcma262CanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262CanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<4>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
-      + kEcma262UnCanonicalizeMultiStrings5Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kEcma262UnCanonicalizeMultiStrings7Size * sizeof(MultiCharacterSpecialCase<2>)  // NOLINT
       + kCanonicalizationRangeMultiStrings0Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
       + kCanonicalizationRangeMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>)  // NOLINT
diff --git a/src/unicode.h b/src/unicode.h
index 94ab1b4..39fc349 100644
--- a/src/unicode.h
+++ b/src/unicode.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,7 @@
  * The max length of the result of converting the case of a single
  * character.
  */
-const int kMaxMappingSize = 4;
+static const int kMaxMappingSize = 4;
 
 template <class T, int size = 256>
 class Predicate {
@@ -100,7 +100,7 @@
   static const uchar kMaxCodePoint;
 };
 
-// --- U t f   8   a n d   16 ---
+// --- U t f   8 ---
 
 template <typename Data>
 class Buffer {
@@ -114,46 +114,10 @@
   unsigned length_;
 };
 
-
-class Utf16 {
- public:
-  static inline bool IsLeadSurrogate(int code) {
-    if (code == kNoPreviousCharacter) return false;
-    return (code & 0xfc00) == 0xd800;
-  }
-  static inline bool IsTrailSurrogate(int code) {
-    if (code == kNoPreviousCharacter) return false;
-    return (code & 0xfc00) == 0xdc00;
-  }
-
-  static inline int CombineSurrogatePair(uchar lead, uchar trail) {
-    return 0x10000 + ((lead & 0x3ff) << 10) + (trail & 0x3ff);
-  }
-  static const int kNoPreviousCharacter = -1;
-  static const uchar kMaxNonSurrogateCharCode = 0xffff;
-  // Encoding a single UTF-16 code unit will produce 1, 2 or 3 bytes
-  // of UTF-8 data.  The special case where the unit is a surrogate
-  // trail produces 1 byte net, because the encoding of the pair is
-  // 4 bytes and the 3 bytes that were used to encode the lead surrogate
-  // can be reclaimed.
-  static const int kMaxExtraUtf8BytesForOneUtf16CodeUnit = 3;
-  // One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
-  // The illegality stems from the surrogate not being part of a pair.
-  static const int kUtf8BytesToCodeASurrogate = 3;
-  static inline uchar LeadSurrogate(int char_code) {
-    return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
-  }
-  static inline uchar TrailSurrogate(int char_code) {
-    return 0xdc00 + (char_code & 0x3ff);
-  }
-};
-
-
 class Utf8 {
  public:
-  static inline uchar Length(uchar chr, int previous);
-  static inline unsigned Encode(
-      char* out, uchar c, int previous);
+  static inline uchar Length(uchar chr);
+  static inline unsigned Encode(char* out, uchar c);
   static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
       unsigned capacity, unsigned* chars_read, unsigned* offset);
   static uchar CalculateValue(const byte* str,
@@ -166,11 +130,6 @@
   static const unsigned kMaxThreeByteChar = 0xffff;
   static const unsigned kMaxFourByteChar  = 0x1fffff;
 
-  // A single surrogate is coded as a 3 byte UTF-8 sequence, but two together
-  // that match are coded as a 4 byte UTF-8 sequence.
-  static const unsigned kBytesSavedByCombiningSurrogates = 2;
-  static const unsigned kSizeOfUnmatchedSurrogate = 3;
-
  private:
   template <unsigned s> friend class Utf8InputBuffer;
   friend class Test;
@@ -188,7 +147,6 @@
   // Note that default implementation is not efficient.
   virtual void Seek(unsigned);
   unsigned Length();
-  unsigned Utf16Length();
   virtual ~CharacterStream() { }
   static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
       unsigned& offset);
@@ -198,7 +156,6 @@
       unsigned capacity, unsigned& offset);
   static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
   virtual void Rewind() = 0;
-
  protected:
   virtual void FillBuffer() = 0;
   // The number of characters left in the current buffer
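
For context on the Utf16 helpers removed above: a supplementary code point (above 0xffff) splits into a lead surrogate in 0xd800-0xdbff and a trail surrogate in 0xdc00-0xdfff, each carrying 10 bits of the offset from 0x10000. A minimal standalone sketch of that arithmetic, for illustration only (the names mirror the removed helpers, but this is not part of the patch):

#include <cassert>

// Recombine a surrogate pair into a supplementary code point.
int CombineSurrogatePair(unsigned lead, unsigned trail) {
  return 0x10000 + ((static_cast<int>(lead & 0x3ff)) << 10) + (trail & 0x3ff);
}

int main() {
  const int code_point = 0x1f600;
  // Split into lead/trail: high 10 bits and low 10 bits of (cp - 0x10000).
  unsigned lead = 0xd800 + (((code_point - 0x10000) >> 10) & 0x3ff);
  unsigned trail = 0xdc00 + ((code_point - 0x10000) & 0x3ff);
  assert(CombineSurrogatePair(lead, trail) == code_point);  // round-trips
  return 0;
}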
diff --git a/src/uri.js b/src/uri.js
index b195f3d..c910d75 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -111,59 +111,47 @@
     var o1 = octets[1];
     if (o0 < 0xe0) {
       var a = o0 & 0x1f;
-      if ((o1 < 0x80) || (o1 > 0xbf)) {
+      if ((o1 < 0x80) || (o1 > 0xbf))
         throw new $URIError("URI malformed");
-      }
       var b = o1 & 0x3f;
       value = (a << 6) + b;
-      if (value < 0x80 || value > 0x7ff) {
+      if (value < 0x80 || value > 0x7ff)
         throw new $URIError("URI malformed");
-      }
     } else {
       var o2 = octets[2];
       if (o0 < 0xf0) {
         var a = o0 & 0x0f;
-        if ((o1 < 0x80) || (o1 > 0xbf)) {
+        if ((o1 < 0x80) || (o1 > 0xbf))
           throw new $URIError("URI malformed");
-        }
         var b = o1 & 0x3f;
-        if ((o2 < 0x80) || (o2 > 0xbf)) {
+        if ((o2 < 0x80) || (o2 > 0xbf))
           throw new $URIError("URI malformed");
-        }
         var c = o2 & 0x3f;
         value = (a << 12) + (b << 6) + c;
-        if ((value < 0x800) || (value > 0xffff)) {
+        if ((value < 0x800) || (value > 0xffff))
           throw new $URIError("URI malformed");
-        }
       } else {
         var o3 = octets[3];
         if (o0 < 0xf8) {
           var a = (o0 & 0x07);
-          if ((o1 < 0x80) || (o1 > 0xbf)) {
+          if ((o1 < 0x80) || (o1 > 0xbf))
             throw new $URIError("URI malformed");
-          }
           var b = (o1 & 0x3f);
-          if ((o2 < 0x80) || (o2 > 0xbf)) {
+          if ((o2 < 0x80) || (o2 > 0xbf))
             throw new $URIError("URI malformed");
-          }
           var c = (o2 & 0x3f);
-          if ((o3 < 0x80) || (o3 > 0xbf)) {
+          if ((o3 < 0x80) || (o3 > 0xbf))
             throw new $URIError("URI malformed");
-          }
           var d = (o3 & 0x3f);
           value = (a << 18) + (b << 12) + (c << 6) + d;
-          if ((value < 0x10000) || (value > 0x10ffff)) {
+          if ((value < 0x10000) || (value > 0x10ffff))
             throw new $URIError("URI malformed");
-          }
         } else {
           throw new $URIError("URI malformed");
         }
       }
     }
   }
-  if (0xD800 <= value && value <= 0xDFFF) {
-    throw new $URIError("URI malformed");
-  }
   if (value < 0x10000) {
     result[index++] = value;
     return index;
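
Each branch in the hunk above follows the same pattern: validate that every continuation byte lies in 0x80-0xbf, assemble the payload bits, then range-check the decoded value to reject overlong encodings. A minimal C++ rendering of the two-byte branch, as an illustration of the pattern rather than a copy of the JavaScript:

#include <stdexcept>

int DecodeTwoByteUtf8(unsigned o0, unsigned o1) {
  // Continuation bytes must be 10xxxxxx, i.e. in 0x80..0xbf.
  if (o1 < 0x80 || o1 > 0xbf) throw std::runtime_error("URI malformed");
  // 110aaaaa 10bbbbbb encodes (aaaaa << 6) | bbbbbb.
  int value = ((o0 & 0x1f) << 6) + (o1 & 0x3f);
  // Two-byte sequences may only encode 0x80..0x7ff; anything smaller
  // is an overlong encoding and is rejected.
  if (value < 0x80 || value > 0x7ff) throw std::runtime_error("URI malformed");
  return value;
}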
@@ -219,15 +207,14 @@
       var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
       if (cc >> 7) {
         var n = 0;
-        while (((cc << ++n) & 0x80) != 0) { }
+        while (((cc << ++n) & 0x80) != 0) ;
         if (n == 1 || n > 4) throw new $URIError("URI malformed");
         var octets = new $Array(n);
         octets[0] = cc;
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
           if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
-          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
-                                            uri.charCodeAt(++k));
+          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
       } else {
@@ -250,7 +237,7 @@
 
 // ECMA-262 - 15.1.3.1.
 function URIDecode(uri) {
-  var reservedPredicate = function(cc) {
+  function reservedPredicate(cc) {
     // #$
     if (35 <= cc && cc <= 36) return true;
     // &
@@ -275,7 +262,7 @@
 
 // ECMA-262 - 15.1.3.2.
 function URIDecodeComponent(component) {
-  var reservedPredicate = function(cc) { return false; };
+  function reservedPredicate(cc) { return false; };
   var string = ToString(component);
   return Decode(string, reservedPredicate);
 }
@@ -296,7 +283,7 @@
 
 // ECMA-262 - 15.1.3.3.
 function URIEncode(uri) {
-  var unescapePredicate = function(cc) {
+  function unescapePredicate(cc) {
     if (isAlphaNumeric(cc)) return true;
     // !
     if (cc == 33) return true;
@@ -325,7 +312,7 @@
 
 // ECMA-262 - 15.1.3.4
 function URIEncodeComponent(component) {
-  var unescapePredicate = function(cc) {
+  function unescapePredicate(cc) {
     if (isAlphaNumeric(cc)) return true;
     // !
     if (cc == 33) return true;
@@ -379,9 +366,7 @@
 function IsValidHex(s) {
   for (var i = 0; i < s.length; ++i) {
     var cc = s.charCodeAt(i);
-    if ((48 <= cc && cc <= 57) ||
-        (65 <= cc && cc <= 70) ||
-        (97 <= cc && cc <= 102)) {
+    if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
       // '0'..'9', 'A'..'F' and 'a' .. 'f'.
     } else {
       return false;
diff --git a/src/utils.h b/src/utils.h
index 1d40c98..cf7819e 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,13 +47,13 @@
 // Returns true iff x is a power of 2 (or zero). Cannot be used with the
 // maximally negative value of the type T (the -1 overflows).
 template <typename T>
-inline bool IsPowerOf2(T x) {
+static inline bool IsPowerOf2(T x) {
   return IS_POWER_OF_TWO(x);
 }
 
 
 // X must be a power of 2.  Returns the number of trailing zeros.
-inline int WhichPowerOf2(uint32_t x) {
+static inline int WhichPowerOf2(uint32_t x) {
   ASSERT(IsPowerOf2(x));
   ASSERT(x != 0);
   int bits = 0;
@@ -88,7 +88,7 @@
 // The C++ standard leaves the semantics of '>>' undefined for
 // negative signed operands. Most implementations do the right thing,
 // though.
-inline int ArithmeticShiftRight(int x, int s) {
+static inline int ArithmeticShiftRight(int x, int s) {
   return x >> s;
 }
 
@@ -97,7 +97,7 @@
 // This allows conversion of Addresses and integral types into
 // 0-relative int offsets.
 template <typename T>
-inline intptr_t OffsetFrom(T x) {
+static inline intptr_t OffsetFrom(T x) {
   return x - static_cast<T>(0);
 }
 
@@ -106,14 +106,14 @@
 // This allows conversion of 0-relative int offsets into Addresses and
 // integral types.
 template <typename T>
-inline T AddressFrom(intptr_t x) {
+static inline T AddressFrom(intptr_t x) {
   return static_cast<T>(static_cast<T>(0) + x);
 }
 
 
 // Return the largest multiple of m which is <= x.
 template <typename T>
-inline T RoundDown(T x, intptr_t m) {
+static inline T RoundDown(T x, int m) {
   ASSERT(IsPowerOf2(m));
   return AddressFrom<T>(OffsetFrom(x) & -m);
 }
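
The rounding helpers above rely on m being a power of two: masking with -m clears the low bits, which rounds down to a multiple of m, and rounding up is rounding down after adding m - 1. A small standalone check of that identity:

#include <cassert>
#include <cstdint>

intptr_t RoundDown(intptr_t x, intptr_t m) { return x & -m; }  // m a power of 2
intptr_t RoundUp(intptr_t x, intptr_t m) { return RoundDown(x + m - 1, m); }

int main() {
  assert(RoundDown(37, 16) == 32);
  assert(RoundUp(37, 16) == 48);
  assert(RoundUp(32, 16) == 32);  // already aligned values are unchanged
  return 0;
}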
@@ -121,13 +121,13 @@
 
 // Return the smallest multiple of m which is >= x.
 template <typename T>
-inline T RoundUp(T x, intptr_t m) {
-  return RoundDown<T>(static_cast<T>(x + m - 1), m);
+static inline T RoundUp(T x, int m) {
+  return RoundDown(x + m - 1, m);
 }
 
 
 template <typename T>
-int Compare(const T& a, const T& b) {
+static int Compare(const T& a, const T& b) {
   if (a == b)
     return 0;
   else if (a < b)
@@ -138,26 +138,16 @@
 
 
 template <typename T>
-int PointerValueCompare(const T* a, const T* b) {
+static int PointerValueCompare(const T* a, const T* b) {
   return Compare<T>(*a, *b);
 }
 
 
-// Compare function to compare the object pointer value of two
-// handlified objects. The handles are passed as pointers to the
-// handles.
-template<typename T> class Handle;  // Forward declaration.
-template <typename T>
-int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
-  return Compare<T*>(*(*a), *(*b));
-}
-
-
 // Returns the smallest power of two which is >= x. If you pass in a
 // number that is already a power of two, it is returned as is.
 // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
 // figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+static inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
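
The hunk boundary cuts the clp2 body short; the remaining steps continue smearing the highest set bit of x - 1 rightward before adding one. The full "Hacker's Delight" figure 3-3 shape, as a standalone sketch:

#include <cassert>
#include <cstdint>

uint32_t RoundUpToPowerOf2(uint32_t x) {
  x = x - 1;       // so powers of two map to themselves
  x |= x >> 1;     // smear the top bit into all lower positions
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(RoundUpToPowerOf2(33) == 64);
  assert(RoundUpToPowerOf2(64) == 64);  // powers of two are returned as-is
  return 0;
}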
@@ -169,23 +159,18 @@
 }
 
 
-inline uint32_t RoundDownToPowerOf2(uint32_t x) {
-  uint32_t rounded_up = RoundUpToPowerOf2(x);
-  if (rounded_up > x) return rounded_up >> 1;
-  return rounded_up;
-}
 
-
-template <typename T, typename U>
-inline bool IsAligned(T value, U alignment) {
+template <typename T>
+static inline bool IsAligned(T value, T alignment) {
+  ASSERT(IsPowerOf2(alignment));
   return (value & (alignment - 1)) == 0;
 }
 
 
 // Returns true if (addr + offset) is aligned.
-inline bool IsAddressAligned(Address addr,
-                             intptr_t alignment,
-                             int offset = 0) {
+static inline bool IsAddressAligned(Address addr,
+                                    intptr_t alignment,
+                                    int offset) {
   intptr_t offs = OffsetFrom(addr + offset);
   return IsAligned(offs, alignment);
 }
@@ -193,14 +178,14 @@
 
 // Returns the maximum of the two parameters.
 template <typename T>
-T Max(T a, T b) {
+static T Max(T a, T b) {
   return a < b ? b : a;
 }
 
 
 // Returns the minimum of the two parameters.
 template <typename T>
-T Min(T a, T b) {
+static T Min(T a, T b) {
   return a < b ? a : b;
 }
 
@@ -256,7 +241,7 @@
 
 // Thomas Wang, Integer Hash Functions.
 // http://www.concentric.net/~Ttwang/tech/inthash.htm
-inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
+static inline uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
   uint32_t hash = key;
   hash = hash ^ seed;
   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
@@ -269,19 +254,7 @@
 }
 
 
-inline uint32_t ComputeLongHash(uint64_t key) {
-  uint64_t hash = key;
-  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
-  hash = hash ^ (hash >> 31);
-  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
-  hash = hash ^ (hash >> 11);
-  hash = hash + (hash << 6);
-  hash = hash ^ (hash >> 22);
-  return (uint32_t) hash;
-}
-
-
-inline uint32_t ComputePointerHash(void* ptr) {
+static inline uint32_t ComputePointerHash(void* ptr) {
   return ComputeIntegerHash(
       static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)),
       v8::internal::kZeroHashSeed);
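
The hunk shows only the first mixing steps of ComputeIntegerHash; the body below completes it following Thomas Wang's published 32-bit integer hash, which the comment above cites. Treat it as a sketch of the technique rather than a verbatim copy of the truncated source:

#include <cstdint>
#include <cstdio>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // Consecutive keys should produce well-spread hash values.
  for (uint32_t k = 0; k < 4; ++k)
    printf("%u -> %08x\n", k, ComputeIntegerHash(k, 0));
  return 0;
}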
@@ -738,7 +711,7 @@
 
 // Compare ASCII/16bit chars to ASCII/16bit chars.
 template <typename lchar, typename rchar>
-inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
   const lchar* limit = lhs + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -765,7 +738,7 @@
 
 
 // Calculate 10^exponent.
-inline int TenToThe(int exponent) {
+static inline int TenToThe(int exponent) {
   ASSERT(exponent <= 9);
   ASSERT(exponent >= 1);
   int answer = 10;
@@ -931,17 +904,9 @@
   explicit EnumSet(T bits = 0) : bits_(bits) {}
   bool IsEmpty() const { return bits_ == 0; }
   bool Contains(E element) const { return (bits_ & Mask(element)) != 0; }
-  bool ContainsAnyOf(const EnumSet& set) const {
-    return (bits_ & set.bits_) != 0;
-  }
   void Add(E element) { bits_ |= Mask(element); }
-  void Add(const EnumSet& set) { bits_ |= set.bits_; }
   void Remove(E element) { bits_ &= ~Mask(element); }
-  void Remove(const EnumSet& set) { bits_ &= ~set.bits_; }
-  void RemoveAll() { bits_ = 0; }
-  void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
   T ToIntegral() const { return bits_; }
-  bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
 
  private:
   T Mask(E element) const {
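
EnumSet, trimmed above, is the usual bitset-over-enum pattern: each enum value selects one bit via Mask, so Add, Remove, and Contains reduce to single bitwise operations. A self-contained sketch of the idea (simplified, not the class from the patch):

#include <cassert>
#include <cstdint>

enum Flag { kA, kB, kC };

struct FlagSet {
  uint32_t bits = 0;
  void Add(Flag e) { bits |= 1u << e; }
  void Remove(Flag e) { bits &= ~(1u << e); }
  bool Contains(Flag e) const { return (bits & (1u << e)) != 0; }
};

int main() {
  FlagSet s;
  s.Add(kA);
  s.Add(kC);
  assert(s.Contains(kA) && !s.Contains(kB) && s.Contains(kC));
  s.Remove(kA);
  assert(!s.Contains(kA));
  return 0;
}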
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 6db9c77..2de8303 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -107,10 +107,7 @@
   SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)      \
   /* Number of code objects found from pc. */                         \
   SC(pc_to_code, V8.PcToCode)                                         \
-  SC(pc_to_code_cached, V8.PcToCodeCached)                            \
-  /* The store-buffer implementation of the write barrier. */         \
-  SC(store_buffer_compactions, V8.StoreBufferCompactions)             \
-  SC(store_buffer_overflows, V8.StoreBufferOverflows)
+  SC(pc_to_code_cached, V8.PcToCodeCached)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                      \
@@ -129,6 +126,10 @@
      V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
+  SC(map_to_fast_elements, V8.MapToFastElements)                      \
+  SC(map_to_fast_double_elements, V8.MapToFastDoubleElements)         \
+  SC(map_to_slow_elements, V8.MapToSlowElements)                      \
+  SC(map_to_external_array_elements, V8.MapToExternalArrayElements)   \
   /* How is the generic keyed-load stub used? */                      \
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
@@ -198,9 +199,6 @@
   SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
   SC(negative_lookups, V8.NegativeLookups)                            \
   SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
-  SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)    \
-  SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses)    \
-  SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates)  \
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
diff --git a/src/v8.cc b/src/v8.cc
index 506f3f6..1e9b5dc 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -36,32 +36,26 @@
 #include "hydrogen.h"
 #include "lithium-allocator.h"
 #include "log.h"
-#include "once.h"
-#include "platform.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
-#include "store-buffer.h"
 
 namespace v8 {
 namespace internal {
 
-V8_DECLARE_ONCE(init_once);
+static Mutex* init_once_mutex = OS::CreateMutex();
+static bool init_once_called = false;
 
 bool V8::is_running_ = false;
-bool V8::has_been_set_up_ = false;
+bool V8::has_been_setup_ = false;
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
 bool V8::use_crankshaft_ = true;
-List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
 
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-
+static Mutex* entropy_mutex = OS::CreateMutex();
 static EntropySource entropy_source;
 
 
 bool V8::Initialize(Deserializer* des) {
-  FlagList::EnforceFlagImplications();
-
   InitializeOncePerProcess();
 
   // The current thread may not yet had entered an isolate to run.
@@ -84,7 +78,7 @@
   if (isolate->IsInitialized()) return true;
 
   is_running_ = true;
-  has_been_set_up_ = true;
+  has_been_setup_ = true;
   has_fatal_error_ = false;
   has_been_disposed_ = false;
 
@@ -102,14 +96,11 @@
   Isolate* isolate = Isolate::Current();
   ASSERT(isolate->IsDefaultIsolate());
 
-  if (!has_been_set_up_ || has_been_disposed_) return;
+  if (!has_been_setup_ || has_been_disposed_) return;
   isolate->TearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
-
-  delete call_completed_callbacks_;
-  call_completed_callbacks_ = NULL;
 }
 
 
@@ -119,7 +110,7 @@
       state[i] = FLAG_random_seed;
     } else if (entropy_source != NULL) {
       uint32_t val;
-      ScopedLock lock(entropy_mutex.Pointer());
+      ScopedLock lock(entropy_mutex);
       entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
       state[i] = val;
     } else {
@@ -148,17 +139,10 @@
 }
 
 
-void V8::SetReturnAddressLocationResolver(
-      ReturnAddressLocationResolver resolver) {
-  StackFrame::SetReturnAddressLocationResolver(resolver);
-}
-
-
 // Used by JavaScript APIs
-uint32_t V8::Random(Context* context) {
-  ASSERT(context->IsGlobalContext());
-  ByteArray* seed = context->random_seed();
-  return random_base(reinterpret_cast<uint32_t*>(seed->GetDataStartAddress()));
+uint32_t V8::Random(Isolate* isolate) {
+  ASSERT(isolate == Isolate::Current());
+  return random_base(isolate->random_seed());
 }
 
 
@@ -171,48 +155,13 @@
 }
 
 
-bool V8::IdleNotification(int hint) {
+bool V8::IdleNotification() {
   // Returning true tells the caller that there is no need to call
   // IdleNotification again.
   if (!FLAG_use_idle_notification) return true;
 
   // Tell the heap that it may want to adjust.
-  return HEAP->IdleNotification(hint);
-}
-
-
-void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
-  if (call_completed_callbacks_ == NULL) {  // Lazy init.
-    call_completed_callbacks_ = new List<CallCompletedCallback>();
-  }
-  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
-    if (callback == call_completed_callbacks_->at(i)) return;
-  }
-  call_completed_callbacks_->Add(callback);
-}
-
-
-void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
-  if (call_completed_callbacks_ == NULL) return;
-  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
-    if (callback == call_completed_callbacks_->at(i)) {
-      call_completed_callbacks_->Remove(i);
-    }
-  }
-}
-
-
-void V8::FireCallCompletedCallback(Isolate* isolate) {
-  if (call_completed_callbacks_ == NULL) return;
-  HandleScopeImplementer* handle_scope_implementer =
-      isolate->handle_scope_implementer();
-  if (!handle_scope_implementer->CallDepthIsZero()) return;
-  // Fire callbacks.  Increase call depth to prevent recursive callbacks.
-  handle_scope_implementer->IncrementCallDepth();
-  for (int i = 0; i < call_completed_callbacks_->length(); i++) {
-    call_completed_callbacks_->at(i)();
-  }
-  handle_scope_implementer->DecrementCallDepth();
+  return HEAP->IdleNotification();
 }
 
 
@@ -223,25 +172,31 @@
 } double_int_union;
 
 
-Object* V8::FillHeapNumberWithRandom(Object* heap_number,
-                                     Context* context) {
-  double_int_union r;
-  uint64_t random_bits = Random(context);
+Object* V8::FillHeapNumberWithRandom(Object* heap_number, Isolate* isolate) {
+  uint64_t random_bits = Random(isolate);
+  // Make a double* from address (heap_number + sizeof(double)).
+  double_int_union* r = reinterpret_cast<double_int_union*>(
+      reinterpret_cast<char*>(heap_number) +
+      HeapNumber::kValueOffset - kHeapObjectTag);
   // Convert 32 random bits to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  static const double binary_million = 1048576.0;
-  r.double_value = binary_million;
-  r.uint64_t_value |= random_bits;
-  r.double_value -= binary_million;
+  const double binary_million = 1048576.0;
+  r->double_value = binary_million;
+  r->uint64_t_value |=  random_bits;
+  r->double_value -= binary_million;
 
-  HeapNumber::cast(heap_number)->set_value(r.double_value);
   return heap_number;
 }
 
-void V8::InitializeOncePerProcessImpl() {
-  // Set up the platform OS support.
-  OS::SetUp();
+
+void V8::InitializeOncePerProcess() {
+  ScopedLock lock(init_once_mutex);
+  if (init_once_called) return;
+  init_once_called = true;
+
+  // Setup the platform OS support.
+  OS::Setup();
 
   use_crankshaft_ = FLAG_crankshaft;
 
@@ -249,26 +204,17 @@
     use_crankshaft_ = false;
   }
 
-  CPU::SetUp();
+  CPU::Setup();
   if (!CPU::SupportsCrankshaft()) {
     use_crankshaft_ = false;
   }
 
   RuntimeProfiler::GlobalSetup();
 
+  // Peephole optimization might interfere with deoptimization.
+  FLAG_peephole_optimization = !use_crankshaft_;
+
   ElementsAccessor::InitializeOncePerProcess();
-
-  if (FLAG_stress_compaction) {
-    FLAG_force_marking_deque_overflows = true;
-    FLAG_gc_global = true;
-    FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
-  }
-
-  LOperand::SetUpCaches();
-}
-
-void V8::InitializeOncePerProcess() {
-  CallOnce(&init_once, &InitializeOncePerProcessImpl);
 }
 
 } }  // namespace v8::internal
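
The reverted FillHeapNumberWithRandom hinges on the binary_million trick: OR-ing 32 random bits into the low mantissa bits of 2^20 yields 2^20 + r/2^32, so subtracting 2^20 leaves a uniform double in [0, 1). A standalone sketch using memcpy for the bit manipulation instead of the union/in-place HeapNumber write in the patch:

#include <cassert>
#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random_bits) {
  const double binary_million = 1048576.0;  // 2^20, mantissa all zeros
  uint64_t bits;
  memcpy(&bits, &binary_million, sizeof(bits));
  bits |= random_bits;             // fill the low 32 mantissa bits
  double d;
  memcpy(&d, &bits, sizeof(d));
  return d - binary_million;       // (2^20 + r/2^32) - 2^20 = r/2^32
}

int main() {
  assert(RandomBitsToDouble(0) == 0.0);
  double d = RandomBitsToDouble(0xffffffffu);
  assert(d >= 0.0 && d < 1.0);  // always lands in [0, 1)
  return 0;
}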
diff --git a/src/v8.h b/src/v8.h
index 59ce602..e565ca5 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -60,11 +60,10 @@
 #include "objects-inl.h"
 #include "spaces-inl.h"
 #include "heap-inl.h"
-#include "incremental-marking-inl.h"
-#include "mark-compact-inl.h"
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
+#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -95,34 +94,26 @@
   // Allows an entropy source to be provided for use in random number
   // generation.
   static void SetEntropySource(EntropySource source);
-  // Support for return-address rewriting profilers.
-  static void SetReturnAddressLocationResolver(
-      ReturnAddressLocationResolver resolver);
   // Random number generation support. Not cryptographically safe.
-  static uint32_t Random(Context* context);
+  static uint32_t Random(Isolate* isolate);
   // We use random numbers internally in memory allocation and in the
   // compilers for security. In order to prevent information leaks we
   // use a separate random state for internal random number
   // generation.
   static uint32_t RandomPrivate(Isolate* isolate);
   static Object* FillHeapNumberWithRandom(Object* heap_number,
-                                          Context* context);
+                                          Isolate* isolate);
 
   // Idle notification directly from the API.
-  static bool IdleNotification(int hint);
-
-  static void AddCallCompletedCallback(CallCompletedCallback callback);
-  static void RemoveCallCompletedCallback(CallCompletedCallback callback);
-  static void FireCallCompletedCallback(Isolate* isolate);
+  static bool IdleNotification();
 
  private:
-  static void InitializeOncePerProcessImpl();
   static void InitializeOncePerProcess();
 
   // True if engine is currently running
   static bool is_running_;
   // True if V8 has ever been run
-  static bool has_been_set_up_;
+  static bool has_been_setup_;
   // True if error has been signaled for current engine
   // (reset to false if engine is restarted)
   static bool has_fatal_error_;
@@ -131,19 +122,8 @@
   static bool has_been_disposed_;
   // True if we are using the crankshaft optimizing compiler.
   static bool use_crankshaft_;
-  // List of callbacks when a Call completes.
-  static List<CallCompletedCallback>* call_completed_callbacks_;
 };
 
-
-// JavaScript defines two kinds of 'nil'.
-enum NilValue { kNullValue, kUndefinedValue };
-
-
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/src/v8conversions.h b/src/v8conversions.h
index 0147d8c..1840e3a 100644
--- a/src/v8conversions.h
+++ b/src/v8conversions.h
@@ -34,13 +34,13 @@
 namespace internal {
 
 // Convert from Number object to C integer.
-inline int32_t NumberToInt32(Object* number) {
+static inline int32_t NumberToInt32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToInt32(number->Number());
 }
 
 
-inline uint32_t NumberToUint32(Object* number) {
+static inline uint32_t NumberToUint32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToUint32(number->Number());
 }
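
The NumberToInt32/NumberToUint32 pair above takes the Smi fast path and otherwise falls back to DoubleToInt32/DoubleToUint32, i.e. the ECMAScript ToInt32/ToUint32 conversions that wrap modulo 2^32. Their observable behaviour matches the JavaScript `| 0` and `>>> 0` idioms:

    // ToInt32: wrap modulo 2^32, then reinterpret as signed 32-bit.
    print((4294967296 + 5) | 0);  // 5
    print(2147483648 | 0);        // -2147483648
    // ToUint32: wrap modulo 2^32, keep unsigned.
    print(-1 >>> 0);              // 4294967295
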
diff --git a/src/v8globals.h b/src/v8globals.h
index bfc5e23..bf843e5 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_V8GLOBALS_H_
 
 #include "globals.h"
-#include "checks.h"
 
 namespace v8 {
 namespace internal {
@@ -80,20 +79,18 @@
     reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
-const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
 const uint32_t kSlotsZapValue = 0xbeefdeef;
 const uint32_t kDebugZapValue = 0xbadbaddb;
-const uint32_t kFreeListZapValue = 0xfeed1eaf;
 #endif
 
 
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
-const int kPageSizeBits = 20;
+// Number of bits to represent the page size for paged spaces. The value of 13
+// gives 8K bytes per page.
+const int kPageSizeBits = 13;
 
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far this constant is
@@ -101,18 +98,24 @@
 const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
 
 
 // -----------------------------------------------------------------------------
 // Forward declarations for frequently used classes
+// (sorted alphabetically)
 
 class AccessorInfo;
 class Allocation;
 class Arguments;
 class Assembler;
 class AssertNoAllocation;
+class BreakableStatement;
 class Code;
 class CodeGenerator;
 class CodeStub;
@@ -122,10 +125,12 @@
 class DebugInfo;
 class Descriptor;
 class DescriptorArray;
+class Expression;
 class ExternalReference;
 class FixedArray;
+class FunctionEntry;
+class FunctionLiteral;
 class FunctionTemplateInfo;
-class MemoryChunk;
 class SeededNumberDictionary;
 class UnseededNumberDictionary;
 class StringDictionary;
@@ -134,6 +139,7 @@
 class HeapObject;
 class IC;
 class InterceptorInfo;
+class IterationStatement;
 class JSArray;
 class JSFunction;
 class JSObject;
@@ -144,19 +150,32 @@
 class MapSpace;
 class MarkCompactCollector;
 class NewSpace;
+class NodeVisitor;
 class Object;
 class MaybeObject;
 class OldSpace;
+class Property;
 class Foreign;
+class RegExpNode;
+struct RegExpCompileData;
+class RegExpTree;
+class RegExpCompiler;
+class RegExpVisitor;
 class Scope;
-class ScopeInfo;
+template<class Allocator = FreeStoreAllocationPolicy> class ScopeInfo;
+class SerializedScopeInfo;
 class Script;
+class Slot;
 class Smi;
 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
     class SplayTree;
+class Statement;
 class String;
 class Struct;
+class SwitchStatement;
+class AstVisitor;
 class Variable;
+class VariableProxy;
 class RelocInfo;
 class Deserializer;
 class MessageLocation;
@@ -236,6 +255,12 @@
 };
 
 
+// Callback function on object slots, used for iterating heap object slots in
+// HeapObjects, global pointers to heap objects, etc. The callback allows the
+// callback function to change the value of the slot.
+typedef void (*ObjectSlotCallback)(HeapObject** pointer);
+
+
 // Callback function used for iterating objects in heap spaces,
 // for example, scanning heap objects.
 typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -282,9 +307,7 @@
   NO_CALL_FUNCTION_FLAGS = 0,
   // Receiver might implicitly be the global objects. If it is, the
   // hole is passed to the call function stub.
-  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
-  // The call target is cached in the instruction stream.
-  RECORD_CALL_TARGET = 1 << 1
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
 };
 
 
@@ -294,17 +317,28 @@
 };
 
 
-// The Store Buffer (GC).
-typedef enum {
-  kStoreBufferFullEvent,
-  kStoreBufferStartScanningPagesEvent,
-  kStoreBufferScanningPageEvent
-} StoreBufferEvent;
-
-
-typedef void (*StoreBufferCallback)(Heap* heap,
-                                    MemoryChunk* page,
-                                    StoreBufferEvent event);
+// Type of properties.
+// Order of properties is significant.
+// Must fit in the BitField PropertyDetails::TypeField.
+// A copy of this is in mirror-debugger.js.
+enum PropertyType {
+  NORMAL                    = 0,  // only in slow mode
+  FIELD                     = 1,  // only in fast mode
+  CONSTANT_FUNCTION         = 2,  // only in fast mode
+  CALLBACKS                 = 3,
+  HANDLER                   = 4,  // only in lookup results, not in descriptors
+  INTERCEPTOR               = 5,  // only in lookup results, not in descriptors
+  MAP_TRANSITION            = 6,  // only in fast mode
+  ELEMENTS_TRANSITION       = 7,
+  CONSTANT_TRANSITION       = 8,  // only in fast mode
+  NULL_DESCRIPTOR           = 9,  // only in fast mode
+  // All properties before MAP_TRANSITION are real.
+  FIRST_PHANTOM_PROPERTY_TYPE = MAP_TRANSITION,
+  // There are no IC stubs for NULL_DESCRIPTORS. Therefore,
+  // NULL_DESCRIPTOR can be used as the type flag for IC stubs for
+  // nonexistent properties.
+  NONEXISTENT = NULL_DESCRIPTOR
+};
 
 
 // Whether to remove map transitions and constant transitions from a
@@ -441,11 +475,21 @@
                   SAHF = 0,    // x86
                   FPU = 1};    // MIPS
 
+// The Strict Mode (ECMA-262 5th edition, 4.2.2).
+enum StrictModeFlag {
+  kNonStrictMode,
+  kStrictMode,
+  // This value is never used, but is needed to prevent GCC 4.5 from failing
+  // to compile when we assert that a flag is either kNonStrictMode or
+  // kStrictMode.
+  kInvalidStrictFlag
+};
+
 
 // Used to specify if a macro instruction must perform a smi check on tagged
 // values.
 enum SmiCheckType {
-  DONT_DO_SMI_CHECK,
+  DONT_DO_SMI_CHECK = 0,
   DO_SMI_CHECK
 };
 
@@ -453,106 +497,20 @@
 // Used to specify whether a receiver is implicitly or explicitly
 // provided to a call.
 enum CallKind {
-  CALL_AS_METHOD,
+  CALL_AS_METHOD = 0,
   CALL_AS_FUNCTION
 };
 
 
-enum ScopeType {
-  EVAL_SCOPE,      // The top-level scope for an eval source.
-  FUNCTION_SCOPE,  // The top-level scope for a function.
-  MODULE_SCOPE,    // The scope introduced by a module literal
-  GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
-  CATCH_SCOPE,     // The scope introduced by catch.
-  BLOCK_SCOPE,     // The scope introduced by a new block.
-  WITH_SCOPE       // The scope introduced by with.
-};
-
-
-const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
-const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
-const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
+static const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;
+static const uint32_t kHoleNanLower32 = 0xFFFFFFFF;
+static const uint32_t kNaNOrInfinityLowerBoundUpper32 = 0x7FF00000;
 
 const uint64_t kHoleNanInt64 =
     (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
 const uint64_t kLastNonNaNInt64 =
     (static_cast<uint64_t>(kNaNOrInfinityLowerBoundUpper32) << 32);
 
-
-enum VariableMode {
-  // User declared variables:
-  VAR,             // declared via 'var', and 'function' declarations
-
-  CONST,           // declared via 'const' declarations
-
-  CONST_HARMONY,   // declared via 'const' declarations in harmony mode
-
-  LET,             // declared via 'let' declarations
-
-  // Variables introduced by the compiler:
-  DYNAMIC,         // always require dynamic lookup (we don't know
-                   // the declaration)
-
-  DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
-                   // variable is global unless it has been shadowed
-                   // by an eval-introduced variable
-
-  DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
-                   // variable is local and where it is unless it
-                   // has been shadowed by an eval-introduced
-                   // variable
-
-  INTERNAL,        // like VAR, but not user-visible (may or may not
-                   // be in a context)
-
-  TEMPORARY        // temporary variables (not user-visible), never
-                   // in a context
-};
-
-
-// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
-// and immutable bindings that can be in two states: initialized and
-// uninitialized. In ES5 only immutable bindings have these two states. When
-// accessing a binding, it needs to be checked for initialization. However in
-// the following cases the binding is initialized immediately after creation
-// so the initialization check can always be skipped:
-// 1. Var declared local variables.
-//      var foo;
-// 2. A local variable introduced by a function declaration.
-//      function foo() {}
-// 3. Parameters
-//      function x(foo) {}
-// 4. Catch bound variables.
-//      try {} catch (foo) {}
-// 6. Function variables of named function expressions.
-//      var x = function foo() {}
-// 7. Implicit binding of 'this'.
-// 8. Implicit binding of 'arguments' in functions.
-//
-// ES5 specified object environment records which are introduced by ES elements
-// such as Program and WithStatement that associate identifier bindings with the
-// properties of some object. In the specification only mutable bindings exist
-// (which may be non-writable) and have no distinct initialization step. However
-// V8 allows const declarations in global code with distinct creation and
-// initialization steps which are represented by non-writable properties in the
-// global object. As a result also these bindings need to be checked for
-// initialization.
-//
-// The following enum specifies a flag that indicates if the binding needs a
-// distinct initialization step (kNeedsInitialization) or if the binding is
-// immediately initialized upon creation (kCreatedInitialized).
-enum InitializationFlag {
-  kNeedsInitialization,
-  kCreatedInitialized
-};
-
-
-enum ClearExceptionFlag {
-  KEEP_EXCEPTION,
-  CLEAR_EXCEPTION
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_V8GLOBALS_H_
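
The restored kQuietNaNMask covers bits 51 to 62: the eleven exponent bits plus the topmost mantissa (quiet) bit; kQuietNaNHighBitsMask is the same mask viewed in the upper 32-bit word. A quick check against the canonical NaN, with typed arrays standing in for the C++ bit casts (a sketch; assumes a little-endian host):

    var buf = new ArrayBuffer(8);
    var f64 = new Float64Array(buf);
    var u32 = new Uint32Array(buf);

    var kQuietNaNHighBitsMask = 0xfff << 19;  // bits 19..30 of the high word

    f64[0] = NaN;       // the canonical quiet NaN
    var high = u32[1];  // high word on little-endian hosts
    print((high & kQuietNaNHighBitsMask) === kQuietNaNHighBitsMask);  // true
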
diff --git a/src/v8memory.h b/src/v8memory.h
index f71de82..901e78d 100644
--- a/src/v8memory.h
+++ b/src/v8memory.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,10 +60,6 @@
     return *reinterpret_cast<int*>(addr);
   }
 
-  static unsigned& unsigned_at(Address addr) {
-    return *reinterpret_cast<unsigned*>(addr);
-  }
-
   static double& double_at(Address addr)  {
     return *reinterpret_cast<double*>(addr);
   }
diff --git a/src/v8natives.js b/src/v8natives.js
index f1e8084..588bdb2 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,18 +28,18 @@
 // This file relies on the fact that the following declarations have been made
 //
 // in runtime.js:
-// var $Object = global.Object;
-// var $Boolean = global.Boolean;
-// var $Number = global.Number;
-// var $Function = global.Function;
-// var $Array = global.Array;
-// var $NaN = 0/0;
+// const $Object = global.Object;
+// const $Boolean = global.Boolean;
+// const $Number = global.Number;
+// const $Function = global.Function;
+// const $Array = global.Array;
+// const $NaN = 0/0;
 //
 // in math.js:
-// var $floor = MathFloor
+// const $floor = MathFloor
 
-var $isNaN = GlobalIsNaN;
-var $isFinite = GlobalIsFinite;
+const $isNaN = GlobalIsNaN;
+const $isFinite = GlobalIsFinite;
 
 // ----------------------------------------------------------------------------
 
@@ -60,6 +60,18 @@
   %ToFastProperties(object);
 }
 
+// Emulates JSC by installing functions on a hidden prototype that
+// lies above the current object/prototype.  This lets you override
+// functions on String.prototype etc. and then restore the old function
+// with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
+function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
+  %CheckIsBootstrapping();
+  var hidden_prototype = new $Object();
+  %SetHiddenPrototype(object, hidden_prototype);
+  InstallFunctions(hidden_prototype, attributes, functions);
+}
+
+
 // Prevents changes to the prototype of a built-in function.
 // The "prototype" property of the function object is made non-configurable,
 // and the prototype object is made non-extensible. The latter prevents
@@ -127,9 +139,8 @@
     // The spec says ToString should be evaluated before ToInt32.
     string = TO_STRING_INLINE(string);
     radix = TO_INT32(radix);
-    if (!(radix == 0 || (2 <= radix && radix <= 36))) {
+    if (!(radix == 0 || (2 <= radix && radix <= 36)))
       return $NaN;
-    }
   }
 
   if (%_HasCachedArrayIndex(string) &&
@@ -151,23 +162,28 @@
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
+  var receiver = this;
   var global_receiver = %GlobalReceiver(global);
+
+  if (receiver == null && !IS_UNDETECTABLE(receiver)) {
+    receiver = global_receiver;
+  }
+
+  var this_is_global_receiver = (receiver === global_receiver);
   var global_is_detached = (global === global_receiver);
 
   // For consistency with JSC we require the global object passed to
   // eval to be the global object from which 'eval' originated. This
   // is not mandated by the spec.
-  // We only throw if the global has been detached, since we need the
-  // receiver as this-value for the call.
-  if (global_is_detached) {
-    throw new $EvalError('The "this" value passed to eval must ' +
+  if (!this_is_global_receiver || global_is_detached) {
+    throw new $EvalError('The "this" object passed to eval must ' +
                          'be the global object from which eval originated');
   }
 
   var f = %CompileString(x);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(global_receiver, f);
+  return %_CallFunction(receiver, f);
 }
 
 
@@ -177,14 +193,13 @@
 function SetUpGlobal() {
   %CheckIsBootstrapping();
   // ECMA 262 - 15.1.1.1.
-  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
 
   // ECMA-262 - 15.1.1.2.
-  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
 
   // ECMA-262 - 15.1.1.3.
-  %SetProperty(global, "undefined", void 0,
-               DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
 
   // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
@@ -284,8 +299,7 @@
     receiver = %GlobalReceiver(global);
   }
   if (!IS_SPEC_FUNCTION(fun)) {
-    throw new $TypeError(
-        'Object.prototype.__defineGetter__: Expecting function');
+    throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
   }
   var desc = new PropertyDescriptor();
   desc.setGet(fun);
@@ -331,9 +345,8 @@
 
 
 function ObjectKeys(obj) {
-  if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.keys"]);
-  }
+  if (!IS_SPEC_OBJECT(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
     var names = CallTrap0(handler, "keys", DerivedKeysTrap);
@@ -359,7 +372,6 @@
 
 // ES5 8.10.3.
 function IsGenericDescriptor(desc) {
-  if (IS_UNDEFINED(desc)) return false;
   return !(IsAccessorDescriptor(desc) || IsDataDescriptor(desc));
 }
 
@@ -464,7 +476,7 @@
 
 // For Harmony proxies.
 function ToCompletePropertyDescriptor(obj) {
-  var desc = ToPropertyDescriptor(obj);
+  var desc = ToPropertyDescriptor(obj)
   if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) {
     if (!desc.hasValue()) desc.setValue(void 0);
     if (!desc.hasWritable()) desc.setWritable(false);
@@ -660,21 +672,6 @@
 }
 
 
-// ES5 section 8.12.7.
-function Delete(obj, p, should_throw) {
-  var desc = GetOwnProperty(obj, p);
-  if (IS_UNDEFINED(desc)) return true;
-  if (desc.isConfigurable()) {
-    %DeleteProperty(obj, p, 0);
-    return true;
-  } else if (should_throw) {
-    throw MakeTypeError("define_disallowed", [p]);
-  } else {
-    return;
-  }
-}
-
-
 // Harmony proxies.
 function DefineProxyProperty(obj, p, attributes, should_throw) {
   var handler = %GetHandler(obj);
@@ -692,7 +689,12 @@
 
 
 // ES5 8.12.9.
-function DefineObjectProperty(obj, p, desc, should_throw) {
+function DefineOwnProperty(obj, p, desc, should_throw) {
+  if (%IsJSProxy(obj)) {
+    var attributes = FromGenericPropertyDescriptor(desc);
+    return DefineProxyProperty(obj, p, attributes, should_throw);
+  }
+
   var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
   // A false value here means that access checks failed.
   if (current_or_access === false) return void 0;
@@ -706,7 +708,7 @@
     if (should_throw) {
       throw MakeTypeError("define_disallowed", [p]);
     } else {
-      return false;
+      return;
     }
   }
 
@@ -736,7 +738,7 @@
         if (should_throw) {
           throw MakeTypeError("redefine_disallowed", [p]);
         } else {
-          return false;
+          return;
         }
       }
       // Step 8
@@ -746,7 +748,7 @@
           if (should_throw) {
             throw MakeTypeError("redefine_disallowed", [p]);
           } else {
-            return false;
+            return;
           }
         }
         // Step 10a
@@ -755,7 +757,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return false;
+              return;
             }
           }
           if (!current.isWritable() && desc.hasValue() &&
@@ -763,7 +765,7 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return false;
+              return;
             }
           }
         }
@@ -773,14 +775,14 @@
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return false;
+              return;
             }
           }
           if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
             if (should_throw) {
               throw MakeTypeError("redefine_disallowed", [p]);
             } else {
-              return false;
+              return;
             }
           }
         }
@@ -834,6 +836,10 @@
     }
 
     %DefineOrRedefineDataProperty(obj, p, value, flag);
+  } else if (IsGenericDescriptor(desc)) {
+    // Step 12 - updating an existing accessor property with generic
+    //           descriptor. Changing flags only.
+    %DefineOrRedefineAccessorProperty(obj, p, GETTER, current.getGet(), flag);
   } else {
     // There are 3 cases that lead here:
     // Step 4b - defining a new accessor property.
@@ -841,113 +847,30 @@
     //                 property.
     // Step 12 - updating an existing accessor property with an accessor
     //           descriptor.
-    var getter = desc.hasGetter() ? desc.getGet() : null;
-    var setter = desc.hasSetter() ? desc.getSet() : null;
-    %DefineOrRedefineAccessorProperty(obj, p, getter, setter, flag);
+    if (desc.hasGetter()) {
+      %DefineOrRedefineAccessorProperty(obj, p, GETTER, desc.getGet(), flag);
+    }
+    if (desc.hasSetter()) {
+      %DefineOrRedefineAccessorProperty(obj, p, SETTER, desc.getSet(), flag);
+    }
   }
   return true;
 }
 
 
-// ES5 section 15.4.5.1.
-function DefineArrayProperty(obj, p, desc, should_throw) {
-  // Note that the length of an array is not actually stored as part of the
-  // property, hence we use generated code throughout this function instead of
-  // DefineObjectProperty() to modify its value.
-
-  // Step 3 - Special handling for length property.
-  if (p == "length") {
-    var length = obj.length;
-    if (!desc.hasValue()) {
-      return DefineObjectProperty(obj, "length", desc, should_throw);
-    }
-    var new_length = ToUint32(desc.getValue());
-    if (new_length != ToNumber(desc.getValue())) {
-      throw new $RangeError('defineProperty() array length out of range');
-    }
-    var length_desc = GetOwnProperty(obj, "length");
-    if (new_length != length && !length_desc.isWritable()) {
-      if (should_throw) {
-        throw MakeTypeError("redefine_disallowed", [p]);
-      } else {
-        return false;
-      }
-    }
-    var threw = false;
-    while (new_length < length--) {
-      if (!Delete(obj, ToString(length), false)) {
-        new_length = length + 1;
-        threw = true;
-        break;
-      }
-    }
-    // Make sure the below call to DefineObjectProperty() doesn't overwrite
-    // any magic "length" property by removing the value.
-    obj.length = new_length;
-    desc.value_ = void 0;
-    desc.hasValue_ = false;
-    if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
-      if (should_throw) {
-        throw MakeTypeError("redefine_disallowed", [p]);
-      } else {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  // Step 4 - Special handling for array index.
-  var index = ToUint32(p);
-  if (index == ToNumber(p) && index != 4294967295) {
-    var length = obj.length;
-    var length_desc = GetOwnProperty(obj, "length");
-    if ((index >= length && !length_desc.isWritable()) ||
-        !DefineObjectProperty(obj, p, desc, true)) {
-      if (should_throw) {
-        throw MakeTypeError("define_disallowed", [p]);
-      } else {
-        return false;
-      }
-    }
-    if (index >= length) {
-      obj.length = index + 1;
-    }
-    return true;
-  }
-
-  // Step 5 - Fallback to default implementation.
-  return DefineObjectProperty(obj, p, desc, should_throw);
-}
-
-
-// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
-function DefineOwnProperty(obj, p, desc, should_throw) {
-  if (%IsJSProxy(obj)) {
-    var attributes = FromGenericPropertyDescriptor(desc);
-    return DefineProxyProperty(obj, p, attributes, should_throw);
-  } else if (IS_ARRAY(obj)) {
-    return DefineArrayProperty(obj, p, desc, should_throw);
-  } else {
-    return DefineObjectProperty(obj, p, desc, should_throw);
-  }
-}
-
-
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
-  if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.getPrototypeOf"]);
-  }
+  if (!IS_SPEC_OBJECT(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
   return %GetPrototype(obj);
 }
 
 
 // ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
-  if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object",
-                        ["Object.getOwnPropertyDescriptor"]);
-  }
+  if (!IS_SPEC_OBJECT(obj))
+    throw MakeTypeError("obj_ctor_property_non_object",
+                        ["getOwnPropertyDescriptor"]);
   var desc = GetOwnProperty(obj, p);
   return FromPropertyDescriptor(desc);
 }
@@ -960,14 +883,14 @@
   }
   var n = ToUint32(obj.length);
   var array = new $Array(n);
-  var names = {};  // TODO(rossberg): use sets once they are ready.
+  var names = {}
   for (var index = 0; index < n; index++) {
     var s = ToString(obj[index]);
     if (s in names) {
-      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]);
+      throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s])
     }
     array[index] = s;
-    names[s] = 0;
+    names.s = 0;
   }
   return array;
 }
@@ -975,9 +898,9 @@
 
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
-  if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.getOwnPropertyNames"]);
-  }
+  if (!IS_SPEC_OBJECT(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
+
   // Special handling for proxies.
   if (%IsJSProxy(obj)) {
     var handler = %GetHandler(obj);
@@ -994,9 +917,8 @@
   if (%GetInterceptorInfo(obj) & 1) {
     var indexedInterceptorNames =
         %GetIndexedInterceptorElementNames(obj);
-    if (indexedInterceptorNames) {
+    if (indexedInterceptorNames)
       propertyNames = propertyNames.concat(indexedInterceptorNames);
-    }
   }
 
   // Find all the named properties.
@@ -1022,9 +944,8 @@
     // We need to check for the exact property value since for intrinsic
     // properties like toString if(propertySet["toString"]) will always
     // succeed.
-    if (propertySet[name] === true) {
+    if (propertySet[name] === true)
       continue;
-    }
     propertySet[name] = true;
     propertyNames[j++] = name;
   }
@@ -1049,7 +970,7 @@
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.defineProperty"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
   }
   var name = ToString(p);
   if (%IsJSProxy(obj)) {
@@ -1100,17 +1021,14 @@
 
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
-  if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.defineProperties"]);
-  }
+  if (!IS_SPEC_OBJECT(obj))
+    throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
   var names = GetOwnEnumerablePropertyNames(props);
-  var descriptors = new InternalArray();
   for (var i = 0; i < names.length; i++) {
-    descriptors.push(ToPropertyDescriptor(props[names[i]]));
-  }
-  for (var i = 0; i < names.length; i++) {
-    DefineOwnProperty(obj, names[i], descriptors[i], true);
+    var name = names[i];
+    var desc = ToPropertyDescriptor(props[name]);
+    DefineOwnProperty(obj, name, desc, true);
   }
   return obj;
 }
@@ -1124,20 +1042,12 @@
     throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
   }
 
-  if (%IsJSFunctionProxy(obj)) {
+  if (IS_SPEC_FUNCTION(obj)) {
     var callTrap = %GetCallTrap(obj);
     var constructTrap = %GetConstructTrap(obj);
     var code = DelegateCallAndConstruct(callTrap, constructTrap);
     %Fix(obj);  // becomes a regular function
     %SetCode(obj, code);
-    // TODO(rossberg): What about length and other properties? Not specified.
-    // We just put in some half-reasonable defaults for now.
-    var prototype = new $Object();
-    $Object.defineProperty(prototype, "constructor",
-      {value: obj, writable: true, enumerable: false, configurable: true});
-    // TODO(v8:1530): defineProperty does not handle prototype and length.
-    %FunctionSetPrototype(obj, prototype);
-    obj.length = 0;
   } else {
     %Fix(obj);
   }
@@ -1148,7 +1058,7 @@
 // ES5 section 15.2.3.8.
 function ObjectSeal(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.seal"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["seal"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1170,7 +1080,7 @@
 // ES5 section 15.2.3.9.
 function ObjectFreeze(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.freeze"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["freeze"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1193,7 +1103,7 @@
 // ES5 section 15.2.3.10
 function ObjectPreventExtension(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.preventExtension"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
   }
   if (%IsJSProxy(obj)) {
     ProxyFix(obj);
@@ -1206,7 +1116,7 @@
 // ES5 section 15.2.3.11
 function ObjectIsSealed(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.isSealed"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["isSealed"]);
   }
   if (%IsJSProxy(obj)) {
     return false;
@@ -1227,7 +1137,7 @@
 // ES5 section 15.2.3.12
 function ObjectIsFrozen(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.isFrozen"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["isFrozen"]);
   }
   if (%IsJSProxy(obj)) {
     return false;
@@ -1249,7 +1159,7 @@
 // ES5 section 15.2.3.13
 function ObjectIsExtensible(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
-    throw MakeTypeError("called_on_non_object", ["Object.isExtensible"]);
+    throw MakeTypeError("obj_ctor_property_non_object", ["isExtensible"]);
   }
   if (%IsJSProxy(obj)) {
     return true;
@@ -1258,16 +1168,6 @@
 }
 
 
-// Harmony egal.
-function ObjectIs(obj1, obj2) {
-  if (obj1 === obj2) {
-    return (obj1 !== 0) || (1 / obj1 === 1 / obj2);
-  } else {
-    return (obj1 !== obj1) && (obj2 !== obj2);
-  }
-}
-
-
 %SetCode($Object, function(x) {
   if (%_IsConstructCall()) {
     if (x == null) return this;
@@ -1307,7 +1207,6 @@
     "getPrototypeOf", ObjectGetPrototypeOf,
     "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
     "getOwnPropertyNames", ObjectGetOwnPropertyNames,
-    "is", ObjectIs,
     "isExtensible", ObjectIsExtensible,
     "isFrozen", ObjectIsFrozen,
     "isSealed", ObjectIsSealed,
@@ -1338,9 +1237,8 @@
 function BooleanValueOf() {
   // NOTE: Both Boolean objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
+  if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this))
     throw new $TypeError('Boolean.prototype.valueOf is not generic');
-  }
   return %_ValueOf(this);
 }
 
@@ -1380,9 +1278,8 @@
   // 'this'. This is not as dictated by ECMA-262.
   var number = this;
   if (!IS_NUMBER(this)) {
-    if (!IS_NUMBER_WRAPPER(this)) {
+    if (!IS_NUMBER_WRAPPER(this))
       throw new $TypeError('Number.prototype.toString is not generic');
-    }
     // Get the value of this number in case it's an object.
     number = %_ValueOf(this);
   }
@@ -1415,9 +1312,8 @@
 function NumberValueOf() {
   // NOTE: Both Number objects and values can enter here as
   // 'this'. This is not as dictated by ECMA-262.
-  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this)) {
+  if (!IS_NUMBER(this) && !IS_NUMBER_WRAPPER(this))
     throw new $TypeError('Number.prototype.valueOf is not generic');
-  }
   return %_ValueOf(this);
 }
 
@@ -1443,8 +1339,7 @@
   if (!IS_UNDEFINED(fractionDigits)) {
     f = TO_INTEGER(fractionDigits);
     if (f < 0 || f > 20) {
-      throw new $RangeError(
-          "toExponential() argument must be between 0 and 20");
+      throw new $RangeError("toExponential() argument must be between 0 and 20");
     }
   }
   if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
@@ -1472,18 +1367,6 @@
 }
 
 
-// Harmony isFinite.
-function NumberIsFinite(number) {
-  return IS_NUMBER(number) && NUMBER_IS_FINITE(number);
-}
-
-
-// Harmony isNaN.
-function NumberIsNaN(number) {
-  return IS_NUMBER(number) && NUMBER_IS_NAN(number);
-}
-
-
 // ----------------------------------------------------------------------------
 
 function SetUpNumber() {
@@ -1500,8 +1383,7 @@
                DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.2.
-  %SetProperty($Number, "MIN_VALUE", 5e-324,
-               DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %SetProperty($Number, "MIN_VALUE", 5e-324, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 section 15.7.3.3.
   %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1528,10 +1410,6 @@
     "toExponential", NumberToExponential,
     "toPrecision", NumberToPrecision
   ));
-  InstallFunctions($Number, DONT_ENUM, $Array(
-    "isFinite", NumberIsFinite,
-    "isNaN", NumberIsNaN
-  ));
 }
 
 SetUpNumber();
@@ -1577,54 +1455,53 @@
 // ES5 15.3.4.5
 function FunctionBind(this_arg) { // Length is 1.
   if (!IS_SPEC_FUNCTION(this)) {
-    throw new $TypeError('Bind must be called on a function');
+      throw new $TypeError('Bind must be called on a function');
   }
-  var boundFunction = function () {
-    // Poison .arguments and .caller, but is otherwise not detectable.
-    "use strict";
-    // This function must not use any object literals (Object, Array, RegExp),
-    // since the literals-array is being used to store the bound data.
-    if (%_IsConstructCall()) {
-      return %NewObjectFromBound(boundFunction);
-    }
-    var bindings = %BoundFunctionGetBindings(boundFunction);
+  // this_arg is not an argument that should be bound.
+  var argc_bound = (%_ArgumentsLength() || 1) - 1;
+  var fn = this;
 
-    var argc = %_ArgumentsLength();
-    if (argc == 0) {
-      return %Apply(bindings[0], bindings[1], bindings, 2, bindings.length - 2);
+  if (argc_bound == 0) {
+    var result = function() {
+      if (%_IsConstructCall()) {
+        // %NewObjectFromBound implicitly uses arguments passed to this
+        // function. We do not pass the arguments object explicitly to avoid
+        // materializing it and guarantee that this function will be optimized.
+        return %NewObjectFromBound(fn, null);
+      }
+      return %Apply(fn, this_arg, arguments, 0, %_ArgumentsLength());
+    };
+  } else {
+    var bound_args = new InternalArray(argc_bound);
+    for(var i = 0; i < argc_bound; i++) {
+      bound_args[i] = %_Arguments(i+1);
     }
-    if (bindings.length === 2) {
-      return %Apply(bindings[0], bindings[1], arguments, 0, argc);
-    }
-    var bound_argc = bindings.length - 2;
-    var argv = new InternalArray(bound_argc + argc);
-    for (var i = 0; i < bound_argc; i++) {
-      argv[i] = bindings[i + 2];
-    }
-    for (var j = 0; j < argc; j++) {
-      argv[i++] = %_Arguments(j);
-    }
-    return %Apply(bindings[0], bindings[1], argv, 0, bound_argc + argc);
-  };
 
-  %FunctionRemovePrototype(boundFunction);
-  var new_length = 0;
-  if (%_ClassOf(this) == "Function") {
-    // Function or FunctionProxy.
-    var old_length = this.length;
-    // FunctionProxies might provide a non-UInt32 value. If so, ignore it.
-    if ((typeof old_length === "number") &&
-        ((old_length >>> 0) === old_length)) {
+    var result = function() {
+      // If this is a construct call we use a special runtime method
+      // to generate the actual object using the bound function.
+      if (%_IsConstructCall()) {
+        // %NewObjectFromBound implicitly uses arguments passed to this
+        // function. We do not pass the arguments object explicitly to avoid
+        // materializing it and guarantee that this function will be optimized.
+        return %NewObjectFromBound(fn, bound_args);
+      }
+
+      // Combine the args we got from the bind call with the args
+      // given as argument to the invocation.
       var argc = %_ArgumentsLength();
-      if (argc > 0) argc--;  // Don't count the thisArg as parameter.
-      new_length = old_length - argc;
-      if (new_length < 0) new_length = 0;
-    }
+      var args = new InternalArray(argc + argc_bound);
+      // Add bound arguments.
+      for (var i = 0; i < argc_bound; i++) {
+        args[i] = bound_args[i];
+      }
+      // Add arguments from call.
+      for (var i = 0; i < argc; i++) {
+        args[argc_bound + i] = %_Arguments(i);
+      }
+      return %Apply(fn, this_arg, args, 0, argc + argc_bound);
+    };
   }
-  // This runtime function finds any remaining arguments on the stack,
-  // so we don't pass the arguments object.
-  var result = %FunctionBindArguments(boundFunction, this,
-                                      this_arg, new_length);
 
   // We already have caller and arguments properties on functions,
   // which are non-configurable. It therefore makes no sense to
@@ -1632,7 +1509,17 @@
   // that bind should make these throw a TypeError if get or set
   // is called and make them non-enumerable and non-configurable.
   // To be consistent with our normal functions we leave this as it is.
-  // TODO(lrn): Do set these to be thrower.
+
+  %FunctionRemovePrototype(result);
+  %FunctionSetBound(result);
+  // Set the correct length. If this is a function proxy, this.length might
+  // throw, or return a bogus result. Leave length alone in that case.
+  // TODO(rossberg): This is underspecified in the current proxy proposal.
+  try {
+    var old_length = ToInteger(this.length);
+    var length = (old_length - argc_bound) > 0 ? old_length - argc_bound : 0;
+    %BoundFunctionSetLength(result, length);
+  } catch(x) {}
   return result;
 }
 
diff --git a/src/v8threads.cc b/src/v8threads.cc
index fd8d536..3881d66 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -154,7 +154,7 @@
 
 bool ThreadManager::RestoreThread() {
   ASSERT(IsLockedByCurrentThread());
-  // First check whether the current thread has been 'lazily archived', i.e.
+  // First check whether the current thread has been 'lazily archived', ie
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
   if (lazily_archived_thread_.Equals(ThreadId::Current())) {
diff --git a/src/v8utils.cc b/src/v8utils.cc
index 042a60f..bf0e05d 100644
--- a/src/v8utils.cc
+++ b/src/v8utils.cc
@@ -316,7 +316,7 @@
   for (const char* p = data_; p < end; p++) {
     char c = *p;
     if ((c & 0x80) != 0) {
-      // Non-ASCII detected:
+      // Non-ascii detected:
       is_ascii = false;
 
       // Report the error and abort if appropriate:
@@ -329,7 +329,7 @@
                c, filename_, line_no, char_no);
 
         // Allow for some context up to kNumberOfLeadingContextChars chars
-        // before the offending non-ASCII char to help the user see where
+        // before the offending non-ascii char to help the user see where
         // the offending char is.
         const int kNumberOfLeadingContextChars = 10;
         const char* err_context = p - kNumberOfLeadingContextChars;
@@ -345,7 +345,7 @@
         OS::Abort();
       }
 
-      break;  // Non-ASCII detected.  No need to continue scanning.
+      break;  // Non-ascii detected.  No need to continue scanning.
     }
     if (c == '\n') {
       start_of_line = p;
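
The scan above flags any byte with the top bit set, i.e. anything outside 7-bit ASCII. The same predicate at the JavaScript string level, over code units rather than raw bytes (a sketch; isSevenBitAscii is an illustrative name):

    function isSevenBitAscii(s) {
      for (var i = 0; i < s.length; i++) {
        if (s.charCodeAt(i) > 0x7f) return false;  // outside 7-bit ASCII
      }
      return true;
    }
    print(isSevenBitAscii("hello"));       // true
    print(isSevenBitAscii("h\u00e9llo"));  // false
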
diff --git a/src/v8utils.h b/src/v8utils.h
index c73222a..aada521 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -142,14 +142,8 @@
 }
 
 
-template <typename T, typename U>
-inline void MemsetPointer(T** dest, U* value, int counter) {
-#ifdef DEBUG
-  T* a = NULL;
-  U* b = NULL;
-  a = b;  // Fake assignment to check assignability.
-  USE(a);
-#endif  // DEBUG
+template <typename T>
+static inline void MemsetPointer(T** dest, T* value, int counter) {
 #if defined(V8_HOST_ARCH_IA32)
 #define STOS "stosl"
 #elif defined(V8_HOST_ARCH_X64)
@@ -202,7 +196,7 @@
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
 template <typename sourcechar, typename sinkchar>
-inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
 #ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
diff --git a/src/variables.cc b/src/variables.cc
index 32ad5bc..971061b 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -37,11 +37,10 @@
 // ----------------------------------------------------------------------------
 // Implementation Variable.
 
-const char* Variable::Mode2String(VariableMode mode) {
+const char* Variable::Mode2String(Mode mode) {
   switch (mode) {
     case VAR: return "VAR";
     case CONST: return "CONST";
-    case CONST_HARMONY: return "CONST";
     case LET: return "LET";
     case DYNAMIC: return "DYNAMIC";
     case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
@@ -56,28 +55,21 @@
 
 Variable::Variable(Scope* scope,
                    Handle<String> name,
-                   VariableMode mode,
+                   Mode mode,
                    bool is_valid_LHS,
-                   Kind kind,
-                   InitializationFlag initialization_flag,
-                   Interface* interface)
+                   Kind kind)
   : scope_(scope),
     name_(name),
     mode_(mode),
     kind_(kind),
     location_(UNALLOCATED),
     index_(-1),
-    initializer_position_(RelocInfo::kNoPosition),
     local_if_not_shadowed_(NULL),
     is_valid_LHS_(is_valid_LHS),
-    force_context_allocation_(false),
-    is_used_(false),
-    initialization_flag_(initialization_flag),
-    interface_(interface) {
-  // Names must be canonicalized for fast equality checks.
+    is_accessed_from_inner_scope_(false),
+    is_used_(false) {
+  // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
-  // Var declared variables never need initialization.
-  ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
 
 
@@ -87,12 +79,4 @@
   return mode_ != TEMPORARY && scope_ != NULL && scope_->is_global_scope();
 }
 
-
-int Variable::CompareIndex(Variable* const* v, Variable* const* w) {
-  int x = (*v)->index();
-  int y = (*w)->index();
-  // Consider sorting them according to type as well?
-  return x - y;
-}
-
 } }  // namespace v8::internal
diff --git a/src/variables.h b/src/variables.h
index f49b6e1..56c8dab 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -29,7 +29,6 @@
 #define V8_VARIABLES_H_
 
 #include "zone.h"
-#include "interface.h"
 
 namespace v8 {
 namespace internal {
@@ -41,6 +40,34 @@
 
 class Variable: public ZoneObject {
  public:
+  enum Mode {
+    // User declared variables:
+    VAR,       // declared via 'var', and 'function' declarations
+
+    CONST,     // declared via 'const' declarations
+
+    LET,       // declared via 'let' declarations
+
+    // Variables introduced by the compiler:
+    DYNAMIC,         // always require dynamic lookup (we don't know
+                     // the declaration)
+
+    DYNAMIC_GLOBAL,  // requires dynamic lookup, but we know that the
+                     // variable is global unless it has been shadowed
+                     // by an eval-introduced variable
+
+    DYNAMIC_LOCAL,   // requires dynamic lookup, but we know that the
+                     // variable is local and where it is unless it
+                     // has been shadowed by an eval-introduced
+                     // variable
+
+    INTERNAL,        // like VAR, but not user-visible (may or may not
+                     // be in a context)
+
+    TEMPORARY        // temporary variables (not user-visible), never
+                     // in a context
+  };
+
   enum Kind {
     NORMAL,
     THIS,
@@ -76,14 +103,12 @@
 
   Variable(Scope* scope,
            Handle<String> name,
-           VariableMode mode,
+           Mode mode,
            bool is_valid_lhs,
-           Kind kind,
-           InitializationFlag initialization_flag,
-           Interface* interface = Interface::NewValue());
+           Kind kind);
 
   // Printing support
-  static const char* Mode2String(VariableMode mode);
+  static const char* Mode2String(Mode mode);
 
   bool IsValidLeftHandSide() { return is_valid_LHS_; }
 
@@ -94,20 +119,17 @@
   Scope* scope() const { return scope_; }
 
   Handle<String> name() const { return name_; }
-  VariableMode mode() const { return mode_; }
-  bool has_forced_context_allocation() const {
-    return force_context_allocation_;
+  Mode mode() const { return mode_; }
+  bool is_accessed_from_inner_scope() const {
+    return is_accessed_from_inner_scope_;
   }
-  void ForceContextAllocation() {
+  void MarkAsAccessedFromInnerScope() {
     ASSERT(mode_ != TEMPORARY);
-    force_context_allocation_ = true;
+    is_accessed_from_inner_scope_ = true;
   }
   bool is_used() { return is_used_; }
   void set_is_used(bool flag) { is_used_ = flag; }
 
-  int initializer_position() { return initializer_position_; }
-  void set_initializer_position(int pos) { initializer_position_ = pos; }
-
   bool IsVariable(Handle<String> n) const {
     return !is_this() && name().is_identical_to(n);
   }
@@ -124,13 +146,6 @@
             mode_ == DYNAMIC_GLOBAL ||
             mode_ == DYNAMIC_LOCAL);
   }
-  bool is_const_mode() const {
-    return (mode_ == CONST ||
-            mode_ == CONST_HARMONY);
-  }
-  bool binding_needs_init() const {
-    return initialization_flag_ == kNeedsInitialization;
-  }
 
   bool is_global() const;
   bool is_this() const { return kind_ == THIS; }
@@ -138,7 +153,8 @@
 
   // True if the variable is named eval and not known to be shadowed.
   bool is_possibly_eval() const {
-    return IsVariable(FACTORY->eval_symbol());
+    return IsVariable(FACTORY->eval_symbol()) &&
+        (mode_ == DYNAMIC || mode_ == DYNAMIC_GLOBAL);
   }
 
   Variable* local_if_not_shadowed() const {
@@ -152,43 +168,28 @@
 
   Location location() const { return location_; }
   int index() const { return index_; }
-  InitializationFlag initialization_flag() const {
-    return initialization_flag_;
-  }
-  Interface* interface() const { return interface_; }
 
   void AllocateTo(Location location, int index) {
     location_ = location;
     index_ = index;
   }
 
-  static int CompareIndex(Variable* const* v, Variable* const* w);
-
  private:
   Scope* scope_;
   Handle<String> name_;
-  VariableMode mode_;
+  Mode mode_;
   Kind kind_;
   Location location_;
   int index_;
-  int initializer_position_;
 
-  // If this field is set, this variable references the stored locally bound
-  // variable, but it might be shadowed by variable bindings introduced by
-  // non-strict 'eval' calls between the reference scope (inclusive) and the
-  // binding scope (exclusive).
   Variable* local_if_not_shadowed_;
 
   // Valid as a LHS? (const and this are not valid LHS, for example)
   bool is_valid_LHS_;
 
   // Usage info.
-  bool force_context_allocation_;  // set by variable resolver
+  bool is_accessed_from_inner_scope_;  // set by variable resolver
   bool is_used_;
-  InitializationFlag initialization_flag_;
-
-  // Module type info.
-  Interface* interface_;
 };
 
 
diff --git a/src/version.cc b/src/version.cc
index d23fe61..2c21152 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     9
-#define BUILD_NUMBER      24
-#define PATCH_LEVEL       9
+#define MINOR_VERSION     6
+#define BUILD_NUMBER      6
+#define PATCH_LEVEL       19
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/weakmap.js b/src/weakmap.js
new file mode 100644
index 0000000..5fb5151
--- /dev/null
+++ b/src/weakmap.js
@@ -0,0 +1,98 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// const $Object = global.Object;
+const $WeakMap = global.WeakMap;
+
+// -------------------------------------------------------------------
+
+function WeakMapConstructor() {
+  if (%_IsConstructCall()) {
+    %WeakMapInitialize(this);
+  } else {
+    return new $WeakMap();
+  }
+}
+
+
+function WeakMapGet(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakMapSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return !IS_UNDEFINED(%WeakMapGet(this, key));
+}
+
+
+function WeakMapDelete(key) {
+  if (!IS_SPEC_OBJECT(key)) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  if (!IS_UNDEFINED(%WeakMapGet(this, key))) {
+    %WeakMapSet(this, key, void 0);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// -------------------------------------------------------------------
+
+(function () {
+  %CheckIsBootstrapping();
+  // Set up the WeakMap constructor function.
+  %SetCode($WeakMap, WeakMapConstructor);
+
+  // Set up the constructor property on the WeakMap prototype object.
+  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakMap prototype object.
+  InstallFunctionsOnHiddenPrototype($WeakMap.prototype, DONT_ENUM, $Array(
+    "get", WeakMapGet,
+    "set", WeakMapSet,
+    "has", WeakMapHas,
+    "delete", WeakMapDelete
+  ));
+})();
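
Note that the WeakMap natives restored here have no dedicated removal primitive: delete stores undefined, and has is defined as get(key) !== undefined. One visible consequence is that a key explicitly mapped to undefined looks absent:

    var wm = new WeakMap();
    var key = {};

    wm.set(key, void 0);
    print(wm.has(key));     // false: 'has' only sees get(key) !== undefined

    wm.set(key, 42);
    print(wm.has(key));     // true
    print(wm.delete(key));  // true: implemented as set(key, undefined)
    print(wm.has(key));     // false
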
diff --git a/src/win32-headers.h b/src/win32-headers.h
index 5d9c89e..fca5c13 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -56,7 +56,6 @@
 #include <windows.h>
 
 #ifdef V8_WIN32_HEADERS_FULL
-#include <signal.h>  // For raise().
 #include <time.h>  // For LocalOffset() implementation.
 #include <mmsystem.h>  // For timeGetTime().
 #ifdef __MINGW32__
@@ -65,10 +64,10 @@
 #undef _WIN32_WINNT
 #define _WIN32_WINNT 0x501
 #endif  // __MINGW32__
-#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#ifndef __MINGW32__
 #include <dbghelp.h>  // For SymLoadModule64 and al.
 #include <errno.h>  // For STRUNCATE
-#endif  // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
+#endif  // __MINGW32__
 #include <limits.h>  // For INT_MAX and al.
 #include <tlhelp32.h>  // For Module32First and al.
 
@@ -76,10 +75,7 @@
 // makes it impossible to have them elsewhere.
 #include <winsock2.h>
 #include <ws2tcpip.h>
-#ifndef __MINGW32__
-#include <wspiapi.h>
-#endif  // __MINGW32__
-#include <process.h>  // For _beginthreadex().
+#include <process.h>  // for _beginthreadex()
 #include <stdlib.h>
 #endif  // V8_WIN32_HEADERS_FULL
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index a9cc2ef..8db54f0 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,6 @@
 #ifndef V8_X64_ASSEMBLER_X64_INL_H_
 #define V8_X64_ASSEMBLER_X64_INL_H_
 
-#include "x64/assembler-x64.h"
-
 #include "cpu.h"
 #include "debug.h"
 #include "v8memory.h"
@@ -226,31 +224,24 @@
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
-                              || rmode_ == EMBEDDED_OBJECT
-                              || rmode_ == EXTERNAL_REFERENCE);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   return reinterpret_cast<Address>(pc_);
 }
 
 
 int RelocInfo::target_address_size() {
   if (IsCodedSpecially()) {
-    return Assembler::kSpecialTargetSize;
+    return Assembler::kCallTargetSize;
   } else {
-    return kPointerSize;
+    return Assembler::kExternalTargetSize;
   }
 }
 
 
-void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
+void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
     Assembler::set_target_address_at(pc_, target);
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-          host(), this, HeapObject::cast(target_code));
-    }
   } else {
     Memory::Address_at(pc_) = target;
     CPU::FlushICache(pc_, sizeof(Address));
@@ -264,7 +255,7 @@
 }
 
 
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   if (rmode_ == EMBEDDED_OBJECT) {
     return Memory::Object_Handle_at(pc_);
@@ -286,16 +277,10 @@
 }
 
 
-void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
+void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Memory::Object_at(pc_) = target;
+  *reinterpret_cast<Object**>(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL &&
-      target->IsHeapObject()) {
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-  }
 }
 
 
@@ -316,19 +301,11 @@
 }
 
 
-void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
-                                WriteBarrierMode mode) {
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
-  if (mode == UPDATE_WRITE_BARRIER &&
-      host() != NULL) {
-    // TODO(1550) We are passing NULL as a slot because cell can never be on
-    // evacuation candidate.
-    host()->GetHeap()->incremental_marking()->RecordWrite(
-        host(), NULL, cell);
-  }
 }
 
 
@@ -367,11 +344,6 @@
       target;
   CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                    sizeof(Address));
-  if (host() != NULL) {
-    Object* target_code = Code::GetCodeFromTargetAddress(target);
-    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-        host(), this, HeapObject::cast(target_code));
-  }
 }
 
 
@@ -396,14 +368,14 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitEmbeddedPointer(this);
+    visitor->VisitPointer(target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     visitor->VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    visitor->VisitExternalReference(this);
+    visitor->VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // TODO(isolates): Get a cached isolate below.
@@ -424,14 +396,14 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitEmbeddedPointer(heap, this);
+    StaticVisitor::VisitPointer(heap, target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
     StaticVisitor::VisitGlobalPropertyCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
-    StaticVisitor::VisitExternalReference(this);
+    StaticVisitor::VisitExternalReference(target_reference_address());
     CPU::FlushICache(pc_, sizeof(Address));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   } else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 2f0c542..745fdae 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,7 +47,7 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
+  ASSERT(!initialized_);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -383,7 +383,7 @@
   }
 #endif
 
-  // Set up buffer pointers.
+  // Setup buffer pointers.
   ASSERT(buffer_ != NULL);
   pc_ = buffer_;
   reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
@@ -412,7 +412,7 @@
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
   ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
-  // Set up code descriptor.
+  // Setup code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
@@ -426,7 +426,13 @@
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
   int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
-  Nop(delta);
+  while (delta >= 9) {
+    nop(9);
+    delta -= 9;
+  }
+  if (delta > 0) {
+    nop(delta);
+  }
 }
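
// Hedged sketch (illustration only, not V8 source): how the reverted
// Align() computes its padding, and how more than 9 bytes of padding
// decomposes into the fixed nop(int) sequences, e.g. 22 bytes -> 9 + 9 + 4.
#include <cassert>
#include <cstdio>

static int PaddingFor(int pc_offset, int m) {
  assert((m & (m - 1)) == 0);  // m must be a power of two, as ASSERTed above
  return (m - (pc_offset & (m - 1))) & (m - 1);
}

int main() {
  std::printf("padding: %d\n", PaddingFor(13, 16));  // prints 3
  for (int delta = 22; delta > 0; delta -= 9) {
    std::printf("nop(%d)\n", delta >= 9 ? 9 : delta);  // nop(9) nop(9) nop(4)
  }
  return 0;
}
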
 
 
@@ -435,15 +441,6 @@
 }
 
 
-bool Assembler::IsNop(Address addr) {
-  Address a = addr;
-  while (*a == 0x66) a++;
-  if (*a == 0x90) return true;
-  if (a[0] == 0xf && a[1] == 0x1f) return true;
-  return false;
-}
-
-
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
@@ -502,7 +499,7 @@
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
-  // Set up new buffer.
+  // Setup new buffer.
   desc.buffer = NewArray<byte>(desc.buffer_size);
   desc.instr_size = pc_offset();
   desc.reloc_size =
@@ -775,7 +772,7 @@
                                           Register dst,
                                           Immediate src) {
   EnsureSpace ensure_space(this);
-  if (!dst.is_byte_register()) {
+  if (dst.code() > 3) {
     // Use 64-bit mode byte registers.
     emit_rex_64(dst);
   }
@@ -1059,7 +1056,7 @@
 
 void Assembler::decb(Register dst) {
   EnsureSpace ensure_space(this);
-  if (!dst.is_byte_register()) {
+  if (dst.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
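
// Hedged sketch (illustration only, not V8 source): why the reverted code
// tests `code() > 3`. Without a REX prefix, only al, bl, cl and dl
// (register codes 0..3) are usable as byte operands; emitting REX makes
// spl, bpl, sil, dil and r8b..r15b encodable instead of ah/ch/dh/bh.
#include <cstdio>

static bool NeedsRexForByteOp(int reg_code) {
  return reg_code > 3;  // same predicate as the is_byte_register() helper
}

int main() {
  const char* names[] = {"rax", "rcx", "rdx", "rbx",
                         "rsp", "rbp", "rsi", "rdi"};
  for (int code = 0; code < 8; ++code) {
    std::printf("%s: %s\n", names[code],
                NeedsRexForByteOp(code) ? "REX required" : "no REX needed");
  }
  return 0;
}
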
@@ -1387,7 +1384,7 @@
 
 void Assembler::movb(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  if (!dst.is_byte_register()) {
+  if (dst.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst, src);
   } else {
@@ -1400,7 +1397,7 @@
 
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
-  if (!dst.is_byte_register()) {
+  if (dst.code() > 3) {
     emit_rex_32(dst);
   }
   emit(0xB0 + dst.low_bits());
@@ -1410,7 +1407,7 @@
 
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
-  if (!src.is_byte_register()) {
+  if (src.code() > 3) {
     emit_rex_32(src, dst);
   } else {
     emit_optional_rex_32(src, dst);
@@ -1640,8 +1637,6 @@
 
 void Assembler::movzxbq(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  // 32 bit operations zero the top 32 bits of 64 bit registers.  Therefore
-  // there is no need to make this a 64 bit operation.
   emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xB6);
@@ -1768,7 +1763,7 @@
 }
 
 
-void Assembler::Nop(int n) {
+void Assembler::nop(int n) {
   // The recommended multi-byte sequences of NOP instructions from the Intel 64
   // and IA-32 Architectures Software Developer's Manual.
   //
@@ -1783,64 +1778,73 @@
   // 9 bytes  66 NOP DWORD ptr [EAX + EAX*1 +         66 0F 1F 84 00 00 00 00
   //          00000000H]                              00H
 
+  ASSERT(1 <= n);
+  ASSERT(n <= 9);
   EnsureSpace ensure_space(this);
-  while (n > 0) {
-    switch (n) {
-      case 2:
-        emit(0x66);
-      case 1:
-        emit(0x90);
-        return;
-      case 3:
-        emit(0x0f);
-        emit(0x1f);
-        emit(0x00);
-        return;
-      case 4:
-        emit(0x0f);
-        emit(0x1f);
-        emit(0x40);
-        emit(0x00);
-        return;
-      case 6:
-        emit(0x66);
-      case 5:
-        emit(0x0f);
-        emit(0x1f);
-        emit(0x44);
-        emit(0x00);
-        emit(0x00);
-        return;
-      case 7:
-        emit(0x0f);
-        emit(0x1f);
-        emit(0x80);
-        emit(0x00);
-        emit(0x00);
-        emit(0x00);
-        emit(0x00);
-        return;
-      default:
-      case 11:
-        emit(0x66);
-        n--;
-      case 10:
-        emit(0x66);
-        n--;
-      case 9:
-        emit(0x66);
-        n--;
-      case 8:
-        emit(0x0f);
-        emit(0x1f);
-        emit(0x84);
-        emit(0x00);
-        emit(0x00);
-        emit(0x00);
-        emit(0x00);
-        emit(0x00);
-        n -= 8;
-    }
+  switch (n) {
+  case 1:
+    emit(0x90);
+    return;
+  case 2:
+    emit(0x66);
+    emit(0x90);
+    return;
+  case 3:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x00);
+    return;
+  case 4:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x40);
+    emit(0x00);
+    return;
+  case 5:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 6:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x44);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 7:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x80);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 8:
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
+  case 9:
+    emit(0x66);
+    emit(0x0f);
+    emit(0x1f);
+    emit(0x84);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    emit(0x00);
+    return;
   }
 }
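
// Hedged sketch (illustration only, not V8 source): the Intel SDM
// multi-byte NOP encodings that the switch above emits, gathered into a
// table so the byte patterns are visible side by side.
#include <cstdio>

static const unsigned char kNops[9][9] = {
    {0x90},                                                  // 1 byte
    {0x66, 0x90},                                            // 2 bytes
    {0x0F, 0x1F, 0x00},                                      // 3 bytes
    {0x0F, 0x1F, 0x40, 0x00},                                // 4 bytes
    {0x0F, 0x1F, 0x44, 0x00, 0x00},                          // 5 bytes
    {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},                    // 6 bytes
    {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},              // 7 bytes
    {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},        // 8 bytes
    {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},  // 9 bytes
};

int main() {
  for (int n = 1; n <= 9; ++n) {
    std::printf("%d-byte NOP:", n);
    for (int i = 0; i < n; ++i) std::printf(" %02X", kNops[n - 1][i]);
    std::printf("\n");
  }
  return 0;
}
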
 
@@ -1933,7 +1937,7 @@
   }
   EnsureSpace ensure_space(this);
   ASSERT(is_uint4(cc));
-  if (!reg.is_byte_register()) {  // Use x64 byte registers, where different.
+  if (reg.code() > 3) {  // Use x64 byte registers, where different.
     emit_rex_32(reg);
   }
   emit(0x0F);
@@ -1998,7 +2002,7 @@
     emit(0x84);
     emit_modrm(src, dst);
   } else {
-    if (!dst.is_byte_register() || !src.is_byte_register()) {
+    if (dst.code() > 3 || src.code() > 3) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(dst, src);
     }
@@ -2015,7 +2019,7 @@
     emit(0xA8);
     emit(mask.value_);  // Low byte emitted.
   } else {
-    if (!reg.is_byte_register()) {
+    if (reg.code() > 3) {
       // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
       emit_rex_32(reg);
     }
@@ -2038,7 +2042,7 @@
 
 void Assembler::testb(const Operand& op, Register reg) {
   EnsureSpace ensure_space(this);
-  if (!reg.is_byte_register()) {
+  if (reg.code() > 3) {
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg, op);
   } else {
@@ -2295,13 +2299,6 @@
 }
 
 
-void Assembler::fptan() {
-  EnsureSpace ensure_space(this);
-  emit(0xD9);
-  emit(0xF2);
-}
-
-
 void Assembler::fyl2x() {
   EnsureSpace ensure_space(this);
   emit(0xD9);
@@ -2309,27 +2306,6 @@
 }
 
 
-void Assembler::f2xm1() {
-  EnsureSpace ensure_space(this);
-  emit(0xD9);
-  emit(0xF0);
-}
-
-
-void Assembler::fscale() {
-  EnsureSpace ensure_space(this);
-  emit(0xD9);
-  emit(0xFD);
-}
-
-
-void Assembler::fninit() {
-  EnsureSpace ensure_space(this);
-  emit(0xDB);
-  emit(0xE3);
-}
-
-
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xC0, i);
@@ -2589,8 +2565,7 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(CpuFeatures::IsSupported(SSE4_1));
-  ASSERT(is_uint8(imm8));
+  ASSERT(is_uint2(imm8));
   EnsureSpace ensure_space(this);
   emit(0x66);
   emit_optional_rex_32(dst, src);
@@ -3008,7 +2983,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RelocInfo rinfo(pc_, rmode, data);
   reloc_info_writer.Write(&rinfo);
 }
 
@@ -3045,6 +3020,8 @@
   return (1 << rmode_) & kApplyMask;
 }
 
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 60b29e6..2e373fa 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // A lightweight X64 Assembler.
 
@@ -45,22 +45,22 @@
 // Utility functions
 
 // Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
+static inline bool is_uint32(int64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return static_cast<uint64_t>(x) <= kMaxUInt32;
 }
 
-inline bool is_int32(int64_t x) {
+static inline bool is_int32(int64_t x) {
   static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
   return is_uint32(x - kMinInt32);
 }
 
-inline bool uint_is_int32(uint64_t x) {
+static inline bool uint_is_int32(uint64_t x) {
   static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
   return x <= kMaxInt32;
 }
 
-inline bool is_uint32(uint64_t x) {
+static inline bool is_uint32(uint64_t x) {
   static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
   return x <= kMaxUInt32;
 }
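
// Hedged sketch (illustration only, not V8 source): why is_int32(x) above
// reduces to is_uint32(x - kMinInt32). Subtracting kMinInt32 shifts the
// signed range [-2^31, 2^31) up by 2^31, mapping it exactly onto the
// unsigned range [0, 2^32).
#include <cstdint>
#include <cstdio>

static bool IsUint32(int64_t x) {
  return static_cast<uint64_t>(x) <= UINT64_C(0xffffffff);
}

static bool IsInt32(int64_t x) {
  static const int64_t kMinInt32 = -INT64_C(0x80000000);
  return IsUint32(x - kMinInt32);
}

int main() {
  std::printf("%d\n", static_cast<int>(IsInt32(-INT64_C(2147483648))));  // 1
  std::printf("%d\n", static_cast<int>(IsInt32(INT64_C(2147483647))));   // 1
  std::printf("%d\n", static_cast<int>(IsInt32(INT64_C(2147483648))));   // 0
  return 0;
}
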
@@ -131,8 +131,6 @@
   }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
-  // rax, rbx, rcx and rdx are byte registers, the rest are not.
-  bool is_byte_register() const { return code_ <= 3; }
   int code() const {
     ASSERT(is_valid());
     return code_;
@@ -161,41 +159,23 @@
   static const int kAllocationIndexByRegisterCode[kNumRegisters];
 };
 
-const int kRegister_rax_Code = 0;
-const int kRegister_rcx_Code = 1;
-const int kRegister_rdx_Code = 2;
-const int kRegister_rbx_Code = 3;
-const int kRegister_rsp_Code = 4;
-const int kRegister_rbp_Code = 5;
-const int kRegister_rsi_Code = 6;
-const int kRegister_rdi_Code = 7;
-const int kRegister_r8_Code = 8;
-const int kRegister_r9_Code = 9;
-const int kRegister_r10_Code = 10;
-const int kRegister_r11_Code = 11;
-const int kRegister_r12_Code = 12;
-const int kRegister_r13_Code = 13;
-const int kRegister_r14_Code = 14;
-const int kRegister_r15_Code = 15;
-const int kRegister_no_reg_Code = -1;
-
-const Register rax = { kRegister_rax_Code };
-const Register rcx = { kRegister_rcx_Code };
-const Register rdx = { kRegister_rdx_Code };
-const Register rbx = { kRegister_rbx_Code };
-const Register rsp = { kRegister_rsp_Code };
-const Register rbp = { kRegister_rbp_Code };
-const Register rsi = { kRegister_rsi_Code };
-const Register rdi = { kRegister_rdi_Code };
-const Register r8 = { kRegister_r8_Code };
-const Register r9 = { kRegister_r9_Code };
-const Register r10 = { kRegister_r10_Code };
-const Register r11 = { kRegister_r11_Code };
-const Register r12 = { kRegister_r12_Code };
-const Register r13 = { kRegister_r13_Code };
-const Register r14 = { kRegister_r14_Code };
-const Register r15 = { kRegister_r15_Code };
-const Register no_reg = { kRegister_no_reg_Code };
+const Register rax = { 0 };
+const Register rcx = { 1 };
+const Register rdx = { 2 };
+const Register rbx = { 3 };
+const Register rsp = { 4 };
+const Register rbp = { 5 };
+const Register rsi = { 6 };
+const Register rdi = { 7 };
+const Register r8 = { 8 };
+const Register r9 = { 9 };
+const Register r10 = { 10 };
+const Register r11 = { 11 };
+const Register r12 = { 12 };
+const Register r13 = { 13 };
+const Register r14 = { 14 };
+const Register r15 = { 15 };
+const Register no_reg = { -1 };
 
 
 struct XMMRegister {
@@ -235,12 +215,6 @@
     return names[index];
   }
 
-  static XMMRegister from_code(int code) {
-    ASSERT(code >= 0);
-    ASSERT(code < kNumRegisters);
-    XMMRegister r = { code };
-    return r;
-  }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
@@ -577,8 +551,8 @@
 
   // This sets the branch destination (which is in the instruction on x64).
   // This is for calls and branches within generated code.
-  inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Address target) {
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
     set_target_address_at(instruction_payload, target);
   }
 
@@ -591,7 +565,8 @@
 
   inline Handle<Object> code_target_object_handle_at(Address pc);
   // Number of bytes taken up by the branch target in the code.
-  static const int kSpecialTargetSize = 4;  // Use 32-bit displacement.
+  static const int kCallTargetSize = 4;      // Use 32-bit displacement.
+  static const int kExternalTargetSize = 8;  // Use 64-bit absolute.
   // Distance between the address of the code target in the call instruction
   // and the return address pushed on the stack.
   static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
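
// Hedged sketch (illustration only, not V8 source): the two target widths
// the reverted constants distinguish. A code target is a 4-byte
// displacement relative to the end of the call instruction; an external
// target is an 8-byte absolute address stored verbatim in the code.
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t pc_after_call = 0x100000;  // address just past the call insn
  int32_t rel32 = 0x2468;             // the 4-byte displacement in the code
  uint64_t code_target = pc_after_call + static_cast<int64_t>(rel32);
  std::printf("code target:     0x%llx (4-byte rel)\n",
              static_cast<unsigned long long>(code_target));
  uint64_t external_target = UINT64_C(0x7fffdeadbeef);  // stored as 8 bytes
  std::printf("external target: 0x%llx (8-byte abs)\n",
              static_cast<unsigned long long>(external_target));
  return 0;
}
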
@@ -655,7 +630,6 @@
   // possible to align the pc offset to a multiple
   // of m, where m must be a power of 2.
   void Align(int m);
-  void Nop(int bytes = 1);
   // Aligns code to something that's optimal for a jump target for the platform.
   void CodeTargetAlign();
 
@@ -669,6 +643,7 @@
   void push_imm32(int32_t imm32);
   void push(Register src);
   void push(const Operand& src);
+  void push(Handle<Object> handle);
 
   void pop(Register dst);
   void pop(const Operand& dst);
@@ -760,10 +735,6 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
-  void addl(const Operand& dst, Register src) {
-    arithmetic_op_32(0x01, src, dst);
-  }
-
   void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -1174,6 +1145,7 @@
   void hlt();
   void int3();
   void nop();
+  void nop(int n);
   void rdtsc();
   void ret(int imm16);
   void setcc(Condition cc, Register reg);
@@ -1294,11 +1266,7 @@
 
   void fsin();
   void fcos();
-  void fptan();
   void fyl2x();
-  void f2xm1();
-  void fscale();
-  void fninit();
 
   void frndint();
 
@@ -1420,20 +1388,19 @@
     return static_cast<int>(reloc_info_writer.pos() - pc_);
   }
 
-  static bool IsNop(Address addr);
+  static bool IsNop(Address addr) { return *addr == 0x90; }
 
   // Avoid overflows for displacements etc.
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
-
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 4e037ff..db06909 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -73,295 +73,310 @@
 }
 
 
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
-                                           bool is_api_function,
-                                           bool count_constructions) {
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
   //  -- rdi: constructor function
   // -----------------------------------
 
+  Label non_function_call;
+  // Check that function is not a smi.
+  __ JumpIfSmi(rdi, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &non_function_call);
+
+  // Jump to the function-specific construct stub.
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
+  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ jmp(rbx);
+
+  // rdi: called object
+  // rax: number of arguments
+  __ bind(&non_function_call);
+  // Set expected number of arguments to zero (not changing rax).
+  __ Set(rbx, 0);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool count_constructions) {
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
-  // Enter a construct frame.
-  {
-    FrameScope scope(masm, StackFrame::CONSTRUCT);
+    // Enter a construct frame.
+  __ EnterConstructFrame();
 
-    // Store a smi-tagged arguments count on the stack.
-    __ Integer32ToSmi(rax, rax);
-    __ push(rax);
+  // Store a smi-tagged arguments count on the stack.
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);
 
-    // Push the function to invoke on the stack.
-    __ push(rdi);
+  // Push the function to invoke on the stack.
+  __ push(rdi);
 
-    // Try to allocate the object without transitioning into C code. If any of
-    // the preconditions is not met, the code bails out to the runtime call.
-    Label rt_call, allocated;
-    if (FLAG_inline_new) {
-      Label undo_allocation;
+  // Try to allocate the object without transitioning into C code. If any of the
+  // preconditions is not met, the code bails out to the runtime call.
+  Label rt_call, allocated;
+  if (FLAG_inline_new) {
+    Label undo_allocation;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-      ExternalReference debug_step_in_fp =
-          ExternalReference::debug_step_in_fp_address(masm->isolate());
-      __ movq(kScratchRegister, debug_step_in_fp);
-      __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
-      __ j(not_equal, &rt_call);
+    ExternalReference debug_step_in_fp =
+        ExternalReference::debug_step_in_fp_address(masm->isolate());
+    __ movq(kScratchRegister, debug_step_in_fp);
+    __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+    __ j(not_equal, &rt_call);
 #endif
 
-      // Verified that the constructor is a JSFunction.
-      // Load the initial map and verify that it is in fact a map.
-      // rdi: constructor
-      __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-      // Will both indicate a NULL and a Smi
-      ASSERT(kSmiTag == 0);
-      __ JumpIfSmi(rax, &rt_call);
-      // rdi: constructor
-      // rax: initial map (if proven valid below)
-      __ CmpObjectType(rax, MAP_TYPE, rbx);
-      __ j(not_equal, &rt_call);
+    // Verified that the constructor is a JSFunction.
+    // Load the initial map and verify that it is in fact a map.
+    // rdi: constructor
+    __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+    // Will both indicate a NULL and a Smi
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(rax, &rt_call);
+    // rdi: constructor
+    // rax: initial map (if proven valid below)
+    __ CmpObjectType(rax, MAP_TYPE, rbx);
+    __ j(not_equal, &rt_call);
 
-      // Check that the constructor is not constructing a JSFunction (see
-      // comments in Runtime_NewObject in runtime.cc). In which case the
-      // initial map's instance type would be JS_FUNCTION_TYPE.
-      // rdi: constructor
-      // rax: initial map
-      __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-      __ j(equal, &rt_call);
+    // Check that the constructor is not constructing a JSFunction (see comments
+    // in Runtime_NewObject in runtime.cc). In which case the initial map's
+    // instance type would be JS_FUNCTION_TYPE.
+    // rdi: constructor
+    // rax: initial map
+    __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+    __ j(equal, &rt_call);
 
+    if (count_constructions) {
+      Label allocate;
+      // Decrease generous allocation count.
+      __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+      __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
+      __ j(not_zero, &allocate);
+
+      __ push(rax);
+      __ push(rdi);
+
+      __ push(rdi);  // constructor
+      // The call will replace the stub, so the countdown is only done once.
+      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+      __ pop(rdi);
+      __ pop(rax);
+
+      __ bind(&allocate);
+    }
+
+    // Now allocate the JSObject on the heap.
+    __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+    __ shl(rdi, Immediate(kPointerSizeLog2));
+    // rdi: size of new object
+    __ AllocateInNewSpace(rdi,
+                          rbx,
+                          rdi,
+                          no_reg,
+                          &rt_call,
+                          NO_ALLOCATION_FLAGS);
+    // Allocated the JSObject, now initialize the fields.
+    // rax: initial map
+    // rbx: JSObject (not HeapObject tagged - the actual address).
+    // rdi: start of next object
+    __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+    __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+    __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+    // Set extra fields in the newly allocated object.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    { Label loop, entry;
+      // To allow for truncation.
       if (count_constructions) {
-        Label allocate;
-        // Decrease generous allocation count.
-        __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-        __ decb(FieldOperand(rcx,
-                             SharedFunctionInfo::kConstructionCountOffset));
-        __ j(not_zero, &allocate);
-
-        __ push(rax);
-        __ push(rdi);
-
-        __ push(rdi);  // constructor
-        // The call will replace the stub, so the countdown is only done once.
-        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-        __ pop(rdi);
-        __ pop(rax);
-
-        __ bind(&allocate);
-      }
-
-      // Now allocate the JSObject on the heap.
-      __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-      __ shl(rdi, Immediate(kPointerSizeLog2));
-      // rdi: size of new object
-      __ AllocateInNewSpace(rdi,
-                            rbx,
-                            rdi,
-                            no_reg,
-                            &rt_call,
-                            NO_ALLOCATION_FLAGS);
-      // Allocated the JSObject, now initialize the fields.
-      // rax: initial map
-      // rbx: JSObject (not HeapObject tagged - the actual address).
-      // rdi: start of next object
-      __ movq(Operand(rbx, JSObject::kMapOffset), rax);
-      __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-      __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
-      __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
-      // Set extra fields in the newly allocated object.
-      // rax: initial map
-      // rbx: JSObject
-      // rdi: start of next object
-      __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
-      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-      if (count_constructions) {
-        __ movzxbq(rsi,
-                   FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-        __ lea(rsi,
-               Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
-        // rsi: offset of first field after pre-allocated fields
-        if (FLAG_debug_code) {
-          __ cmpq(rsi, rdi);
-          __ Assert(less_equal,
-                    "Unexpected number of pre-allocated property fields.");
-        }
-        __ InitializeFieldsWithFiller(rcx, rsi, rdx);
         __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
-      }
-      __ InitializeFieldsWithFiller(rcx, rdi, rdx);
-
-      // Add the object tag to make the JSObject real, so that we can continue
-      // and jump into the continuation code at any time from now on. Any
-      // failures need to undo the allocation, so that the heap is in a
-      // consistent state and verifiable.
-      // rax: initial map
-      // rbx: JSObject
-      // rdi: start of next object
-      __ or_(rbx, Immediate(kHeapObjectTag));
-
-      // Check if a non-empty properties array is needed.
-      // Allocate and initialize a FixedArray if it is.
-      // rax: initial map
-      // rbx: JSObject
-      // rdi: start of next object
-      // Calculate total properties described map.
-      __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
-      __ movzxbq(rcx,
-                 FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-      __ addq(rdx, rcx);
-      // Calculate unused properties past the end of the in-object properties.
-      __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
-      __ subq(rdx, rcx);
-      // Done if no extra properties are to be allocated.
-      __ j(zero, &allocated);
-      __ Assert(positive, "Property allocation count failed.");
-
-      // Scale the number of elements by pointer size and add the header for
-      // FixedArrays to the start of the next object calculation from above.
-      // rbx: JSObject
-      // rdi: start of next object (will be start of FixedArray)
-      // rdx: number of elements in properties array
-      __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                            times_pointer_size,
-                            rdx,
-                            rdi,
-                            rax,
-                            no_reg,
-                            &undo_allocation,
-                            RESULT_CONTAINS_TOP);
-
-      // Initialize the FixedArray.
-      // rbx: JSObject
-      // rdi: FixedArray
-      // rdx: number of elements
-      // rax: start of next object
-      __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-      __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
-      __ Integer32ToSmi(rdx, rdx);
-      __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
-
-      // Initialize the fields to undefined.
-      // rbx: JSObject
-      // rdi: FixedArray
-      // rax: start of next object
-      // rdx: number of elements
-      { Label loop, entry;
+      } else {
         __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-        __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
-        __ jmp(&entry);
-        __ bind(&loop);
-        __ movq(Operand(rcx, 0), rdx);
-        __ addq(rcx, Immediate(kPointerSize));
-        __ bind(&entry);
-        __ cmpq(rcx, rax);
-        __ j(below, &loop);
       }
-
-      // Store the initialized FixedArray into the properties field of
-      // the JSObject
-      // rbx: JSObject
-      // rdi: FixedArray
-      __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
-      __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
-
-
-      // Continue with JSObject being successfully allocated
-      // rbx: JSObject
-      __ jmp(&allocated);
-
-      // Undo the setting of the new top so that the heap is verifiable. For
-      // example, the map's unused properties potentially do not match the
-      // allocated objects unused properties.
-      // rbx: JSObject (previous new top)
-      __ bind(&undo_allocation);
-      __ UndoAllocationInNewSpace(rbx);
+      __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ movq(Operand(rcx, 0), rdx);
+      __ addq(rcx, Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmpq(rcx, rdi);
+      __ j(less, &loop);
     }
 
-    // Allocate the new receiver object using the runtime call.
-    // rdi: function (constructor)
-    __ bind(&rt_call);
-    // Must restore rdi (constructor) before calling runtime.
-    __ movq(rdi, Operand(rsp, 0));
-    __ push(rdi);
-    __ CallRuntime(Runtime::kNewObject, 1);
-    __ movq(rbx, rax);  // store result in rbx
+    // Add the object tag to make the JSObject real, so that we can continue and
+    // jump into the continuation code at any time from now on. Any failures
+    // need to undo the allocation, so that the heap is in a consistent state
+    // and verifiable.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    __ or_(rbx, Immediate(kHeapObjectTag));
 
-    // New object allocated.
-    // rbx: newly allocated object
-    __ bind(&allocated);
-    // Retrieve the function from the stack.
-    __ pop(rdi);
+    // Check if a non-empty properties array is needed.
+    // Allocate and initialize a FixedArray if it is.
+    // rax: initial map
+    // rbx: JSObject
+    // rdi: start of next object
+    // Calculate total properties described map.
+    __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+    __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+    __ addq(rdx, rcx);
+    // Calculate unused properties past the end of the in-object properties.
+    __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+    __ subq(rdx, rcx);
+    // Done if no extra properties are to be allocated.
+    __ j(zero, &allocated);
+    __ Assert(positive, "Property allocation count failed.");
 
-    // Retrieve smi-tagged arguments count from the stack.
-    __ movq(rax, Operand(rsp, 0));
-    __ SmiToInteger32(rax, rax);
+    // Scale the number of elements by pointer size and add the header for
+    // FixedArrays to the start of the next object calculation from above.
+    // rbx: JSObject
+    // rdi: start of next object (will be start of FixedArray)
+    // rdx: number of elements in properties array
+    __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                          times_pointer_size,
+                          rdx,
+                          rdi,
+                          rax,
+                          no_reg,
+                          &undo_allocation,
+                          RESULT_CONTAINS_TOP);
 
-    // Push the allocated receiver to the stack. We need two copies
-    // because we may have to return the original one and the calling
-    // conventions dictate that the called function pops the receiver.
-    __ push(rbx);
-    __ push(rbx);
+    // Initialize the FixedArray.
+    // rbx: JSObject
+    // rdi: FixedArray
+    // rdx: number of elements
+    // rax: start of next object
+    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+    __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
+    __ Integer32ToSmi(rdx, rdx);
+    __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
 
-    // Set up pointer to last argument.
-    __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
-    // Copy arguments and receiver to the expression stack.
-    Label loop, entry;
-    __ movq(rcx, rax);
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ push(Operand(rbx, rcx, times_pointer_size, 0));
-    __ bind(&entry);
-    __ decq(rcx);
-    __ j(greater_equal, &loop);
-
-    // Call the function.
-    if (is_api_function) {
-      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-      Handle<Code> code =
-          masm->isolate()->builtins()->HandleApiCallConstruct();
-      ParameterCount expected(0);
-      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-    } else {
-      ParameterCount actual(rax);
-      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
+    // Initialize the fields to undefined.
+    // rbx: JSObject
+    // rdi: FixedArray
+    // rax: start of next object
+    // rdx: number of elements
+    { Label loop, entry;
+      __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+      __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+      __ jmp(&entry);
+      __ bind(&loop);
+      __ movq(Operand(rcx, 0), rdx);
+      __ addq(rcx, Immediate(kPointerSize));
+      __ bind(&entry);
+      __ cmpq(rcx, rax);
+      __ j(below, &loop);
     }
 
-    // Store offset of return address for deoptimizer.
-    if (!is_api_function && !count_constructions) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
-    }
+    // Store the initialized FixedArray into the properties field of
+    // the JSObject
+    // rbx: JSObject
+    // rdi: FixedArray
+    __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
+    __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
 
-    // Restore context from the frame.
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 
-    // If the result is an object (in the ECMA sense), we should get rid
-    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-    // on page 74.
-    Label use_receiver, exit;
-    // If the result is a smi, it is *not* an object in the ECMA sense.
-    __ JumpIfSmi(rax, &use_receiver);
+    // Continue with JSObject being successfully allocated
+    // rbx: JSObject
+    __ jmp(&allocated);
 
-    // If the type of the result (stored in its map) is less than
-    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
-    __ j(above_equal, &exit);
-
-    // Throw away the result of the constructor invocation and use the
-    // on-stack receiver as the result.
-    __ bind(&use_receiver);
-    __ movq(rax, Operand(rsp, 0));
-
-    // Restore the arguments count and leave the construct frame.
-    __ bind(&exit);
-    __ movq(rbx, Operand(rsp, kPointerSize));  // Get arguments count.
-
-    // Leave construct frame.
+    // Undo the setting of the new top so that the heap is verifiable. For
+    // example, the map's unused properties potentially do not match the
+    // allocated objects unused properties.
+    // rbx: JSObject (previous new top)
+    __ bind(&undo_allocation);
+    __ UndoAllocationInNewSpace(rbx);
   }
 
+  // Allocate the new receiver object using the runtime call.
+  // rdi: function (constructor)
+  __ bind(&rt_call);
+  // Must restore rdi (constructor) before calling runtime.
+  __ movq(rdi, Operand(rsp, 0));
+  __ push(rdi);
+  __ CallRuntime(Runtime::kNewObject, 1);
+  __ movq(rbx, rax);  // store result in rbx
+
+  // New object allocated.
+  // rbx: newly allocated object
+  __ bind(&allocated);
+  // Retrieve the function from the stack.
+  __ pop(rdi);
+
+  // Retrieve smi-tagged arguments count from the stack.
+  __ movq(rax, Operand(rsp, 0));
+  __ SmiToInteger32(rax, rax);
+
+  // Push the allocated receiver to the stack. We need two copies
+  // because we may have to return the original one and the calling
+  // conventions dictate that the called function pops the receiver.
+  __ push(rbx);
+  __ push(rbx);
+
+  // Setup pointer to last argument.
+  __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+  // Copy arguments and receiver to the expression stack.
+  Label loop, entry;
+  __ movq(rcx, rax);
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ push(Operand(rbx, rcx, times_pointer_size, 0));
+  __ bind(&entry);
+  __ decq(rcx);
+  __ j(greater_equal, &loop);
+
+  // Call the function.
+  if (is_api_function) {
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    Handle<Code> code =
+        masm->isolate()->builtins()->HandleApiCallConstruct();
+    ParameterCount expected(0);
+    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+  } else {
+    ParameterCount actual(rax);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+  }
+
+  // Restore context from the frame.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // If the result is an object (in the ECMA sense), we should get rid
+  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+  // on page 74.
+  Label use_receiver, exit;
+  // If the result is a smi, it is *not* an object in the ECMA sense.
+  __ JumpIfSmi(rax, &use_receiver);
+
+  // If the type of the result (stored in its map) is less than
+  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+  __ j(above_equal, &exit);
+
+  // Throw away the result of the constructor invocation and use the
+  // on-stack receiver as the result.
+  __ bind(&use_receiver);
+  __ movq(rax, Operand(rsp, 0));
+
+  // Restore the arguments count and leave the construct frame.
+  __ bind(&exit);
+  __ movq(rbx, Operand(rsp, kPointerSize));  // get arguments count
+  __ LeaveConstructFrame();
+
   // Remove caller arguments from the stack and return.
   __ pop(rcx);
   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
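
// Hedged sketch (illustration only, not V8 source): the ECMA-262 section
// 13.2.2-7 rule the stub above implements. A constructor's return value
// replaces the result only when it is an object; any primitive return is
// discarded in favor of the freshly allocated receiver.
#include <cstdio>

struct Value {
  bool is_object;
  const char* description;
};

static Value ConstructResult(Value receiver, Value constructor_return) {
  return constructor_return.is_object ? constructor_return : receiver;
}

int main() {
  Value receiver = {true, "freshly allocated receiver"};
  Value primitive = {false, "smi 42"};
  Value explicit_obj = {true, "explicitly returned object"};
  std::printf("%s\n", ConstructResult(receiver, primitive).description);
  std::printf("%s\n", ConstructResult(receiver, explicit_obj).description);
  return 0;
}
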
@@ -398,108 +413,104 @@
   // - Object*** argv
   // (see Handle::Invoke in execution.cc).
 
-  // Open a C++ scope for the FrameScope.
-  {
-    // Platform specific argument handling. After this, the stack contains
-    // an internal frame and the pushed function and receiver, and
-    // register rax and rbx holds the argument count and argument array,
-    // while rdi holds the function pointer and rsi the context.
-
+  // Platform specific argument handling. After this, the stack contains
+  // an internal frame and the pushed function and receiver, and
+  // register rax and rbx holds the argument count and argument array,
+  // while rdi holds the function pointer and rsi the context.
 #ifdef _WIN64
-    // MSVC parameters in:
-    // rcx : entry (ignored)
-    // rdx : function
-    // r8 : receiver
-    // r9 : argc
-    // [rsp+0x20] : argv
+  // MSVC parameters in:
+  // rcx : entry (ignored)
+  // rdx : function
+  // r8 : receiver
+  // r9 : argc
+  // [rsp+0x20] : argv
 
-    // Clear the context before we push it when entering the internal frame.
-    __ Set(rsi, 0);
-    // Enter an internal frame.
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Clear the context before we push it when entering the JS frame.
+  __ Set(rsi, 0);
+  __ EnterInternalFrame();
 
-    // Load the function context into rsi.
-    __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+  // Load the function context into rsi.
+  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
 
-    // Push the function and the receiver onto the stack.
-    __ push(rdx);
-    __ push(r8);
+  // Push the function and the receiver onto the stack.
+  __ push(rdx);
+  __ push(r8);
 
-    // Load the number of arguments and setup pointer to the arguments.
-    __ movq(rax, r9);
-    // Load the previous frame pointer to access C argument on stack
-    __ movq(kScratchRegister, Operand(rbp, 0));
-    __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
-    // Load the function pointer into rdi.
-    __ movq(rdi, rdx);
+  // Load the number of arguments and setup pointer to the arguments.
+  __ movq(rax, r9);
+  // Load the previous frame pointer to access C argument on stack
+  __ movq(kScratchRegister, Operand(rbp, 0));
+  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+  // Load the function pointer into rdi.
+  __ movq(rdi, rdx);
 #else  // _WIN64
-    // GCC parameters in:
-    // rdi : entry (ignored)
-    // rsi : function
-    // rdx : receiver
-    // rcx : argc
-    // r8  : argv
+  // GCC parameters in:
+  // rdi : entry (ignored)
+  // rsi : function
+  // rdx : receiver
+  // rcx : argc
+  // r8  : argv
 
-    __ movq(rdi, rsi);
-    // rdi : function
+  __ movq(rdi, rsi);
+  // rdi : function
 
-    // Clear the context before we push it when entering the internal frame.
-    __ Set(rsi, 0);
-    // Enter an internal frame.
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  // Clear the context before we push it when entering the JS frame.
+  __ Set(rsi, 0);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
 
-    // Push the function and receiver and setup the context.
-    __ push(rdi);
-    __ push(rdx);
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  // Push the function and receiver and setup the context.
+  __ push(rdi);
+  __ push(rdx);
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-    // Load the number of arguments and setup pointer to the arguments.
-    __ movq(rax, rcx);
-    __ movq(rbx, r8);
+  // Load the number of arguments and setup pointer to the arguments.
+  __ movq(rax, rcx);
+  __ movq(rbx, r8);
 #endif  // _WIN64
 
-    // Current stack contents:
-    // [rsp + 2 * kPointerSize ... ]: Internal frame
-    // [rsp + kPointerSize]         : function
-    // [rsp]                        : receiver
-    // Current register contents:
-    // rax : argc
-    // rbx : argv
-    // rsi : context
-    // rdi : function
+  // Current stack contents:
+  // [rsp + 2 * kPointerSize ... ]: Internal frame
+  // [rsp + kPointerSize]         : function
+  // [rsp]                        : receiver
+  // Current register contents:
+  // rax : argc
+  // rbx : argv
+  // rsi : context
+  // rdi : function
 
-    // Copy arguments to the stack in a loop.
-    // Register rbx points to array of pointers to handle locations.
-    // Push the values of these handles.
-    Label loop, entry;
-    __ Set(rcx, 0);  // Set loop variable to 0.
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
-    __ push(Operand(kScratchRegister, 0));  // dereference handle
-    __ addq(rcx, Immediate(1));
-    __ bind(&entry);
-    __ cmpq(rcx, rax);
-    __ j(not_equal, &loop);
+  // Copy arguments to the stack in a loop.
+  // Register rbx points to array of pointers to handle locations.
+  // Push the values of these handles.
+  Label loop, entry;
+  __ Set(rcx, 0);  // Set loop variable to 0.
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+  __ push(Operand(kScratchRegister, 0));  // dereference handle
+  __ addq(rcx, Immediate(1));
+  __ bind(&entry);
+  __ cmpq(rcx, rax);
+  __ j(not_equal, &loop);
 
-    // Invoke the code.
-    if (is_construct) {
-      // Expects rdi to hold function pointer.
-      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
-      __ CallStub(&stub);
-    } else {
-      ParameterCount actual(rax);
-      // Function must be in rdi.
-      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                        NullCallWrapper(), CALL_AS_METHOD);
-    }
-    // Exit the internal frame. Notice that this also removes the empty
-    // context and the function left on the stack by the code
-    // invocation.
+  // Invoke the code.
+  if (is_construct) {
+    // Expects rdi to hold function pointer.
+    __ Call(masm->isolate()->builtins()->JSConstructCall(),
+            RelocInfo::CODE_TARGET);
+  } else {
+    ParameterCount actual(rax);
+    // Function must be in rdi.
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
   }
 
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
   // TODO(X64): Is argument correct? Is there a receiver to remove?
-  __ ret(1 * kPointerSize);  // Remove receiver.
+  __ ret(1 * kPointerSize);  // remove receiver
 }
 
 
@@ -515,24 +526,23 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Push a copy of the function onto the stack.
-    __ push(rdi);
-    // Push call kind information.
-    __ push(rcx);
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
-    __ push(rdi);  // Function is also the parameter to the runtime call.
-    __ CallRuntime(Runtime::kLazyCompile, 1);
+  __ push(rdi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyCompile, 1);
 
-    // Restore call kind information.
-    __ pop(rcx);
-    // Restore receiver.
-    __ pop(rdi);
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore receiver.
+  __ pop(rdi);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -542,24 +552,23 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Push a copy of the function onto the stack.
-    __ push(rdi);
-    // Push call kind information.
-    __ push(rcx);
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
 
-    __ push(rdi);  // Function is also the parameter to the runtime call.
-    __ CallRuntime(Runtime::kLazyRecompile, 1);
+  __ push(rdi);  // Function is also the parameter to the runtime call.
+  __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-    // Restore call kind information.
-    __ pop(rcx);
-    // Restore function.
-    __ pop(rdi);
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore function.
+  __ pop(rdi);
 
-    // Tear down internal frame.
-  }
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -570,15 +579,14 @@
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Pass the deoptimization type to the runtime system.
-    __ Push(Smi::FromInt(static_cast<int>(type)));
+  // Pass the deoptimization type to the runtime system.
+  __ Push(Smi::FromInt(static_cast<int>(type)));
 
-    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-    // Tear down internal frame.
-  }
+  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  // Tear down temporary frame.
+  __ LeaveInternalFrame();
 
   // Get the full codegen state from the stack and untag it.
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -615,10 +623,9 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ Pushad();
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kNotifyOSR, 0);
-  }
+  __ EnterInternalFrame();
+  __ CallRuntime(Runtime::kNotifyOSR, 0);
+  __ LeaveInternalFrame();
   __ Popad();
   __ ret(0);
 }
@@ -640,7 +647,7 @@
     __ testq(rax, rax);
     __ j(not_zero, &done);
     __ pop(rbx);
-    __ Push(masm->isolate()->factory()->undefined_value());
+    __ Push(FACTORY->undefined_value());
     __ push(rbx);
     __ incq(rax);
     __ bind(&done);
@@ -688,21 +695,18 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    {
-      // Enter an internal frame in order to preserve argument count.
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Integer32ToSmi(rax, rax);
-      __ push(rax);
+    __ EnterInternalFrame();  // In order to preserve argument count.
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
 
-      __ push(rbx);
-      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-      __ movq(rbx, rax);
-      __ Set(rdx, 0);  // indicate regular JS_FUNCTION
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+    __ Set(rdx, 0);  // indicate regular JS_FUNCTION
 
-      __ pop(rax);
-      __ SmiToInteger32(rax, rax);
-    }
-
+    __ pop(rax);
+    __ SmiToInteger32(rax, rax);
+    __ LeaveInternalFrame();
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
     __ jmp(&patch_receiver, Label::kNear);
@@ -803,166 +807,168 @@
   //  rsp+8: arguments
   // rsp+16: receiver ("this")
   // rsp+24: function
-  {
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Stack frame:
-    //    rbp: Old base pointer
-    // rbp[1]: return address
-    // rbp[2]: function arguments
-    // rbp[3]: receiver
-    // rbp[4]: function
-    static const int kArgumentsOffset = 2 * kPointerSize;
-    static const int kReceiverOffset = 3 * kPointerSize;
-    static const int kFunctionOffset = 4 * kPointerSize;
+  __ EnterInternalFrame();
+  // Stack frame:
+  //    rbp: Old base pointer
+  // rbp[1]: return address
+  // rbp[2]: function arguments
+  // rbp[3]: receiver
+  // rbp[4]: function
+  static const int kArgumentsOffset = 2 * kPointerSize;
+  static const int kReceiverOffset = 3 * kPointerSize;
+  static const int kFunctionOffset = 4 * kPointerSize;
 
-    __ push(Operand(rbp, kFunctionOffset));
-    __ push(Operand(rbp, kArgumentsOffset));
-    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(Operand(rbp, kArgumentsOffset));
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-    // Check the stack for overflow. We are not trying to catch
-    // interruptions (e.g. debug break and preemption) here, so the "real stack
-    // limit" is checked.
-    Label okay;
-    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
-    __ movq(rcx, rsp);
-    // Make rcx the space we have left. The stack might already be overflowed
-    // here which will cause rcx to become negative.
-    __ subq(rcx, kScratchRegister);
-    // Make rdx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-    // Check if the arguments will overflow the stack.
-    __ cmpq(rcx, rdx);
-    __ j(greater, &okay);  // Signed comparison.
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  Label okay;
+  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+  __ movq(rcx, rsp);
+  // Make rcx the space we have left. The stack might already be overflowed
+  // here which will cause rcx to become negative.
+  __ subq(rcx, kScratchRegister);
+  // Make rdx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+  // Check if the arguments will overflow the stack.
+  __ cmpq(rcx, rdx);
+  __ j(greater, &okay);  // Signed comparison.
 
-    // Out of stack space.
-    __ push(Operand(rbp, kFunctionOffset));
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-    // End of stack check.
+  // Out of stack space.
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(rax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
 
-    // Push current index and limit.
-    const int kLimitOffset =
-        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-    __ push(rax);  // limit
-    __ push(Immediate(0));  // index
+  // Push current index and limit.
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+  __ push(rax);  // limit
+  __ push(Immediate(0));  // index
 
-    // Get the receiver.
-    __ movq(rbx, Operand(rbp, kReceiverOffset));
+  // Get the receiver.
+  __ movq(rbx, Operand(rbp, kReceiverOffset));
 
-    // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver;
-    __ movq(rdi, Operand(rbp, kFunctionOffset));
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-    __ j(not_equal, &push_receiver);
+  // Check that the function is a JS function (otherwise it must be a proxy).
+  Label push_receiver;
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &push_receiver);
 
-    // Change context eagerly to get the right global object if necessary.
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  // Change context eagerly to get the right global object if necessary.
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-    // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
-    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
-             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-    __ j(not_equal, &push_receiver);
+  // Do not transform the receiver for strict mode functions.
+  Label call_to_object, use_global_receiver;
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+  __ j(not_equal, &push_receiver);
 
-    // Do not transform the receiver for natives.
-    __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
-             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-    __ j(not_equal, &push_receiver);
+  // Do not transform the receiver for natives.
+  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+  __ j(not_equal, &push_receiver);
 
-    // Compute the receiver in non-strict mode.
-    __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
-    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-    __ j(equal, &use_global_receiver);
-    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &use_global_receiver);
+  // Compute the receiver in non-strict mode.
+  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+  __ j(equal, &use_global_receiver);
+  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &use_global_receiver);
 
-    // If given receiver is already a JavaScript object then there's no
-    // reason for converting it.
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
-    __ j(above_equal, &push_receiver);
+  // If given receiver is already a JavaScript object then there's no
+  // reason for converting it.
+  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+  __ j(above_equal, &push_receiver);
 
-    // Convert the receiver to an object.
-    __ bind(&call_to_object);
-    __ push(rbx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ movq(rbx, rax);
-    __ jmp(&push_receiver, Label::kNear);
+  // Convert the receiver to an object.
+  __ bind(&call_to_object);
+  __ push(rbx);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ movq(rbx, rax);
+  __ jmp(&push_receiver, Label::kNear);
 
-    // Use the current global receiver object as the receiver.
-    __ bind(&use_global_receiver);
-    const int kGlobalOffset =
-        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-    __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-    __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
-    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+  // Use the current global receiver object as the receiver.
+  __ bind(&use_global_receiver);
+  const int kGlobalOffset =
+      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
-    // Push the receiver.
-    __ bind(&push_receiver);
-    __ push(rbx);
+  // Push the receiver.
+  __ bind(&push_receiver);
+  __ push(rbx);
 
-    // Copy all arguments from the array to the stack.
-    Label entry, loop;
-    __ movq(rax, Operand(rbp, kIndexOffset));
-    __ jmp(&entry);
-    __ bind(&loop);
-    __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+  // Copy all arguments from the array to the stack.
+  Label entry, loop;
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
 
-    // Use inline caching to speed up access to arguments.
-    Handle<Code> ic =
-        masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-    __ Call(ic, RelocInfo::CODE_TARGET);
-    // It is important that we do not have a test instruction after the
-    // call.  A test instruction after the call is used to indicate that
-    // we have generated an inline version of the keyed load.  In this
-    // case, we know that we are not generating a test instruction next.
+  // Use inline caching to speed up access to arguments.
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // It is important that we do not have a test instruction after the
+  // call.  A test instruction after the call is used to indicate that
+  // we have generated an inline version of the keyed load.  In this
+  // case, we know that we are not generating a test instruction next.
 
-    // Push the nth argument.
-    __ push(rax);
+  // Push the nth argument.
+  __ push(rax);
 
-    // Update the index on the stack and in register rax.
-    __ movq(rax, Operand(rbp, kIndexOffset));
-    __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-    __ movq(Operand(rbp, kIndexOffset), rax);
+  // Update the index on the stack and in register rax.
+  __ movq(rax, Operand(rbp, kIndexOffset));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+  __ movq(Operand(rbp, kIndexOffset), rax);
 
-    __ bind(&entry);
-    __ cmpq(rax, Operand(rbp, kLimitOffset));
-    __ j(not_equal, &loop);
+  __ bind(&entry);
+  __ cmpq(rax, Operand(rbp, kLimitOffset));
+  __ j(not_equal, &loop);
 
-    // Invoke the function.
-    Label call_proxy;
-    ParameterCount actual(rax);
-    __ SmiToInteger32(rax, rax);
-    __ movq(rdi, Operand(rbp, kFunctionOffset));
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-    __ j(not_equal, &call_proxy);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+  // Invoke the function.
+  Label call_proxy;
+  ParameterCount actual(rax);
+  __ SmiToInteger32(rax, rax);
+  __ movq(rdi, Operand(rbp, kFunctionOffset));
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &call_proxy);
+  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                    NullCallWrapper(), CALL_AS_METHOD);
 
-    frame_scope.GenerateLeaveFrame();
-    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+  __ LeaveInternalFrame();
+  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-    // Invoke the function proxy.
-    __ bind(&call_proxy);
-    __ push(rdi);  // add function proxy as last argument
-    __ incq(rax);
-    __ Set(rbx, 0);
-    __ SetCallKind(rcx, CALL_AS_METHOD);
-    __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
-    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-            RelocInfo::CODE_TARGET);
+  // Invoke the function proxy.
+  __ bind(&call_proxy);
+  __ push(rdi);  // add function proxy as last argument
+  __ incq(rax);
+  __ Set(rbx, 0);
+  __ SetCallKind(rcx, CALL_AS_METHOD);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
 
-    // Leave internal frame.
-  }
+  __ LeaveInternalFrame();
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
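
The stack check in the apply builtin above boils down to one signed byte-count
comparison before the arguments array is unrolled onto the stack. A hedged C++
sketch of that arithmetic (function name and the literal 8 are illustrative):

    #include <cstdint>

    // Compare the space left below rsp with the space the unrolled
    // arguments will need, both in bytes. space_left may already be
    // negative on an overflowed stack, hence the signed comparison
    // (j(greater, &okay)) in the generated code.
    bool ArgumentsFitOnStack(intptr_t rsp, intptr_t real_stack_limit,
                             intptr_t argc) {
      intptr_t space_left = rsp - real_stack_limit;
      intptr_t space_needed = argc * 8;  // kPointerSize on x64
      return space_left > space_needed;
    }
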
 
 
+// Number of empty elements to allocate for an empty array.
+static const int kPreallocatedArrayElements = 4;
+
+
 // Allocate an empty JSArray. The allocated array is put into the result
 // register. If the parameter initial_capacity is larger than zero an elements
 // backing store is allocated with this size and filled with the hole values.
@@ -973,11 +979,13 @@
                                  Register scratch1,
                                  Register scratch2,
                                  Register scratch3,
+                                 int initial_capacity,
                                  Label* gc_required) {
-  const int initial_capacity = JSArray::kPreallocatedArrayElements;
-  STATIC_ASSERT(initial_capacity >= 0);
+  ASSERT(initial_capacity >= 0);
 
-  __ LoadInitialArrayMap(array_function, scratch2, scratch1);
+  // Load the initial map from the array function.
+  __ movq(scratch1, FieldOperand(array_function,
+                                 JSFunction::kPrototypeOrInitialMapOffset));
 
   // Allocate the JSArray object together with space for a fixed array with the
   // requested elements.
@@ -997,10 +1005,9 @@
   // result: JSObject
   // scratch1: initial map
   // scratch2: start of next object
-  Factory* factory = masm->isolate()->factory();
   __ movq(FieldOperand(result, JSObject::kMapOffset), scratch1);
   __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
-          factory->empty_fixed_array());
+          FACTORY->empty_fixed_array());
   // Field JSArray::kElementsOffset is initialized later.
   __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
 
@@ -1008,7 +1015,7 @@
   // fixed array.
   if (initial_capacity == 0) {
     __ Move(FieldOperand(result, JSArray::kElementsOffset),
-            factory->empty_fixed_array());
+            FACTORY->empty_fixed_array());
     return;
   }
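
The hunks in this region swap masm->isolate()->factory() for the FACTORY
macro, which in this era of the codebase resolved to the current isolate's
factory via a thread-local lookup. A hedged sketch of the difference (the
macro body is recalled from memory and should be treated as an assumption):

    // Hedged sketch: both spellings reach the same per-isolate Factory.
    //   #define FACTORY ((v8::internal::Isolate::Current())->factory())
    void FactoryLookups(MacroAssembler* masm) {
      Factory* via_macro = FACTORY;                    // thread-local lookup
      Factory* via_masm = masm->isolate()->factory();  // explicit isolate
      ASSERT(via_macro == via_masm);
    }
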
 
@@ -1025,14 +1032,15 @@
   // scratch1: elements array
   // scratch2: start of next object
   __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
-          factory->fixed_array_map());
+          FACTORY->fixed_array_map());
   __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
           Smi::FromInt(initial_capacity));
 
   // Fill the FixedArray with the hole value. Inline the code if short.
   // Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
   static const int kLoopUnfoldLimit = 4;
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  ASSERT(kPreallocatedArrayElements <= kLoopUnfoldLimit);
+  __ Move(scratch3, FACTORY->the_hole_value());
   if (initial_capacity <= kLoopUnfoldLimit) {
     // Use a scratch register here to have only one reloc info when unfolding
     // the loop.
@@ -1043,17 +1051,13 @@
     }
   } else {
     Label loop, entry;
-    __ movq(scratch2, Immediate(initial_capacity));
     __ jmp(&entry);
     __ bind(&loop);
-    __ movq(FieldOperand(scratch1,
-                         scratch2,
-                         times_pointer_size,
-                         FixedArray::kHeaderSize),
-            scratch3);
+    __ movq(Operand(scratch1, 0), scratch3);
+    __ addq(scratch1, Immediate(kPointerSize));
     __ bind(&entry);
-    __ decq(scratch2);
-    __ j(not_sign, &loop);
+    __ cmpq(scratch1, scratch2);
+    __ j(below, &loop);
   }
 }
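
Restated in C++, the fill above unfolds the loop for small capacities and
otherwise bumps a pointer until it reaches the end address, matching the
cmpq/j(below) loop in the restored code (a sketch; the_hole stands in for the
real hole value):

    #include <cstdint>

    // Fill a freshly allocated elements array with the hole value.
    void FillWithHole(intptr_t* elements, int capacity, intptr_t the_hole) {
      static const int kLoopUnfoldLimit = 4;
      if (capacity <= kLoopUnfoldLimit) {
        for (int i = 0; i < capacity; i++) elements[i] = the_hole;  // unfolded
      } else {
        for (intptr_t* p = elements; p < elements + capacity; p++) {
          *p = the_hole;  // pointer-bump loop
        }
      }
    }
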
 
@@ -1069,22 +1073,38 @@
 // register elements_array is scratched.
 static void AllocateJSArray(MacroAssembler* masm,
                             Register array_function,  // Array function.
-                            Register array_size,  // As a smi, cannot be 0.
+                            Register array_size,  // As a smi.
                             Register result,
                             Register elements_array,
                             Register elements_array_end,
                             Register scratch,
                             bool fill_with_hole,
                             Label* gc_required) {
-  __ LoadInitialArrayMap(array_function, scratch, elements_array);
+  Label not_empty, allocated;
 
-  if (FLAG_debug_code) {  // Assert that array size is not zero.
-    __ testq(array_size, array_size);
-    __ Assert(not_zero, "array size is unexpectedly 0");
-  }
+  // Load the initial map from the array function.
+  __ movq(elements_array,
+          FieldOperand(array_function,
+                       JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check whether an empty sized array is requested.
+  __ testq(array_size, array_size);
+  __ j(not_zero, &not_empty);
+
+  // If an empty array is requested allocate a small elements array anyway. This
+  // keeps the code below free of special casing for the empty array.
+  int size = JSArray::kSize + FixedArray::SizeFor(kPreallocatedArrayElements);
+  __ AllocateInNewSpace(size,
+                        result,
+                        elements_array_end,
+                        scratch,
+                        gc_required,
+                        TAG_OBJECT);
+  __ jmp(&allocated);
 
   // Allocate the JSArray object together with space for a FixedArray with the
   // requested elements.
+  __ bind(&not_empty);
   SmiIndex index =
       masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
   __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
@@ -1102,9 +1122,9 @@
   // elements_array: initial map
   // elements_array_end: start of next object
   // array_size: size of array (smi)
-  Factory* factory = masm->isolate()->factory();
+  __ bind(&allocated);
   __ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
-  __ Move(elements_array, factory->empty_fixed_array());
+  __ Move(elements_array, FACTORY->empty_fixed_array());
   __ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
   // Field JSArray::kElementsOffset is initialized later.
   __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
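
The AllocateInNewSpace calls here size one block to hold the JSArray header
plus a FixedArray header plus one pointer per element. As arithmetic, with the
header sizes stated as assumptions about the object layouts (JSArray: map,
properties, elements, length; FixedArray: map, length):

    // Illustrative size computation for the combined allocation.
    int CombinedAllocationSize(int length) {
      const int kPointerSize = 8;                    // x64
      const int kJSArraySize = 4 * kPointerSize;
      const int kFixedArrayHeaderSize = 2 * kPointerSize;
      return kJSArraySize + kFixedArrayHeaderSize + length * kPointerSize;
    }
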
@@ -1123,7 +1143,16 @@
   // elements_array_end: start of next object
   // array_size: size of array (smi)
   __ Move(FieldOperand(elements_array, JSObject::kMapOffset),
-          factory->fixed_array_map());
+          FACTORY->fixed_array_map());
+  Label not_empty_2, fill_array;
+  __ SmiTest(array_size);
+  __ j(not_zero, &not_empty_2);
+  // Length of the FixedArray is the number of pre-allocated elements even
+  // though the actual JSArray has length 0.
+  __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
+          Smi::FromInt(kPreallocatedArrayElements));
+  __ jmp(&fill_array);
+  __ bind(&not_empty_2);
   // For non-empty JSArrays the length of the FixedArray and the JSArray is the
   // same.
   __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
@@ -1132,9 +1161,10 @@
   // result: JSObject
   // elements_array: elements array
   // elements_array_end: start of next object
+  __ bind(&fill_array);
   if (fill_with_hole) {
     Label loop, entry;
-    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ Move(scratch, FACTORY->the_hole_value());
     __ lea(elements_array, Operand(elements_array,
                                    FixedArray::kHeaderSize - kHeapObjectTag));
     __ jmp(&entry);
@@ -1163,15 +1193,13 @@
 // Both registers are preserved by this code so no need to differentiate between
 // a construct call and a normal call.
 static void ArrayNativeCode(MacroAssembler* masm,
-                            Label* call_generic_code) {
-  Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array,
-      has_non_smi_element, finish, cant_transition_map, not_double;
+                            Label *call_generic_code) {
+  Label argc_one_or_more, argc_two_or_more;
 
   // Check for array construction with zero arguments.
   __ testq(rax, rax);
   __ j(not_zero, &argc_one_or_more);
 
-  __ bind(&empty_array);
   // Handle construction of an empty array.
   AllocateEmptyJSArray(masm,
                        rdi,
@@ -1179,6 +1207,7 @@
                        rcx,
                        rdx,
                        r8,
+                       kPreallocatedArrayElements,
                        call_generic_code);
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->array_function_native(), 1);
@@ -1191,16 +1220,6 @@
   __ cmpq(rax, Immediate(1));
   __ j(not_equal, &argc_two_or_more);
   __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
-
-  __ SmiTest(rdx);
-  __ j(not_zero, &not_empty_array);
-  __ pop(r8);  // Adjust stack.
-  __ Drop(1);
-  __ push(r8);
-  __ movq(rax, Immediate(0));  // Treat this as a call with argc of zero.
-  __ jmp(&empty_array);
-
-  __ bind(&not_empty_array);
   __ JumpUnlessNonNegativeSmi(rdx, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
@@ -1270,11 +1289,8 @@
   __ movq(rcx, rax);
   __ jmp(&entry);
   __ bind(&loop);
-  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
-  if (FLAG_smi_only_arrays) {
-    __ JumpIfNotSmi(r8, &has_non_smi_element);
-  }
-  __ movq(Operand(rdx, 0), r8);
+  __ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+  __ movq(Operand(rdx, 0), kScratchRegister);
   __ addq(rdx, Immediate(kPointerSize));
   __ bind(&entry);
   __ decq(rcx);
@@ -1285,81 +1301,11 @@
   // rbx: JSArray
   // esp[0]: return address
   // esp[8]: last argument
-  __ bind(&finish);
   __ pop(rcx);
   __ lea(rsp, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
   __ push(rcx);
   __ movq(rax, rbx);
   __ ret(0);
-
-  __ bind(&has_non_smi_element);
-  // Double values are handled by the runtime.
-  __ CheckMap(r8,
-              masm->isolate()->factory()->heap_number_map(),
-              &not_double,
-              DONT_DO_SMI_CHECK);
-  __ bind(&cant_transition_map);
-  __ UndoAllocationInNewSpace(rbx);
-  __ jmp(call_generic_code);
-
-  __ bind(&not_double);
-  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
-  // rbx: JSArray
-  __ movq(r11, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         r11,
-                                         kScratchRegister,
-                                         &cant_transition_map);
-
-  __ movq(FieldOperand(rbx, HeapObject::kMapOffset), r11);
-  __ RecordWriteField(rbx, HeapObject::kMapOffset, r11, r8,
-                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  // Finish the array initialization loop.
-  Label loop2;
-  __ bind(&loop2);
-  __ movq(r8, Operand(r9, rcx, times_pointer_size, 0));
-  __ movq(Operand(rdx, 0), r8);
-  __ addq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
-  __ j(greater_equal, &loop2);
-  __ jmp(&finish);
-}
-
-
-void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax : argc
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : last argument
-  // -----------------------------------
-  Label generic_array_code;
-
-  // Get the InternalArray function.
-  __ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, rdi);
-
-  if (FLAG_debug_code) {
-    // Initial map for the builtin InternalArray functions should be maps.
-    __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi.
-    STATIC_ASSERT(kSmiTag == 0);
-    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
-    __ Check(not_smi, "Unexpected initial map for InternalArray function");
-    __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Check(equal, "Unexpected initial map for InternalArray function");
-  }
-
-  // Run the native code for the InternalArray function called as a normal
-  // function.
-  ArrayNativeCode(masm, &generic_array_code);
-
-  // Jump to the generic array code in case the specialized code cannot handle
-  // the construction.
-  __ bind(&generic_array_code);
-  Handle<Code> array_code =
-      masm->isolate()->builtins()->InternalArrayCodeGeneric();
-  __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
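
At the JavaScript level, the three paths ArrayNativeCode dispatches between
correspond to the overloads of the Array constructor. A hedged pseudo-C++
rendering of the semantics being compiled (all helper names hypothetical):

    Handle<JSArray> ArraySemantics(int argc, Handle<Object>* args) {
      if (argc == 0) return NewEmptyArray();        // new Array()
      if (argc == 1) {
        // new Array(n): a single smi argument is a length, not an
        // element; non-smi or oversized lengths fall back to the
        // generic code.
        return NewArrayWithLength(SmiValue(args[0]));
      }
      return NewArrayFromArguments(argc, args);     // new Array(a, b, ...)
    }
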
 
 
@@ -1432,130 +1378,9 @@
 
 
 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax                 : number of arguments
-  //  -- rdi                 : constructor function
-  //  -- rsp[0]              : return address
-  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
-  //  -- rsp[(argc + 1) * 8] : receiver
-  // -----------------------------------
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->string_ctor_calls(), 1);
-
-  if (FLAG_debug_code) {
-    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
-    __ cmpq(rdi, rcx);
-    __ Assert(equal, "Unexpected String function");
-  }
-
-  // Load the first argument into rax and get rid of the rest
-  // (including the receiver).
-  Label no_arguments;
-  __ testq(rax, rax);
-  __ j(zero, &no_arguments);
-  __ movq(rbx, Operand(rsp, rax, times_pointer_size, 0));
-  __ pop(rcx);
-  __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
-  __ push(rcx);
-  __ movq(rax, rbx);
-
-  // Lookup the argument in the number to string cache.
-  Label not_cached, argument_is_string;
-  NumberToStringStub::GenerateLookupNumberStringCache(
-      masm,
-      rax,  // Input.
-      rbx,  // Result.
-      rcx,  // Scratch 1.
-      rdx,  // Scratch 2.
-      false,  // Input is known to be smi?
-      &not_cached);
-  __ IncrementCounter(counters->string_ctor_cached_number(), 1);
-  __ bind(&argument_is_string);
-
-  // ----------- S t a t e -------------
-  //  -- rbx    : argument converted to string
-  //  -- rdi    : constructor function
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  // Allocate a JSValue and put the tagged pointer into rax.
-  Label gc_required;
-  __ AllocateInNewSpace(JSValue::kSize,
-                        rax,  // Result.
-                        rcx,  // New allocation top (we ignore it).
-                        no_reg,
-                        &gc_required,
-                        TAG_OBJECT);
-
-  // Set the map.
-  __ LoadGlobalFunctionInitialMap(rdi, rcx);
-  if (FLAG_debug_code) {
-    __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
-            Immediate(JSValue::kSize >> kPointerSizeLog2));
-    __ Assert(equal, "Unexpected string wrapper instance size");
-    __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
-    __ Assert(equal, "Unexpected unused properties of string wrapper");
-  }
-  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
-
-  // Set properties and elements.
-  __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
-  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
-
-  // Set the value.
-  __ movq(FieldOperand(rax, JSValue::kValueOffset), rbx);
-
-  // Ensure the object is fully initialized.
-  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
-
-  // We're done. Return.
-  __ ret(0);
-
-  // The argument was not found in the number to string cache. Check
-  // if it's a string already before calling the conversion builtin.
-  Label convert_argument;
-  __ bind(&not_cached);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(rax, &convert_argument);
-  Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
-  __ j(NegateCondition(is_string), &convert_argument);
-  __ movq(rbx, rax);
-  __ IncrementCounter(counters->string_ctor_string_value(), 1);
-  __ jmp(&argument_is_string);
-
-  // Invoke the conversion builtin and put the result into rbx.
-  __ bind(&convert_argument);
-  __ IncrementCounter(counters->string_ctor_conversions(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(rdi);  // Preserve the function.
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-    __ pop(rdi);
-  }
-  __ movq(rbx, rax);
-  __ jmp(&argument_is_string);
-
-  // Load the empty string into rbx, remove the receiver from the
-  // stack, and jump back to the case where the argument is a string.
-  __ bind(&no_arguments);
-  __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
-  __ pop(rcx);
-  __ lea(rsp, Operand(rsp, kPointerSize));
-  __ push(rcx);
-  __ jmp(&argument_is_string);
-
-  // At this point the argument is already a string. Call runtime to
-  // create a string wrapper.
-  __ bind(&gc_required);
-  __ IncrementCounter(counters->string_ctor_gc_required(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(rbx);
-    __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  }
-  __ ret(0);
+  // TODO(849): implement custom construct stub.
+  // Generate a copy of the generic stub for now.
+  Generate_JSConstructStubGeneric(masm);
 }
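
What the removed specialized stub produced was a JSValue wrapper: an object of
four pointer-sized fields whose last word holds the primitive string, which is
what the STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize) in the removed code
pins down. A hedged sketch of that layout (field order assumed to follow the
standard JSObject header):

    struct JSValueLayout {
      void* map;         // string wrapper map
      void* properties;  // empty fixed array
      void* elements;    // empty fixed array
      void* value;       // the wrapped primitive string
    };
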
 
 
@@ -1664,9 +1489,6 @@
   __ bind(&invoke);
   __ call(rdx);
 
-  // Store offset of return address for deoptimizer.
-  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
   __ ret(0);
@@ -1698,11 +1520,10 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(rax);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  }
+  __ EnterInternalFrame();
+  __ push(rax);
+  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  __ LeaveInternalFrame();
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
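
Several hunks in this file replace FrameScope blocks with explicit
EnterInternalFrame/LeaveInternalFrame pairs. The newer construct was
essentially an RAII wrapper over that pair; a minimal sketch of the idea
(class name hypothetical):

    // Entering in the constructor guarantees the matching leave when
    // the scope closes; the rolled-back code must pair them by hand.
    class InternalFrameScope {
     public:
      explicit InternalFrameScope(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~InternalFrameScope() { masm_->LeaveInternalFrame(); }
     private:
      MacroAssembler* masm_;
    };
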
@@ -1720,9 +1541,7 @@
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  if (FLAG_debug_code) {
-    __ Abort("Unreachable code: returned from tail call.");
-  }
+  __ Abort("Unreachable code: returned from tail call.");
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 2845039..6499ea0 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -68,9 +68,9 @@
   // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
-  int map_index = (language_mode_ == CLASSIC_MODE)
-      ? Context::FUNCTION_MAP_INDEX
-      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+  int map_index = strict_mode_ == kStrictMode
+      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
+      : Context::FUNCTION_MAP_INDEX;
 
   // Compute the function map in the current global context and set that
   // as the map of the allocated object.
@@ -124,12 +124,12 @@
   // Get the function from the stack.
   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
 
-  // Set up the object header.
+  // Setup the object header.
   __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
   __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
   __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
 
-  // Set up the fixed slots.
+  // Setup the fixed slots.
   __ Set(rbx, 0);  // Set to NULL.
   __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
   __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
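
The Context::SlotOffset(...) operands above turn a slot index into an
untagged byte offset; the tag subtraction compensates for the tagged object
pointer. A hedged reconstruction of the helper, with the constants stated as
assumptions (FixedArray-style header, kHeapObjectTag == 1):

    static int SlotOffset(int index) {
      const int kPointerSize = 8;                // x64
      const int kHeaderSize = 2 * kPointerSize;  // map + length
      const int kHeapObjectTag = 1;
      return kHeaderSize + index * kPointerSize - kHeapObjectTag;
    }
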
@@ -155,131 +155,6 @@
 }
 
 
-void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [rsp + (1 * kPointerSize)]: function
-  // [rsp + (2 * kPointerSize)]: serialized scope info
-
-  // Try to allocate the context in new space.
-  Label gc;
-  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
-  __ AllocateInNewSpace(FixedArray::SizeFor(length),
-                        rax, rbx, rcx, &gc, TAG_OBJECT);
-
-  // Get the function from the stack.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
-
-  // Get the serialized scope info from the stack.
-  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
-
-  // Set up the object header.
-  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
-  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
-  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
-
-  // If this block context is nested in the global context we get a smi
-  // sentinel instead of a function. The block context should get the
-  // canonical empty function of the global context as its closure which
-  // we still have to look up.
-  Label after_sentinel;
-  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
-  if (FLAG_debug_code) {
-    const char* message = "Expected 0 as a Smi sentinel";
-    __ cmpq(rcx, Immediate(0));
-    __ Assert(equal, message);
-  }
-  __ movq(rcx, GlobalObjectOperand());
-  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
-  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
-  __ bind(&after_sentinel);
-
-  // Set up the fixed slots.
-  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
-  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
-  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
-
-  // Copy the global object from the previous context.
-  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
-  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);
-
-  // Initialize the rest of the slots to the hole value.
-  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
-  for (int i = 0; i < slots_; i++) {
-    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
-  }
-
-  // Return and remove the on-stack parameter.
-  __ movq(rsi, rax);
-  __ ret(2 * kPointerSize);
-
-  // Need to collect. Call into runtime system.
-  __ bind(&gc);
-  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
-}
-
-
-static void GenerateFastCloneShallowArrayCommon(
-    MacroAssembler* masm,
-    int length,
-    FastCloneShallowArrayStub::Mode mode,
-    Label* fail) {
-  // Registers on entry:
-  //
-  // rcx: boilerplate literal array.
-  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
-  // All sizes here are multiples of kPointerSize.
-  int elements_size = 0;
-  if (length > 0) {
-    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length)
-        : FixedArray::SizeFor(length);
-  }
-  int size = JSArray::kSize + elements_size;
-
-  // Allocate both the JS array and the elements array in one big
-  // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
-
-  // Copy the JS array part.
-  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length == 0)) {
-      __ movq(rbx, FieldOperand(rcx, i));
-      __ movq(FieldOperand(rax, i), rbx);
-    }
-  }
-
-  if (length > 0) {
-    // Get hold of the elements array of the boilerplate and setup the
-    // elements pointer in the resulting object.
-    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ lea(rdx, Operand(rax, JSArray::kSize));
-    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
-    // Copy the elements array.
-    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
-      for (int i = 0; i < elements_size; i += kPointerSize) {
-        __ movq(rbx, FieldOperand(rcx, i));
-        __ movq(FieldOperand(rdx, i), rbx);
-      }
-    } else {
-      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
-      int i;
-      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
-        __ movq(rbx, FieldOperand(rcx, i));
-        __ movq(FieldOperand(rdx, i), rbx);
-      }
-      while (i < elements_size) {
-        __ movsd(xmm0, FieldOperand(rcx, i));
-        __ movsd(FieldOperand(rdx, i), xmm0);
-        i += kDoubleSize;
-      }
-      ASSERT(i == elements_size);
-    }
-  }
-}
-
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
   // Stack layout on entry:
   //
@@ -287,54 +162,29 @@
   // [rsp + (2 * kPointerSize)]: literal index.
   // [rsp + (3 * kPointerSize)]: literals array.
 
+  // All sizes here are multiples of kPointerSize.
+  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
+  int size = JSArray::kSize + elements_size;
+
   // Load boilerplate object into rcx and check if we need to create a
   // boilerplate.
+  Label slow_case;
   __ movq(rcx, Operand(rsp, 3 * kPointerSize));
   __ movq(rax, Operand(rsp, 2 * kPointerSize));
   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
   __ movq(rcx,
           FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
   __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-  Label slow_case;
   __ j(equal, &slow_case);
 
-  FastCloneShallowArrayStub::Mode mode = mode_;
-  // rcx is boilerplate object.
-  Factory* factory = masm->isolate()->factory();
-  if (mode == CLONE_ANY_ELEMENTS) {
-    Label double_elements, check_fast_elements;
-    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-           factory->fixed_cow_array_map());
-    __ j(not_equal, &check_fast_elements);
-    GenerateFastCloneShallowArrayCommon(masm, 0,
-                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&check_fast_elements);
-    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
-           factory->fixed_array_map());
-    __ j(not_equal, &double_elements);
-    GenerateFastCloneShallowArrayCommon(masm, length_,
-                                        CLONE_ELEMENTS, &slow_case);
-    __ ret(3 * kPointerSize);
-
-    __ bind(&double_elements);
-    mode = CLONE_DOUBLE_ELEMENTS;
-    // Fall through to generate the code to handle double elements.
-  }
-
   if (FLAG_debug_code) {
     const char* message;
     Heap::RootListIndex expected_map_index;
-    if (mode == CLONE_ELEMENTS) {
+    if (mode_ == CLONE_ELEMENTS) {
       message = "Expected (writable) fixed array";
       expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
     } else {
-      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
       message = "Expected copy-on-write fixed array";
       expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
     }
@@ -346,7 +196,33 @@
     __ pop(rcx);
   }
 
-  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+  // Allocate both the JS array and the elements array in one big
+  // allocation. This avoids multiple limit checks.
+  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+
+  // Copy the JS array part.
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rax, i), rbx);
+    }
+  }
+
+  if (length_ > 0) {
+    // Get hold of the elements array of the boilerplate and setup the
+    // elements pointer in the resulting object.
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ lea(rdx, Operand(rax, JSArray::kSize));
+    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+
+    // Copy the elements array.
+    for (int i = 0; i < elements_size; i += kPointerSize) {
+      __ movq(rbx, FieldOperand(rcx, i));
+      __ movq(FieldOperand(rdx, i), rbx);
+    }
+  }
+
+  // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
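
The clone above copies both the JSArray header and the elements backing store
one pointer-sized word at a time; the elements field itself is re-pointed at
the fresh store rather than copied. A host-side sketch of the copy loop:

    #include <cstdint>

    void CopyWords(intptr_t* dst, const intptr_t* src, int size_in_bytes) {
      int words = size_in_bytes / static_cast<int>(sizeof(intptr_t));
      for (int i = 0; i < words; i++) dst[i] = src[i];  // one word per step
    }
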
@@ -354,54 +230,9 @@
 }
 
 
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
-  //
-  // [rsp + kPointerSize]: object literal flags.
-  // [rsp + (2 * kPointerSize)]: constant properties.
-  // [rsp + (3 * kPointerSize)]: literal index.
-  // [rsp + (4 * kPointerSize)]: literals array.
-
-  // Load boilerplate object into ecx and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
-  __ movq(rax, Operand(rsp, 3 * kPointerSize));
-  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
-  __ movq(rcx,
-          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
-  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &slow_case);
-
-  // Check that the boilerplate contains only fast properties and we can
-  // statically determine the instance size.
-  int size = JSObject::kHeaderSize + length_ * kPointerSize;
-  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
-  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
-  __ j(not_equal, &slow_case);
-
-  // Allocate the JS object and copy header together with all in-object
-  // properties from the boilerplate.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
-  for (int i = 0; i < size; i += kPointerSize) {
-    __ movq(rbx, FieldOperand(rcx, i));
-    __ movq(FieldOperand(rax, i), rbx);
-  }
-
-  // Return and remove the on-stack parameters.
-  __ ret(4 * kPointerSize);
-
-  __ bind(&slow_case);
-  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   const Register argument = rax;
   const Register map = rdx;
@@ -497,25 +328,6 @@
 }
 
 
-void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
-  __ PushCallerSaved(save_doubles_);
-  const int argument_count = 1;
-  __ PrepareCallCFunction(argument_count);
-#ifdef _WIN64
-  __ LoadAddress(rcx, ExternalReference::isolate_address());
-#else
-  __ LoadAddress(rdi, ExternalReference::isolate_address());
-#endif
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ CallCFunction(
-      ExternalReference::store_buffer_overflow_function(masm->isolate()),
-      argument_count);
-  __ PopCallerSaved(save_doubles_);
-  __ ret(0);
-}
-
-
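
The #ifdef _WIN64 split in the removed stub exists because the two x64 calling
conventions disagree on the first argument register, so the isolate address
must be loaded into different registers:

    // First integer/pointer argument register under each x64 ABI.
    #ifdef _WIN64
    static const char* kIsolateArgReg = "rcx";  // Microsoft x64: rcx, rdx, r8, r9
    #else
    static const char* kIsolateArgReg = "rdi";  // System V: rdi, rsi, rdx, rcx, r8, r9
    #endif
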
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -810,13 +622,12 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(rax);
-      __ CallRuntime(Runtime::kNumberAlloc, 0);
-      __ movq(rcx, rax);
-      __ pop(rax);
-    }
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ movq(rcx, rax);
+    __ pop(rax);
+    __ LeaveInternalFrame();
     __ bind(&heapnumber_allocated);
     // rcx: allocated 'empty' number
 
@@ -940,10 +751,6 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1607,8 +1414,6 @@
   __ cmpq(rbx, Operand(rcx, 0));
   __ j(not_equal, &cache_miss, Label::kNear);
   // Cache hit!
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
   __ movq(rax, Operand(rcx, 2 * kIntSize));
   if (tagged) {
     __ fstp(0);  // Clear FPU stack.
@@ -1619,7 +1424,6 @@
   }
 
   __ bind(&cache_miss);
-  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
   // Update cache with new value.
   if (tagged) {
   __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
@@ -1628,7 +1432,7 @@
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
     __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
   }
-  GenerateOperation(masm, type_);
+  GenerateOperation(masm);
   __ movq(Operand(rcx, 0), rbx);
   __ movq(Operand(rcx, 2 * kIntSize), rax);
   __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
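
The cache consulted above is keyed on the raw bit pattern of the input double:
a hit returns the previously boxed result, a miss computes and stores. A
host-side sketch of the scheme (table size and hashing are assumptions, not
the stub's actual layout):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    struct Entry { uint64_t in_bits; double out; bool valid; };
    static Entry cache[512];

    double CachedSin(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));  // key on the exact bit pattern
      Entry& e = cache[bits % 512];
      if (e.valid && e.in_bits == bits) return e.out;  // cache hit
      e.in_bits = bits;
      e.out = std::sin(x);                             // miss: compute, fill
      e.valid = true;
      return e.out;
    }
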
@@ -1643,18 +1447,17 @@
     __ subq(rsp, Immediate(kDoubleSize));
     __ movsd(Operand(rsp, 0), xmm1);
     __ fld_d(Operand(rsp, 0));
-    GenerateOperation(masm, type_);
+    GenerateOperation(masm);
     __ fstp_d(Operand(rsp, 0));
     __ movsd(xmm1, Operand(rsp, 0));
     __ addq(rsp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      // Allocate an unused object bigger than a HeapNumber.
-      __ Push(Smi::FromInt(2 * kDoubleSize));
-      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    }
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ Push(Smi::FromInt(2 * kDoubleSize));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
     __ Ret();
   }
 
@@ -1670,11 +1473,10 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(rax, rdi, &skip_cache);
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(rax);
-      __ CallRuntime(RuntimeFunction(), 1);
-    }
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -1686,7 +1488,6 @@
     // Add more cases when necessary.
     case TranscendentalCache::SIN: return Runtime::kMath_sin;
     case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -1695,17 +1496,14 @@
 }
 
 
-void TranscendentalCacheStub::GenerateOperation(
-    MacroAssembler* masm, TranscendentalCache::Type type) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
   // Registers:
   // rax: Newly allocated HeapNumber, which must be preserved.
   // rbx: Bits of input double. Must be preserved.
   // rcx: Pointer to cache entry. Must be preserved.
   // st(0): Input double
   Label done;
-  if (type == TranscendentalCache::SIN ||
-      type == TranscendentalCache::COS ||
-      type == TranscendentalCache::TAN) {
+  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
     // Both fsin and fcos require arguments in the range +/-2^63 and
     // return NaN for infinities and NaN. They can share all code except
     // the actual fsin/fcos operation.
@@ -1726,12 +1524,8 @@
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
     __ fstp(0);
-    // NaN is represented by 0x7ff8000000000000.
-    __ subq(rsp, Immediate(kPointerSize));
-    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
-    __ movl(Operand(rsp, 0), Immediate(0x00000000));
-    __ fld_d(Operand(rsp, 0));
-    __ addq(rsp, Immediate(kPointerSize));
+    __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+    __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
     __ jmp(&done);
 
     __ bind(&non_nan_result);
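
The removed lines materialize the canonical quiet NaN by writing its IEEE-754
bit pattern to the stack and reloading it; the restored code instead loads the
heap's NaN root. The bit-pattern trick in host C++:

    #include <cstdint>
    #include <cstring>

    // 0x7ff8000000000000: exponent bits all ones plus the top mantissa
    // bit, i.e. the canonical quiet NaN.
    double QuietNaN() {
      const uint64_t kQuietNaNBits = 0x7ff8000000000000ULL;
      double result;
      std::memcpy(&result, &kQuietNaNBits, sizeof(result));
      return result;
    }
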
@@ -1772,25 +1566,19 @@
     // FPU Stack: input % 2*pi
     __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
     __ bind(&in_range);
-    switch (type) {
+    switch (type_) {
       case TranscendentalCache::SIN:
         __ fsin();
         break;
       case TranscendentalCache::COS:
         __ fcos();
         break;
-      case TranscendentalCache::TAN:
-        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
-        // FP register stack.
-        __ fptan();
-        __ fstp(0);  // Pop FP register stack.
-        break;
       default:
         UNREACHABLE();
     }
     __ bind(&done);
   } else {
-    ASSERT(type == TranscendentalCache::LOG);
+    ASSERT(type_ == TranscendentalCache::LOG);
     __ fldln2();
     __ fxch();
     __ fyl2x();
@@ -1996,259 +1784,152 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
-  const Register exponent = rdx;
-#else
-  const Register exponent = rdi;
-#endif
-  const Register base = rax;
-  const Register scratch = rcx;
-  const XMMRegister double_result = xmm3;
-  const XMMRegister double_base = xmm2;
-  const XMMRegister double_exponent = xmm1;
-  const XMMRegister double_scratch = xmm4;
+  // Registers are used as follows:
+  // rdx = base
+  // rax = exponent
+  // rcx = temporary, result
 
-  Label call_runtime, done, exponent_not_smi, int_exponent;
+  Label allocate_return, call_runtime;
 
-  // Save 1 in double_result - we need this several times later on.
-  __ movq(scratch, Immediate(1));
-  __ cvtlsi2sd(double_result, scratch);
+  // Load input parameters.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
-  if (exponent_type_ == ON_STACK) {
-    Label base_is_smi, unpack_exponent;
-    // The exponent and base are supplied as arguments on the stack.
-    // This can only happen if the stub is called from non-optimized code.
-    // Load input parameters from stack.
-    __ movq(base, Operand(rsp, 2 * kPointerSize));
-    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
-    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
-    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
+  // Save 1 in xmm3 - we need this several times later on.
+  __ Set(rcx, 1);
+  __ cvtlsi2sd(xmm3, rcx);
 
-    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
-    __ jmp(&unpack_exponent, Label::kNear);
+  Label exponent_nonsmi;
+  Label base_nonsmi;
+  // If the exponent is a heap number go to that specific case.
+  __ JumpIfNotSmi(rax, &exponent_nonsmi);
+  __ JumpIfNotSmi(rdx, &base_nonsmi);
 
-    __ bind(&base_is_smi);
-    __ SmiToInteger32(base, base);
-    __ cvtlsi2sd(double_base, base);
-    __ bind(&unpack_exponent);
+  // Optimized version when both exponent and base are smis.
+  Label powi;
+  __ SmiToInteger32(rdx, rdx);
+  __ cvtlsi2sd(xmm0, rdx);
+  __ jmp(&powi);
+  // Exponent is a smi and base is a heapnumber.
+  __ bind(&base_nonsmi);
+  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
 
-    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
-    __ SmiToInteger32(exponent, exponent);
-    __ jmp(&int_exponent);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
 
-    __ bind(&exponent_not_smi);
-    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &call_runtime);
-    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
-    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
-    __ SmiToInteger32(exponent, exponent);
-    __ jmp(&int_exponent);
+  // Optimized version of pow if exponent is a smi.
+  // xmm0 contains the base.
+  __ bind(&powi);
+  __ SmiToInteger32(rax, rax);
 
-    __ bind(&exponent_not_smi);
-    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
-  }
-
-  if (exponent_type_ != INTEGER) {
-    Label fast_power;
-    // Detect integer exponents stored as double.
-    __ cvttsd2si(exponent, double_exponent);
-    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
-    __ cmpl(exponent, Immediate(0x80000000u));
-    __ j(equal, &call_runtime);
-    __ cvtlsi2sd(double_scratch, exponent);
-    // Already ruled out NaNs for exponent.
-    __ ucomisd(double_exponent, double_scratch);
-    __ j(equal, &int_exponent);
-
-    if (exponent_type_ == ON_STACK) {
-      // Detect square root case.  Crankshaft detects constant +/-0.5 at
-      // compile time and uses DoMathPowHalf instead.  We then skip this check
-      // for non-constant cases of +/-0.5 as these hardly occur.
-      Label continue_sqrt, continue_rsqrt, not_plus_half;
-      // Test for 0.5.
-      // Load double_scratch with 0.5.
-      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
-      __ movq(double_scratch, scratch);
-      // Already ruled out NaNs for exponent.
-      __ ucomisd(double_scratch, double_exponent);
-      __ j(not_equal, &not_plus_half, Label::kNear);
-
-      // Calculates square root of base.  Check for the special case of
-      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      // According to IEEE-754, double-precision -Infinity has the highest
-      // 12 bits set and the lowest 52 bits cleared.
-      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
-      __ movq(double_scratch, scratch);
-      __ ucomisd(double_scratch, double_base);
-      // Comparing -Infinity with NaN results in "unordered", which sets the
-      // zero flag as if both were equal.  However, it also sets the carry flag.
-      __ j(not_equal, &continue_sqrt, Label::kNear);
-      __ j(carry, &continue_sqrt, Label::kNear);
-
-      // Set result to Infinity in the special case.
-      __ xorps(double_result, double_result);
-      __ subsd(double_result, double_scratch);
-      __ jmp(&done);
-
-      __ bind(&continue_sqrt);
-      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-      __ xorps(double_scratch, double_scratch);
-      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
-      __ sqrtsd(double_result, double_scratch);
-      __ jmp(&done);
-
-      // Test for -0.5.
-      __ bind(&not_plus_half);
-      // Load double_scratch with -0.5 by substracting 1.
-      __ subsd(double_scratch, double_result);
-      // Already ruled out NaNs for exponent.
-      __ ucomisd(double_scratch, double_exponent);
-      __ j(not_equal, &fast_power, Label::kNear);
-
-      // Calculates reciprocal of square root of base.  Check for the special
-      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      // According to IEEE-754, double-precision -Infinity has the highest
-      // 12 bits set and the lowest 52 bits cleared.
-      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
-      __ movq(double_scratch, scratch);
-      __ ucomisd(double_scratch, double_base);
-      // Comparing -Infinity with NaN results in "unordered", which sets the
-      // zero flag as if both were equal.  However, it also sets the carry flag.
-      __ j(not_equal, &continue_rsqrt, Label::kNear);
-      __ j(carry, &continue_rsqrt, Label::kNear);
-
-      // Set result to 0 in the special case.
-      __ xorps(double_result, double_result);
-      __ jmp(&done);
-
-      __ bind(&continue_rsqrt);
-      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
-      __ xorps(double_exponent, double_exponent);
-      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
-      __ sqrtsd(double_exponent, double_exponent);
-      __ divsd(double_result, double_exponent);
-      __ jmp(&done);
-    }
-
-    // Using FPU instructions to calculate power.
-    Label fast_power_failed;
-    __ bind(&fast_power);
-    __ fnclex();  // Clear flags to catch exceptions later.
-    // Transfer (B)ase and (E)xponent onto the FPU register stack.
-    __ subq(rsp, Immediate(kDoubleSize));
-    __ movsd(Operand(rsp, 0), double_exponent);
-    __ fld_d(Operand(rsp, 0));  // E
-    __ movsd(Operand(rsp, 0), double_base);
-    __ fld_d(Operand(rsp, 0));  // B, E
-
-    // Exponent is in st(1) and base is in st(0)
-    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
-    // FYL2X calculates st(1) * log2(st(0))
-    __ fyl2x();    // X
-    __ fld(0);     // X, X
-    __ frndint();  // rnd(X), X
-    __ fsub(1);    // rnd(X), X-rnd(X)
-    __ fxch(1);    // X - rnd(X), rnd(X)
-    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
-    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
-    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
-    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
-    // FSCALE calculates st(0) * 2^st(1)
-    __ fscale();   // 2^X, rnd(X)
-    __ fstp(1);
-    // Bail out to runtime in case of exceptions in the status word.
-    __ fnstsw_ax();
-    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
-    __ j(not_zero, &fast_power_failed, Label::kNear);
-    __ fstp_d(Operand(rsp, 0));
-    __ movsd(double_result, Operand(rsp, 0));
-    __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&done);
-
-    __ bind(&fast_power_failed);
-    __ fninit();
-    __ addq(rsp, Immediate(kDoubleSize));
-    __ jmp(&call_runtime);
-  }
-
-  // Calculate power with integer exponent.
-  __ bind(&int_exponent);
-  const XMMRegister double_scratch2 = double_exponent;
-  // Back up exponent as we need to check if exponent is negative later.
-  __ movq(scratch, exponent);  // Back up exponent.
-  __ movsd(double_scratch, double_base);  // Back up base.
-  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
+  // Save exponent in base as we need to check if exponent is negative later.
+  // We know that base and exponent are in different registers.
+  __ movq(rdx, rax);
 
   // Get absolute value of exponent.
-  Label no_neg, while_true, no_multiply;
-  __ testl(scratch, scratch);
-  __ j(positive, &no_neg, Label::kNear);
-  __ negl(scratch);
+  Label no_neg;
+  __ cmpl(rax, Immediate(0));
+  __ j(greater_equal, &no_neg, Label::kNear);
+  __ negl(rax);
   __ bind(&no_neg);
 
-  __ bind(&while_true);
-  __ shrl(scratch, Immediate(1));
-  __ j(not_carry, &no_multiply, Label::kNear);
-  __ mulsd(double_result, double_scratch);
-  __ bind(&no_multiply);
+  // Load xmm1 with 1.
+  __ movaps(xmm1, xmm3);
+  Label while_true;
+  Label no_multiply;
 
-  __ mulsd(double_scratch, double_scratch);
+  __ bind(&while_true);
+  __ shrl(rax, Immediate(1));
+  __ j(not_carry, &no_multiply, Label::kNear);
+  __ mulsd(xmm1, xmm0);
+  __ bind(&no_multiply);
+  __ mulsd(xmm0, xmm0);
   __ j(not_zero, &while_true);
 
-  // If the exponent is negative, return 1/result.
-  __ testl(exponent, exponent);
-  __ j(greater, &done);
-  __ divsd(double_scratch2, double_result);
-  __ movsd(double_result, double_scratch2);
-  // Test whether result is zero.  Bail out to check for subnormal result.
-  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
-  __ xorps(double_scratch2, double_scratch2);
-  __ ucomisd(double_scratch2, double_result);
-  // double_exponent aliased as double_scratch2 has already been overwritten
-  // and may not have contained the exponent value in the first place when the
-  // input was a smi.  We reset it with exponent value before bailing out.
-  __ j(not_equal, &done);
-  __ cvtlsi2sd(double_exponent, exponent);
+  // Base has the original value of the exponent - if the exponent  is
+  // negative return 1/result.
+  __ testl(rdx, rdx);
+  __ j(positive, &allocate_return);
+  // Special case if xmm1 has reached infinity.
+  __ divsd(xmm3, xmm1);
+  __ movaps(xmm1, xmm3);
+  __ xorps(xmm0, xmm0);
+  __ ucomisd(xmm0, xmm1);
+  __ j(equal, &call_runtime);
 
-  // Returning or bailing out.
-  Counters* counters = masm->isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
-    // The arguments are still on the stack.
-    __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+  __ jmp(&allocate_return);
 
-    // The stub is called from non-optimized code, which expects the result
-    // as heap number in eax.
-    __ bind(&done);
-    __ AllocateHeapNumber(rax, rcx, &call_runtime);
-    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
-    __ IncrementCounter(counters->math_pow(), 1);
-    __ ret(2 * kPointerSize);
-  } else {
-    __ bind(&call_runtime);
-    // Move base to the correct argument register.  Exponent is already in xmm1.
-    __ movsd(xmm0, double_base);
-    ASSERT(double_exponent.is(xmm1));
-    {
-      AllowExternalCallThatCantCauseGC scope(masm);
-      __ PrepareCallCFunction(2);
-      __ CallCFunction(
-          ExternalReference::power_double_double_function(masm->isolate()), 2);
-    }
-    // Return value is in xmm0.
-    __ movsd(double_result, xmm0);
-    // Restore context register.
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Exponent (or both) is a heap number - no matter what, we should now
+  // work on doubles.
+  __ bind(&exponent_nonsmi);
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  // Test if exponent is nan.
+  __ ucomisd(xmm1, xmm1);
+  __ j(parity_even, &call_runtime);
 
-    __ bind(&done);
-    __ IncrementCounter(counters->math_pow(), 1);
-    __ ret(0);
-  }
+  Label base_not_smi, handle_special_cases;
+  __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
+  __ SmiToInteger32(rdx, rdx);
+  __ cvtlsi2sd(xmm0, rdx);
+  __ jmp(&handle_special_cases, Label::kNear);
+
+  __ bind(&base_not_smi);
+  __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &call_runtime);
+  __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
+  __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
+  // base is NaN or +/-Infinity
+  __ j(greater_equal, &call_runtime);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+
+  // base is in xmm0 and exponent is in xmm1.
+  __ bind(&handle_special_cases);
+  Label not_minus_half;
+  // Test for -0.5.
+  // Load xmm2 with -0.5.
+  __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
+  __ movq(xmm2, rcx);
+  // xmm2 now has -0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &not_minus_half, Label::kNear);
+
+  // Calculates reciprocal of square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);
+  __ sqrtsd(xmm1, xmm1);
+  __ divsd(xmm3, xmm1);
+  __ movaps(xmm1, xmm3);
+  __ jmp(&allocate_return);
+
+  // Test for 0.5.
+  __ bind(&not_minus_half);
+  // Load xmm2 with 0.5.
+  // Since xmm3 is 1 and xmm2 is -0.5, this is simply xmm2 + xmm3.
+  __ addsd(xmm2, xmm3);
+  // xmm2 now has 0.5.
+  __ ucomisd(xmm2, xmm1);
+  __ j(not_equal, &call_runtime);
+  // Calculates square root.
+  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
+  __ xorps(xmm1, xmm1);
+  __ addsd(xmm1, xmm0);  // Convert -0 to 0.
+  __ sqrtsd(xmm1, xmm1);
+
+  __ bind(&allocate_return);
+  __ AllocateHeapNumber(rcx, rax, &call_runtime);
+  __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
+  __ movq(rax, rcx);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&call_runtime);
+  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
 }
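
For reference, the fast path this revert restores is plain binary exponentiation with a reciprocal for negative exponents and sqrt-based shortcuts for exponents of +/-0.5. A minimal C++ sketch of that algorithm follows; PowSketch is a hypothetical helper, not V8 code, and it falls back to std::pow where the real stub tail-calls the runtime.

```cpp
#include <cmath>
#include <cstdint>

double PowSketch(double base, double exponent) {
  if (exponent == 0.5) {
    // sqrt(-0) is -0 but the spec wants +0; adding +0 normalizes the sign.
    return std::sqrt(base + 0.0);
  }
  if (exponent == -0.5) {
    return 1.0 / std::sqrt(base + 0.0);
  }
  // Only exponents that are exact 32-bit integers take the fast loop.
  if (!(exponent >= INT32_MIN && exponent <= INT32_MAX) ||
      static_cast<double>(static_cast<int32_t>(exponent)) != exponent) {
    return std::pow(base, exponent);  // runtime fallback in the stub
  }
  int32_t n = static_cast<int32_t>(exponent);
  uint32_t m =
      n < 0 ? 0u - static_cast<uint32_t>(n) : static_cast<uint32_t>(n);
  double result = 1.0;
  for (double b = base; m != 0; m >>= 1) {  // mirrors the shrl/mulsd loop
    if (m & 1) result *= b;                 // low bit set: multiply in
    b *= b;                                 // square for the next bit
  }
  return n < 0 ? 1.0 / result : result;
}
```
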
 
 
@@ -2362,7 +2043,6 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ xor_(r8, r8);
   __ testq(rbx, rbx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
@@ -2405,7 +2085,7 @@
     __ movq(FieldOperand(rax, i), rdx);
   }
 
-  // Set up the callee in-object property.
+  // Setup the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ movq(rdx, Operand(rsp, 3 * kPointerSize));
   __ movq(FieldOperand(rax, JSObject::kHeaderSize +
@@ -2420,7 +2100,7 @@
                        Heap::kArgumentsLengthIndex * kPointerSize),
           rcx);
 
-  // Set up the elements pointer in the allocated arguments object.
+  // Setup the elements pointer in the allocated arguments object.
   // If we allocated a parameter map, edi will point there, otherwise to the
   // backing store.
   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
@@ -2456,13 +2136,16 @@
   Label parameters_loop, parameters_test;
 
   // Load tagged parameter count into r9.
-  __ Integer32ToSmi(r9, rbx);
+  __ movq(r9, Operand(rsp, 1 * kPointerSize));
   __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addq(r8, Operand(rsp, 1 * kPointerSize));
+  __ addq(r8, Operand(rsp, 3 * kPointerSize));
   __ subq(r8, r9);
   __ Move(r11, factory->the_hole_value());
   __ movq(rdx, rdi);
-  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+  __ SmiToInteger64(kScratchRegister, r9);
+  __ lea(rdi, Operand(rdi, kScratchRegister,
+                      times_pointer_size,
+                      kParameterMapHeaderSize));
   // r9 = loop variable (tagged)
   // r8 = mapping index (tagged)
   // r11 = the hole value
@@ -2498,8 +2181,9 @@
   Label arguments_loop, arguments_test;
   __ movq(r8, rbx);
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-  // Untag rcx for the loop below.
+  // Untag rcx and r8 for the loop below.
   __ SmiToInteger64(rcx, rcx);
+  __ SmiToInteger64(r8, r8);
   __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
   __ subq(rdx, kScratchRegister);
   __ jmp(&arguments_test, Label::kNear);
@@ -2623,7 +2307,7 @@
   // Get the parameters pointer from the stack.
   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
 
-  // Set up the elements pointer in the allocated arguments object and
+  // Setup the elements pointer in the allocated arguments object and
   // initialize the header in the elements fixed array.
   __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
@@ -2662,6 +2346,10 @@
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
+  if (!FLAG_regexp_entry_native) {
+    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+    return;
+  }
 
   // Stack frame on entry.
   //  rsp[0]: return address
@@ -2767,40 +2455,26 @@
   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
   // First check for flat two byte string.
-  __ andb(rbx, Immediate(kIsNotStringMask |
-                         kStringRepresentationMask |
-                         kStringEncodingMask |
-                         kShortExternalStringMask));
+  __ andb(rbx, Immediate(
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be a flat ASCII string.  None of the following
-  // string type tests will succeed if subject is not a string or a short
-  // external string.
-  __ andb(rbx, Immediate(kIsNotStringMask |
-                         kStringRepresentationMask |
-                         kShortExternalStringMask));
+  // Any other flat string must be a flat ascii string.
+  __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
-  // rbx: whether subject is a string and if yes, its string representation
   // Check for flat cons string or sliced string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
   // In the case of a sliced string its offset has to be taken into account.
-  Label cons_string, external_string, check_encoding;
+  Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
-  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmpq(rbx, Immediate(kExternalStringTag));
   __ j(less, &cons_string, Label::kNear);
-  __ j(equal, &external_string);
-
-  // Catch non-string subject or short external string.
-  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
-  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
-  __ j(not_zero, &runtime);
+  __ j(equal, &runtime);
 
   // String is sliced.
   __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
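
The string-shape handling this hunk restores reads as a one-level unwrap: a cons string whose second half is the empty string stands for its first half, and a sliced string stands for its parent at an offset; anything still non-sequential afterwards goes to the runtime. A toy C++ model, with types and names that are illustrative rather than V8's real object layout:

```cpp
#include <cstddef>
#include <string>

struct Str {
  enum Kind { kSeq, kCons, kSliced } kind;
  std::string data;             // kSeq: the characters
  const Str* first = nullptr;   // kCons: first half
  const Str* second = nullptr;  // kCons: second half (empty when "flat")
  const Str* parent = nullptr;  // kSliced: backing string
  size_t offset = 0;            // kSliced: start within the parent
};

// One unwrap step, as in the stub; nullptr models the runtime bailout.
const Str* UnwrapForRegExp(const Str* s, size_t* start) {
  if (s->kind == Str::kCons) {
    if (!s->second->data.empty()) return nullptr;  // non-flat cons
    s = s->first;
  } else if (s->kind == Str::kSliced) {
    *start += s->offset;  // account for the slice offset
    s = s->parent;
  }
  return s->kind == Str::kSeq ? s : nullptr;
}
```
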
@@ -2824,16 +2498,16 @@
            Immediate(kStringRepresentationMask | kStringEncodingMask));
   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
-  // Any other flat string must be sequential ASCII or external.
+  // Any other flat string must be ascii.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask));
-  __ j(not_zero, &external_string);
+  __ j(not_zero, &runtime);
 
   __ bind(&seq_ascii_string);
-  // rdi: subject string (sequential ASCII)
+  // rdi: subject string (sequential ascii)
   // rax: RegExp data (FixedArray)
   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
-  __ Set(rcx, 1);  // Type is ASCII.
+  __ Set(rcx, 1);  // Type is ascii.
   __ jmp(&check_code, Label::kNear);
 
   __ bind(&seq_two_byte_string);
@@ -2849,7 +2523,7 @@
   __ JumpIfSmi(r11, &runtime);
 
   // rdi: subject string
-  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
   // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
@@ -2857,7 +2531,7 @@
 
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = masm->isolate()->counters();
@@ -2914,7 +2588,7 @@
   // Keep track of aliasing between argX defined above and the registers used.
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // rcx: encoding of subject string (1 if ascii 0 if two_byte);
   // r11: code
   // r14: slice offset
   // r15: original subject string
@@ -2996,18 +2670,12 @@
   // Store last subject and last input.
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
-  __ RecordWriteField(rbx,
-                      RegExpImpl::kLastSubjectOffset,
-                      rax,
-                      rdi,
-                      kDontSaveFPRegs);
+  __ movq(rcx, rbx);
+  __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
-  __ RecordWriteField(rbx,
-                      RegExpImpl::kLastInputOffset,
-                      rax,
-                      rdi,
-                      kDontSaveFPRegs);
+  __ movq(rcx, rbx);
+  __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
 
   // Get the static offsets vector filled by the native regexp code.
   __ LoadAddress(rcx,
@@ -3059,28 +2727,7 @@
   __ Throw(rax);
 
   __ bind(&termination_exception);
-  __ ThrowUncatchable(rax);
-
-  // External string.  Short external strings have already been ruled out.
-  // rdi: subject string (expected to be external)
-  // rbx: scratch
-  __ bind(&external_string);
-  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ testb(rbx, Immediate(kIsIndirectStringMask));
-    __ Assert(zero, "external string expected, but not found");
-  }
-  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ testb(rbx, Immediate(kStringEncodingMask));
-  __ j(not_zero, &seq_ascii_string);
-  __ jmp(&seq_two_byte_string);
+  __ ThrowUncatchable(TERMINATION, rax);
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
@@ -3485,7 +3132,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(
       rdx, rax, rcx, rbx, &check_unequal_objects);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of ascii strings.
   if (cc_ == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      rdx,
@@ -3584,52 +3231,7 @@
 }
 
 
-void InterruptStub::Generate(MacroAssembler* masm) {
-  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
-  // Cache the called function in a global property cell.  Cache states
-  // are uninitialized, monomorphic (indicated by a JSFunction), and
-  // megamorphic.
-  // rbx : cache cell for call target
-  // rdi : the function to call
-  Isolate* isolate = masm->isolate();
-  Label initialize, done;
-
-  // Load the cache state into rcx.
-  __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
-
-  // A monomorphic cache hit or an already megamorphic state: invoke the
-  // function without changing the state.
-  __ cmpq(rcx, rdi);
-  __ j(equal, &done, Label::kNear);
-  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
-  __ j(equal, &done, Label::kNear);
-
-  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
-  // megamorphic.
-  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
-  __ j(equal, &initialize, Label::kNear);
-  // MegamorphicSentinel is an immortal immovable object (undefined) so no
-  // write-barrier is needed.
-  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-          TypeFeedbackCells::MegamorphicSentinel(isolate));
-  __ jmp(&done, Label::kNear);
-
-  // An uninitialized cache is patched with the function.
-  __ bind(&initialize);
-  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
-  // No need for a write barrier here - cells are rescanned.
-
-  __ bind(&done);
-}
-
-
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  // rdi : the function to call
-  // rbx : cache cell for call target
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
@@ -3650,6 +3252,10 @@
     __ bind(&call);
   }
 
+  // Get the function to call from the stack.
+  // +2 ~ receiver, return address
+  __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
+
   // Check that the function really is a JavaScript function.
   __ JumpIfSmi(rdi, &non_function);
   // Goto slow case if we do not have a function.
@@ -3686,7 +3292,7 @@
   __ push(rcx);
   __ Set(rax, argc_ + 1);
   __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
+  __ SetCallKind(rcx, CALL_AS_FUNCTION);
   __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
   {
     Handle<Code> adaptor =
@@ -3708,80 +3314,14 @@
 }
 
 
-void CallConstructStub::Generate(MacroAssembler* masm) {
-  // rax : number of arguments
-  // rbx : cache cell for call target
-  // rdi : constructor function
-  Label slow, non_function_call;
-
-  // Check that function is not a smi.
-  __ JumpIfSmi(rdi, &non_function_call);
-  // Check that function is a JSFunction.
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &slow);
-
-  if (RecordCallTarget()) {
-    GenerateRecordCallTarget(masm);
-  }
-
-  // Jump to the function-specific construct stub.
-  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
-  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
-  __ jmp(rbx);
-
-  // rdi: called object
-  // rax: number of arguments
-  // rcx: object map
-  Label do_call;
-  __ bind(&slow);
-  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
-  __ j(not_equal, &non_function_call);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
-  __ jmp(&do_call);
-
-  __ bind(&non_function_call);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ bind(&do_call);
-  // Set expected number of arguments to zero (not changing rax).
-  __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-}
-
-
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
 
 
-bool CEntryStub::IsPregenerated() {
-#ifdef _WIN64
-  return result_size_ == 1;
-#else
-  return true;
-#endif
-}
-
-
-void CodeStub::GenerateStubsAheadOfTime() {
-  CEntryStub::GenerateAheadOfTime();
-  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
-  // It is important that the store buffer overflow stubs are generated first.
-  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
-}
-
-
-void CodeStub::GenerateFPStubs() {
-}
-
-
-void CEntryStub::GenerateAheadOfTime() {
-  CEntryStub stub(1, kDontSaveFPRegs);
-  stub.GetCode()->set_is_pregenerated(true);
-  CEntryStub save_doubles(1, kSaveFPRegs);
-  save_doubles.GetCode()->set_is_pregenerated(true);
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // Throw exception in rax.
+  __ Throw(rax);
 }
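
The GenerateRecordCallTarget/CallConstructStub pair removed above maintained a one-way feedback cache per call site: uninitialized, then monomorphic (caching one JSFunction), then megamorphic, never back. A compact C++ sketch of that protocol; Cell and the sentinel values are stand-ins for JSGlobalPropertyCell and the TypeFeedbackCells sentinels, not V8's representation:

```cpp
#include <cstdint>

enum : uintptr_t { kUninitializedSentinel = 0, kMegamorphicSentinel = 1 };

struct Cell { uintptr_t value = kUninitializedSentinel; };

void RecordCallTarget(Cell* cell, uintptr_t function) {
  if (cell->value == function || cell->value == kMegamorphicSentinel) {
    return;  // monomorphic hit, or already given up: state unchanged
  }
  if (cell->value == kUninitializedSentinel) {
    cell->value = function;  // first observed target: go monomorphic
  } else {
    cell->value = kMegamorphicSentinel;  // second distinct target: give up
  }
}
```
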
 
 
@@ -3925,6 +3465,12 @@
 }
 
 
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+                                          UncatchableExceptionType type) {
+  __ ThrowUncatchable(type, rax);
+}
+
+
 void CEntryStub::Generate(MacroAssembler* masm) {
   // rax: number of arguments including receiver
   // rbx: pointer to C function  (C callee-saved)
@@ -3988,34 +3534,22 @@
                true);
 
   __ bind(&throw_out_of_memory_exception);
-  // Set external caught exception to false.
-  Isolate* isolate = masm->isolate();
-  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
-                                    isolate);
-  __ Set(rax, static_cast<int64_t>(false));
-  __ Store(external_caught, rax);
-
-  // Set pending exception and rax to out of memory exception.
-  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
-                                      isolate);
-  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
-  __ Store(pending_exception, rax);
-  // Fall through to the next label.
+  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
 
   __ bind(&throw_termination_exception);
-  __ ThrowUncatchable(rax);
+  GenerateThrowUncatchable(masm, TERMINATION);
 
   __ bind(&throw_normal_exception);
-  __ Throw(rax);
+  GenerateThrowTOS(masm);
 }
 
 
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  Label invoke, handler_entry, exit;
+  Label invoke, exit;
   Label not_outermost_js, not_outermost_js_2;
   {  // NOLINT. Scope block confuses linter.
     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
-    // Set up frame.
+    // Setup frame.
     __ push(rbp);
     __ movq(rbp, rsp);
 
@@ -4071,23 +3605,20 @@
   __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
 
-  // Jump to a faked try block that does the invoke, with a faked catch
-  // block that sets the pending exception.
-  __ jmp(&invoke);
-  __ bind(&handler_entry);
-  handler_offset_ = handler_entry.pos();
-  // Caught exception: Store result (exception) in the pending exception
-  // field in the JSEnv and return a failure sentinel.
+  // Call a faked try-block that does the invoke.
+  __ call(&invoke);
+
+  // Caught exception: Store result (exception) in the pending
+  // exception field in the JSEnv and return a failure sentinel.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       isolate);
   __ Store(pending_exception, rax);
   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
   __ jmp(&exit);
 
-  // Invoke: Link this frame into the handler chain.  There's only one
-  // handler block in this code object, so its index is 0.
+  // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
-  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
 
   // Clear any pending exceptions.
   __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
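
The "faked try block" the entry stub reverts to is easier to follow as a plain handler stack: entering JS pushes a handler onto an intrusive chain, and a throw records the pending exception and unwinds to the innermost handler. A minimal model, illustrative only; V8 keeps this state in the Isolate and on the machine stack:

```cpp
struct TryHandler {
  TryHandler* next = nullptr;
};

struct IsolateModel {
  TryHandler* handler_chain = nullptr;
  const void* pending_exception = nullptr;

  void PushTryHandler(TryHandler* h) {
    h->next = handler_chain;  // link the new frame's handler in front
    handler_chain = h;
  }

  // The "caught exception" path after the faked try block: remember the
  // exception, pop the handler, and let the caller return a failure value.
  void Throw(const void* exception) {
    pending_exception = exception;
    handler_chain = handler_chain->next;
  }
};
```
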
@@ -4096,11 +3627,11 @@
   // Fake a receiver (NULL).
   __ push(Immediate(0));  // receiver
 
-  // Invoke the function by calling through JS entry trampoline builtin and
-  // pop the faked function when we return. We load the address from an
-  // external reference instead of inlining the call target address directly
-  // in the code, because the builtin stubs may not have been generated yet
-  // at the time this code is generated.
+  // Invoke the function by calling through JS entry trampoline
+  // builtin and pop the faked function when we return. We load the address
+  // from an external reference instead of inlining the call target address
+  // directly in the code, because the builtin stubs may not have been
+  // generated yet at the time this code is generated.
   if (is_construct) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate);
@@ -4209,7 +3740,7 @@
     __ bind(&miss);
   }
 
-  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
+  __ TryGetFunctionPrototype(rdx, rbx, &slow);
 
   // Check that the function prototype is a JS object.
   __ JumpIfSmi(rbx, &slow);
@@ -4226,17 +3757,14 @@
     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
+    __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
     if (FLAG_debug_code) {
       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
       __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
     }
-    __ movq(kScratchRegister,
-            Operand(kScratchRegister, kOffsetToMapCheckValue));
-    __ movq(Operand(kScratchRegister, 0), rax);
   }
 
   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
@@ -4263,11 +3791,9 @@
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of true in the root array at the inline check site.
-    int true_offset = 0x100 +
-        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
-    // Assert it is a 1-byte signed value.
-    ASSERT(true_offset >= 0 && true_offset < 0x100);
-    __ movl(rax, Immediate(true_offset));
+    ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+        == 0xB0 - 0x100);
+    __ movl(rax, Immediate(0xB0));  // TrueValue is at -10 * kPointerSize.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -4286,11 +3812,9 @@
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of false in the root array at the inline check site.
-    int false_offset = 0x100 +
-        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
-    // Assert it is a 1-byte signed value.
-    ASSERT(false_offset >= 0 && false_offset < 0x100);
-    __ movl(rax, Immediate(false_offset));
+    ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
+        == 0xB8 - 0x100);
+    __ movl(rax, Immediate(0xB8));  // FalseValue is at -9 * kPointerSize.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
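
The asserts this hunk reintroduces hard-code the patched byte (0xB0/0xB8); the general form computed by the removed code is a biased root-array offset that must fit in one byte. A small sketch, where kPointerSizeLog2 = 3 and kRootRegisterBias = 128 are assumptions modelled on typical V8 x64 values rather than taken from this diff:

```cpp
#include <cassert>

constexpr int kPointerSizeLog2 = 3;    // assumed: 8-byte pointers
constexpr int kRootRegisterBias = 128; // assumed root-register bias

int RootOffsetByte(int root_index) {
  // Biased so the result stays in [0, 0x100) and fits the one-byte
  // immediate written by the movb above.
  int offset = 0x100 + (root_index << kPointerSizeLog2) - kRootRegisterBias;
  assert(offset >= 0 && offset < 0x100);
  return offset;
}
```
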
@@ -4380,25 +3904,85 @@
 
   // If the index is non-smi trigger the non-smi case.
   __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  // Put smi-tagged index into scratch register.
+  __ movq(scratch_, index_);
   __ bind(&got_smi_index_);
 
   // Check for index out of range.
-  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
+  __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  __ SmiToInteger32(index_, index_);
+  // We need special handling for non-flat strings.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(result_, Immediate(kStringRepresentationMask));
+  __ j(zero, &flat_string);
 
-  StringCharLoadGenerator::Generate(
-      masm, object_, index_, result_, &call_runtime_);
+  // Handle non-flat strings.
+  __ and_(result_, Immediate(kStringRepresentationMask));
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  __ cmpb(result_, Immediate(kExternalStringTag));
+  __ j(greater, &sliced_string);
+  __ j(equal, &call_runtime_);
 
+  // ConsString.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  Label assure_seq_string;
+  __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+                 Heap::kEmptyStringRootIndex);
+  __ j(not_equal, &call_runtime_);
+  // Get the first of the two strings and load its instance type.
+  __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+  __ jmp(&assure_seq_string, Label::kNear);
+
+  // SlicedString, unpack and add offset.
+  __ bind(&sliced_string);
+  __ addq(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
+  __ movq(object_, FieldOperand(object_, SlicedString::kParentOffset));
+
+  __ bind(&assure_seq_string);
+  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+  // If the first cons component is also non-flat, then go to runtime.
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(result_, Immediate(kStringRepresentationMask));
+  __ j(not_zero, &call_runtime_);
+  __ jmp(&flat_string);
+
+  // Check for 1-byte or 2-byte string.
+  __ bind(&flat_string);
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testb(result_, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
+
+  // 2-byte string.
+  // Load the 2-byte character code into the result register.
+  __ SmiToInteger32(scratch_, scratch_);
+  __ movzxwl(result_, FieldOperand(object_,
+                                   scratch_, times_2,
+                                   SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ SmiToInteger32(scratch_, scratch_);
+  __ movzxbl(result_, FieldOperand(object_,
+                                   scratch_, times_1,
+                                   SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
   __ Integer32ToSmi(result_, result_);
   __ bind(&exit_);
 }
 
 
 void StringCharCodeAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
 
   Factory* factory = masm->isolate()->factory();
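
The fast path restored above ends in a two-way encoding dispatch: ascii strings load one byte per character, two-byte strings one 16-bit unit. A toy C++ equivalent of that final load; FlatString is illustrative, not V8's object layout:

```cpp
#include <cstddef>
#include <cstdint>

struct FlatString {
  bool is_ascii;
  const uint8_t* bytes;   // valid when is_ascii
  const uint16_t* units;  // valid otherwise

  uint16_t CharCodeAt(size_t index) const {
    // Mirrors the movzxbl / movzxwl pair in the stub.
    return is_ascii ? bytes[index] : units[index];
  }
};
```
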
@@ -4411,6 +3995,7 @@
               DONT_DO_SMI_CHECK);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ push(index_);
   __ push(index_);  // Consumed by runtime conversion function.
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -4419,18 +4004,19 @@
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!index_.is(rax)) {
+  if (!scratch_.is(rax)) {
     // Save the conversion result before the pop instructions below
     // have a chance to overwrite it.
-    __ movq(index_, rax);
+    __ movq(scratch_, rax);
   }
+  __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  __ JumpIfNotSmi(index_, index_out_of_range_);
+  __ JumpIfNotSmi(scratch_, index_out_of_range_);
   // Otherwise, return to the fast path.
   __ jmp(&got_smi_index_);
 
@@ -4440,7 +4026,6 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
-  __ Integer32ToSmi(index_, index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(rax)) {
@@ -4473,8 +4058,7 @@
 
 
 void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   __ Abort("Unexpected fallthrough to CharFromCode slow case");
 
   __ bind(&slow_case_);
@@ -4501,15 +4085,14 @@
 
 
 void StringCharAtGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
   char_code_at_generator_.GenerateSlow(masm, call_helper);
   char_from_code_generator_.GenerateSlow(masm, call_helper);
 }
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label call_runtime, call_builtin;
+  Label string_add_runtime, call_builtin;
   Builtins::JavaScript builtin_id = Builtins::ADD;
 
   // Load the two arguments.
@@ -4518,14 +4101,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (flags_ == NO_STRING_ADD_FLAGS) {
-    __ JumpIfSmi(rax, &call_runtime);
+    __ JumpIfSmi(rax, &string_add_runtime);
     __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
-    __ j(above_equal, &call_runtime);
+    __ j(above_equal, &string_add_runtime);
 
     // First argument is a string, test second.
-    __ JumpIfSmi(rdx, &call_runtime);
+    __ JumpIfSmi(rdx, &string_add_runtime);
     __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
-    __ j(above_equal, &call_runtime);
+    __ j(above_equal, &string_add_runtime);
   } else {
     // Here at least one of the arguments is definitely a string.
     // We convert the one that is not known to be a string.
@@ -4591,9 +4174,9 @@
   __ SmiCompare(rbx, Smi::FromInt(2));
   __ j(not_equal, &longer_than_two);
 
-  // Check that both strings are non-external ASCII strings.
+  // Check that both strings are non-external ascii strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
-                                                  &call_runtime);
+                                                  &string_add_runtime);
 
   // Get the two characters forming the sub string.
   __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -4608,30 +4191,20 @@
   __ ret(2 * kPointerSize);
 
   __ bind(&make_two_character_string);
-  __ Set(rdi, 2);
-  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
-  // rbx - first byte: first character
-  // rbx - second byte: *maybe* second character
-  // Make sure that the second byte of rbx contains the second character.
-  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
-  __ shll(rcx, Immediate(kBitsPerByte));
-  __ orl(rbx, rcx);
-  // Write both characters to the new string.
-  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
-  __ IncrementCounter(counters->string_add_native(), 1);
-  __ ret(2 * kPointerSize);
+  __ Set(rbx, 2);
+  __ jmp(&make_flat_ascii_string);
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
+  __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
   __ j(below, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
-  __ j(above, &call_runtime);
+  __ j(above, &string_add_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object. If
-  // both strings are ASCII the result is an ASCII cons string.
+  // both strings are ascii the result is an ascii cons string.
   // rax: first string
   // rbx: length of resulting flat string
   // rdx: second string
@@ -4645,8 +4218,8 @@
   __ testl(rcx, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
-  // Allocate an ASCII cons string.
-  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
+  // Allocate an ascii cons string.
+  __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
   // Fill the fields of the cons string.
   __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
@@ -4659,7 +4232,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ASCII characters.
+  // to contain only ascii characters.
   // rcx: first instance type AND second instance type.
   // r8: first instance type.
   // r9: second instance type.
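
The surrounding hunks restore the older concatenation policy: results shorter than the non-flat threshold are copied into a fresh flat string, longer ones become a cons (rope) node in O(1). A sketch under assumed types; kMinNonFlatLength = 13 is stated here as an assumption about old V8's String::kMinNonFlatLength, and both inputs are assumed already flat:

```cpp
#include <cstddef>
#include <memory>
#include <string>

constexpr size_t kMinNonFlatLength = 13;  // assumed threshold

struct StrNode {
  std::string flat;                        // leaf payload
  std::shared_ptr<StrNode> first, second;  // set on cons nodes
  size_t length = 0;
};

std::shared_ptr<StrNode> Concat(const std::shared_ptr<StrNode>& a,
                                const std::shared_ptr<StrNode>& b) {
  auto result = std::make_shared<StrNode>();
  result->length = a->length + b->length;
  if (result->length < kMinNonFlatLength) {
    result->flat = a->flat + b->flat;  // short: copy the characters
  } else {
    result->first = a;                 // long: share both halves, O(1)
    result->second = b;
  }
  return result;
}
```
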
@@ -4671,103 +4244,111 @@
   __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
   __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
-  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
+  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
 
-  // We cannot encounter sliced strings or cons strings here since:
-  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
-  // Handle creating a flat result from either external or sequential strings.
-  // Locate the first characters' locations.
+  // Handle creating a flat result. First check that both strings are not
+  // external strings.
   // rax: first string
   // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
-  Label first_prepared, second_prepared;
-  Label first_is_sequential, second_is_sequential;
   __ bind(&string_add_flat_result);
-
-  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
-  // r14: length of first string
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(r8, Immediate(kStringRepresentationMask));
-  __ j(zero, &first_is_sequential, Label::kNear);
-  // Rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ testb(r8, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &call_runtime);
-  __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
-  __ jmp(&first_prepared, Label::kNear);
-  __ bind(&first_is_sequential);
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
-  __ bind(&first_prepared);
-
-  // Check whether both strings have same encoding.
-  __ xorl(r8, r9);
-  __ testb(r8, Immediate(kStringEncodingMask));
-  __ j(not_zero, &call_runtime);
-
-  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
-  // r15: length of second string
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(r9, Immediate(kStringRepresentationMask));
-  __ j(zero, &second_is_sequential, Label::kNear);
-  // Rule out short external string and load string resource.
-  STATIC_ASSERT(kShortExternalStringTag != 0);
-  __ testb(r9, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &call_runtime);
-  __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
-  __ jmp(&second_prepared, Label::kNear);
-  __ bind(&second_is_sequential);
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
-  __ bind(&second_prepared);
-
-  Label non_ascii_string_add_flat_result;
-  // r9: instance type of second string
-  // First string and second string have the same encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
   __ SmiToInteger32(rbx, rbx);
-  __ testb(r9, Immediate(kStringEncodingMask));
+  __ movl(rcx, r8);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  __ movl(rcx, r9);
+  __ and_(rcx, Immediate(kStringRepresentationMask));
+  __ cmpl(rcx, Immediate(kExternalStringTag));
+  __ j(equal, &string_add_runtime);
+  // We cannot encounter sliced strings here since:
+  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+  // Now check if both strings are ascii strings.
+  // rax: first string
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
+  Label non_ascii_string_add_flat_result;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testl(r8, Immediate(kStringEncodingMask));
   __ j(zero, &non_ascii_string_add_flat_result);
+  __ testl(r9, Immediate(kStringEncodingMask));
+  __ j(zero, &string_add_runtime);
 
   __ bind(&make_flat_ascii_string);
-  // Both strings are ASCII strings. As they are short they are both flat.
-  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
-  // rax: result string
+  // Both strings are ascii strings. As they are short they are both flat.
+  __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
   // Locate first character of result.
-  __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
-  // rcx: first char of first string
-  // rbx: first character of result
-  // r14: length of first string
-  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
-  // rbx: next character of result
-  // rdx: first char of second string
-  // r15: length of second string
-  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
+  __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument
+  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second string
+  // rdi: length of first argument
+  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
+  // Locate first character of second argument.
+  __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
+  __ movq(rax, rbx);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
+  // Handle creating a flat two byte result.
+  // rax: first string - known to be two byte
+  // rbx: length of resulting flat string
+  // rdx: second string
+  // r8: instance type of first string
+  // r9: instance type of second string
   __ bind(&non_ascii_string_add_flat_result);
-  // Both strings are ASCII strings. As they are short they are both flat.
-  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
-  // rax: result string
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ and_(r9, Immediate(kStringEncodingMask));
+  __ j(not_zero, &string_add_runtime);
+  // Both strings are two byte strings. As they are short they are both
+  // flat.
+  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
+  // rcx: result string
+  __ movq(rbx, rcx);
   // Locate first character of result.
-  __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
-  // rcx: first char of first string
-  // rbx: first character of result
-  // r14: length of first string
-  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
-  // rbx: next character of result
-  // rdx: first char of second string
-  // r15: length of second string
-  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
+  __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // Locate first character of first argument.
+  __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
+  __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rax: first char of first argument
+  // rbx: result string
+  // rcx: first character of result
+  // rdx: second argument
+  // rdi: length of first argument
+  StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
+  // Locate first character of second argument.
+  __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
+  __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  // rbx: result string
+  // rcx: next character of result
+  // rdx: first char of second argument
+  // rdi: length of second argument
+  StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
+  __ movq(rax, rbx);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
   // Just jump to runtime to add the two strings.
-  __ bind(&call_runtime);
+  __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
@@ -4985,12 +4566,7 @@
 
     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
-    // Must be the hole (deleted entry).
-    if (FLAG_debug_code) {
-      __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-      __ cmpq(kScratchRegister, candidate);
-      __ Assert(equal, "oddball in symbol table is not undefined or the hole");
-    }
+    // Must be null (deleted entry).
     __ jmp(&next_probe[i]);
 
     __ bind(&is_string);
@@ -5004,7 +4580,7 @@
     // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
     Register temp = kScratchRegister;
 
-    // Check that the candidate is a non-external ASCII string.
+    // Check that the candidate is a non-external ascii string.
     __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
         temp, temp, &next_probe[i]);
@@ -5119,12 +4695,8 @@
 
   __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
   __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
-  Label not_original_string;
-  __ j(not_equal, &not_original_string, Label::kNear);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(kArgumentsSize);
-  __ bind(&not_original_string);
+  Label return_rax;
+  __ j(equal, &return_rax);
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
   // cache). Two character strings are looked for in the symbol cache.
@@ -5143,77 +4715,71 @@
   // Get the two characters forming the sub string.
   __ SmiToInteger32(rdx, rdx);  // From index is no longer smi.
   __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ movzxbq(rdi,
+  __ movzxbq(rcx,
              FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
 
   // Try to lookup two character string in symbol table.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
-  __ IncrementCounter(counters->sub_string_native(), 1);
+      masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
   __ ret(3 * kPointerSize);
 
   __ bind(&make_two_character_string);
-  // Set up registers for allocating the two character string.
-  __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
-  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
-  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
-  __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(3 * kPointerSize);
-
-  __ bind(&result_longer_than_two);
-  // rax: string
-  // rbx: instance type
-  // rcx: sub string length
-  // rdx: from index (smi)
-  // Deal with different string types: update the index if necessary
-  // and put the underlying string into edi.
-  Label underlying_unpacked, sliced_string, seq_or_external_string;
-  // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
-  STATIC_ASSERT(kIsIndirectStringMask != 0);
-  __ testb(rbx, Immediate(kIsIndirectStringMask));
-  __ j(zero, &seq_or_external_string, Label::kNear);
-
-  __ testb(rbx, Immediate(kSlicedNotConsMask));
-  __ j(not_zero, &sliced_string, Label::kNear);
-  // Cons string.  Check whether it is flat, then fetch first part.
-  // Flat cons strings have an empty second part.
-  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, &runtime);
-  __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
-  // Update instance type.
-  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
+  // Setup registers for allocating the two character string.
+  __ movq(rax, Operand(rsp, kStringOffset));
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&sliced_string);
-  // Sliced string.  Fetch parent and correct start index by offset.
-  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
-  __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
-  // Update instance type.
-  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ jmp(&underlying_unpacked, Label::kNear);
-
-  __ bind(&seq_or_external_string);
-  // Sequential or external string.  Just move string to the correct register.
-  __ movq(rdi, rax);
-
-  __ bind(&underlying_unpacked);
+  __ Set(rcx, 2);
 
   if (FLAG_string_slices) {
     Label copy_routine;
-    // rdi: underlying subject string
-    // rbx: instance type of underlying subject string
-    // rdx: adjusted start index (smi)
-    // rcx: length
     // If coming from the make_two_character_string path, the string
     // is too short to be sliced anyway.
+    STATIC_ASSERT(2 < SlicedString::kMinLength);
+    __ jmp(&copy_routine);
+    __ bind(&result_longer_than_two);
+
+    // rax: string
+    // rbx: instance type
+    // rcx: sub string length
+    // rdx: from index (smi)
+    Label allocate_slice, sliced_string, seq_string;
     __ cmpq(rcx, Immediate(SlicedString::kMinLength));
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
+    STATIC_ASSERT(kSeqStringTag == 0);
+    __ testb(rbx, Immediate(kStringRepresentationMask));
+    __ j(zero, &seq_string, Label::kNear);
+    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+    STATIC_ASSERT(kIsIndirectStringMask != 0);
+    __ testb(rbx, Immediate(kIsIndirectStringMask));
+    // External string.  Jump to runtime.
+    __ j(zero, &runtime);
+
+    __ testb(rbx, Immediate(kSlicedNotConsMask));
+    __ j(not_zero, &sliced_string, Label::kNear);
+    // Cons string.  Check whether it is flat, then fetch first part.
+    __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
+                   Heap::kEmptyStringRootIndex);
+    __ j(not_equal, &runtime);
+    __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&sliced_string);
+    // Sliced string.  Fetch parent and correct start index by offset.
+    __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+    __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
+    __ jmp(&allocate_slice, Label::kNear);
+
+    __ bind(&seq_string);
+    // Sequential string.  Just move string to the right register.
+    __ movq(rdi, rax);
+
+    __ bind(&allocate_slice);
+    // rdi: underlying subject string
+    // rbx: instance type of original subject string
+    // rdx: offset
+    // rcx: length
     // Allocate new sliced string.  At this point we do not reload the instance
     // type including the string encoding because we simply rely on the info
     // provided by the original string.  It does not matter if the original
@@ -5224,96 +4790,93 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
     __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
+    __ AllocateAsciiSlicedString(rax, rbx, no_reg, &runtime);
     __ jmp(&set_slice_header, Label::kNear);
     __ bind(&two_byte_slice);
-    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
+    __ AllocateTwoByteSlicedString(rax, rbx, no_reg, &runtime);
     __ bind(&set_slice_header);
+    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
     __ Integer32ToSmi(rcx, rcx);
     __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
+    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
     __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
-    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
-    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
-    __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(kArgumentsSize);
+    __ jmp(&return_rax);
 
     __ bind(&copy_routine);
+  } else {
+    __ bind(&result_longer_than_two);
   }
 
-  // rdi: underlying subject string
-  // rbx: instance type of underlying subject string
-  // rdx: adjusted start index (smi)
-  // rcx: length
-  // The subject string can only be external or sequential string of either
-  // encoding at this point.
-  Label two_byte_sequential, sequential_string;
-  STATIC_ASSERT(kExternalStringTag != 0);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(rbx, Immediate(kExternalStringTag));
-  __ j(zero, &sequential_string);
-
-  // Handle external string.
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ testb(rbx, Immediate(kShortExternalStringMask));
-  __ j(not_zero, &runtime);
-  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
-  // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&sequential_string);
-  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
-  __ testb(rbx, Immediate(kStringEncodingMask));
-  __ j(zero, &two_byte_sequential);
+  // rax: string
+  // rbx: instance type
+  // rcx: result string length
+  // Check for flat ascii string
+  Label non_ascii_flat;
+  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);
 
   // Allocate the result.
-  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+  __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
 
   // rax: result string
   // rcx: result string length
-  __ movq(r14, rsi);  // esi used by following code.
-  {  // Locate character of sub string start.
-    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
-    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
-                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  }
+  __ movq(rdx, rsi);  // rsi used by following code.
   // Locate first character of result.
   __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+  // Load string argument and locate character of sub string start.
+  __ movq(rsi, Operand(rsp, kStringOffset));
+  __ movq(rbx, Operand(rsp, kFromOffset));
+  {
+    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  }
 
   // rax: result string
   // rcx: result length
+  // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
-  // r14: original value of rsi
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
-  __ movq(rsi, r14);  // Restore rsi.
+  __ movq(rsi, rdx);  // Restore rsi.
+  Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
 
-  __ bind(&two_byte_sequential);
+  __ bind(&non_ascii_flat);
+  // rax: string
+  // rbx: instance type & kStringRepresentationMask | kStringEncodingMask
+  // rcx: result string length
+  // Check for sequential two byte string
+  __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+  __ j(not_equal, &runtime);
+
   // Allocate the result.
-  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
+  __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
 
   // rax: result string
   // rcx: result string length
-  __ movq(r14, rsi);  // esi used by following code.
-  {  // Locate character of sub string start.
-    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
-    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
-                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  }
+  __ movq(rdx, rsi);  // rsi used by following code.
   // Locate first character of result.
   __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+  // Load string argument and locate character of sub string start.
+  __ movq(rsi, Operand(rsp, kStringOffset));
+  __ movq(rbx, Operand(rsp, kFromOffset));
+  {
+    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  }
 
   // rax: result string
   // rcx: result length
+  // rdx: original value of rsi
   // rdi: first character of result
   // rsi: character of sub string start
-  // r14: original value of rsi
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
-  __ movq(rsi, r14);  // Restore esi.
+  __ movq(rsi, rdx);  // Restore rsi.
+
+  __ bind(&return_rax);
   __ IncrementCounter(counters->sub_string_native(), 1);
   __ ret(kArgumentsSize);
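
The slice path this hunk rearranges implements substring-by-reference: short substrings are copied, long ones become a (parent, offset, length) triple, and slicing an existing slice folds the offsets. A minimal sketch with illustrative types; kMinSliceLength = 13 stands in for SlicedString::kMinLength and is an assumption:

```cpp
#include <cstddef>

constexpr size_t kMinSliceLength = 13;  // assumed threshold

struct Slice {
  const char* parent_chars;  // characters of the (sequential) parent
  size_t offset;             // start within the parent
  size_t length;
};

// Returns false when the result is short enough that copying is cheaper,
// mirroring the "Short slice.  Copy instead of slicing." branch above.
bool TrySlice(const Slice& parent, size_t from, size_t to, Slice* out) {
  size_t length = to - from;
  if (length < kMinSliceLength) return false;
  // Slicing a slice folds the offsets, as the sliced_string path does.
  *out = Slice{parent.parent_chars, parent.offset + from, length};
  return true;
}
```
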
 
@@ -5454,7 +5017,7 @@
   __ movb(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, near_jump);
-  __ incq(index);
+  __ addq(index, Immediate(1));
   __ j(not_zero, &loop);
 }
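
The comparison loop above indexes both strings from one past their ends with a negative counter that runs up to zero, so the increment doubles as the loop condition and no separate bounds check is needed. The same idea in C++, as a hypothetical helper:

```cpp
#include <cstddef>

bool AsciiCharsEqual(const char* left, const char* right, size_t length) {
  const char* left_end = left + length;
  const char* right_end = right + length;
  // Negative index counting up to zero mirrors the addq/j(not_zero) pair.
  for (ptrdiff_t index = -static_cast<ptrdiff_t>(length); index != 0;
       ++index) {
    if (left_end[index] != right_end[index]) return false;
  }
  return true;
}
```
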
 
@@ -5484,7 +5047,7 @@
   // Check that both are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of ascii strings.
   __ IncrementCounter(counters->string_compare_native(), 1);
   // Drop arguments from the stack
   __ pop(rcx);
@@ -5527,15 +5090,15 @@
   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
 
   Label generic_stub;
-  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label unordered;
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rax, rdx);
   __ j(either_smi, &generic_stub, Label::kNear);
 
   __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &maybe_undefined1, Label::kNear);
+  __ j(not_equal, &miss, Label::kNear);
   __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
-  __ j(not_equal, &maybe_undefined2, Label::kNear);
+  __ j(not_equal, &miss, Label::kNear);
 
   // Load left and right operand
   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
@@ -5556,25 +5119,11 @@
   __ ret(0);
 
   __ bind(&unordered);
+
   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
-  __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ Cmp(rax, masm->isolate()->factory()->undefined_value());
-    __ j(not_equal, &miss);
-    __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
-    __ j(not_equal, &maybe_undefined2, Label::kNear);
-    __ jmp(&unordered);
-  }
-
-  __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
-    __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
-    __ j(equal, &unordered);
-  }
-
   __ bind(&miss);
   GenerateMiss(masm);
 }
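
The `unordered` label above covers the NaN case: `ucomisd` reports "unordered" when either operand is NaN, and the stub then defers to the generic CompareStub rather than deciding the result inline. A hedged C++ sketch of the same three-way split (return values are illustrative):

    #include <cmath>

    // Three-way double compare that surfaces the "unordered" (NaN) case
    // the stub bails out on.
    static int CompareDoubles(double lhs, double rhs) {
      if (std::isnan(lhs) || std::isnan(rhs)) return 2;  // unordered: slow path
      if (lhs < rhs) return -1;
      if (lhs > rhs) return 1;
      return 0;
    }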
@@ -5625,10 +5174,9 @@
 
 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(GetCondition() == equal);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
-
   // Registers containing left and right operands respectively.
   Register left = rdx;
   Register right = rax;
@@ -5666,31 +5214,24 @@
 
   // Check that both strings are symbols. If they are, we're done
   // because we already know they are not identical.
-  if (equality) {
-    Label do_compare;
-    STATIC_ASSERT(kSymbolTag != 0);
-    __ and_(tmp1, tmp2);
-    __ testb(tmp1, Immediate(kIsSymbolMask));
-    __ j(zero, &do_compare, Label::kNear);
-    // Make sure rax is non-zero. At this point input operands are
-    // guaranteed to be non-zero.
-    ASSERT(right.is(rax));
-    __ ret(0);
-    __ bind(&do_compare);
-  }
+  Label do_compare;
+  STATIC_ASSERT(kSymbolTag != 0);
+  __ and_(tmp1, tmp2);
+  __ testb(tmp1, Immediate(kIsSymbolMask));
+  __ j(zero, &do_compare, Label::kNear);
+  // Make sure rax is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  ASSERT(right.is(rax));
+  __ ret(0);
 
   // Check that both strings are sequential ASCII.
   Label runtime;
+  __ bind(&do_compare);
   __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
 
   // Compare flat ASCII strings. Returns when done.
-  if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2);
-  } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
-  }
+  StringCompareStub::GenerateFlatAsciiStringEquals(
+      masm, left, right, tmp1, tmp2);
 
   // Handle more complex cases in runtime.
   __ bind(&runtime);
@@ -5698,11 +5239,7 @@
   __ push(left);
   __ push(right);
   __ push(tmp1);
-  if (equality) {
-    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
-  } else {
-    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
-  }
+  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
 
   __ bind(&miss);
   GenerateMiss(masm);
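
The symbol check this hunk reverts to relies on symbols (interned strings) being canonical: two symbols are equal only if they are the same object, so a non-identical pair can be rejected without comparing characters. A hedged sketch of that invariant, using a stand-in intern table rather than V8's real one:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Stand-in intern table: equal strings intern to the same node, so
    // equality on interned strings is pointer identity.
    static const std::string* Intern(std::unordered_set<std::string>& table,
                                     const std::string& s) {
      return &*table.insert(s).first;
    }

    int main() {
      std::unordered_set<std::string> table;
      assert(Intern(table, "foo") == Intern(table, "foo"));
      assert(Intern(table, "foo") != Intern(table, "bar"));
    }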
@@ -5729,62 +5266,49 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
-  Label miss;
-  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
-  __ j(either_smi, &miss, Label::kNear);
-
-  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ Cmp(rcx, known_map_);
-  __ j(not_equal, &miss, Label::kNear);
-  __ Cmp(rbx, known_map_);
-  __ j(not_equal, &miss, Label::kNear);
-
-  __ subq(rax, rdx);
-  __ ret(0);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  {
-    // Call the runtime system in a fresh internal frame.
-    ExternalReference miss =
-        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  // Save the registers.
+  __ pop(rcx);
+  __ push(rdx);
+  __ push(rax);
+  __ push(rcx);
 
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(rdx);
-    __ push(rax);
-    __ push(rdx);
-    __ push(rax);
-    __ Push(Smi::FromInt(op_));
-    __ CallExternalReference(miss, 3);
+  // Call the runtime system in a fresh internal frame.
+  ExternalReference miss =
+      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+  __ EnterInternalFrame();
+  __ push(rdx);
+  __ push(rax);
+  __ Push(Smi::FromInt(op_));
+  __ CallExternalReference(miss, 3);
+  __ LeaveInternalFrame();
 
-    // Compute the entry point of the rewritten stub.
-    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-    __ pop(rax);
-    __ pop(rdx);
-  }
+  // Compute the entry point of the rewritten stub.
+  __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+
+  // Restore registers.
+  __ pop(rcx);
+  __ pop(rax);
+  __ pop(rdx);
+  __ push(rcx);
 
   // Do a tail call to the rewritten stub.
   __ jmp(rdi);
 }
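
Both versions of GenerateMiss implement the same IC protocol: preserve the operands, call the runtime miss handler (which returns a stub specialized for the observed operand types), then tail-jump to the new stub's entry with the operands restored. A rough, hedged C++ shape of that protocol (all names illustrative, not V8's API):

    #include <cstdint>

    using CompareStubFn = int64_t (*)(int64_t lhs, int64_t rhs);

    static int64_t SmiCompareStub(int64_t lhs, int64_t rhs) { return lhs - rhs; }

    // The miss handler inspects the operands and hands back a rewritten,
    // more specialized stub; the caller then re-dispatches through it.
    static CompareStubFn CompareIC_Miss(int64_t /*lhs*/, int64_t /*rhs*/) {
      return &SmiCompareStub;  // always "rewrite" to the smi stub here
    }

    int main() {
      CompareStubFn rewritten = CompareIC_Miss(1, 2);
      return rewritten(3, 3) == 0 ? 0 : 1;  // tail call in the generated code
    }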
 
 
-void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
-                                                        Label* miss,
-                                                        Label* done,
-                                                        Register properties,
-                                                        Handle<String> name,
-                                                        Register r0) {
+MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss,
+    Label* done,
+    Register properties,
+    String* name,
+    Register r0) {
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
   // property. It's true even if some slots represent deleted properties
-  // (their names are the hole value).
+  // (their names are the null value).
   for (int i = 0; i < kInlinedProbes; i++) {
     // r0 points to properties hash.
     // Compute the masked index: (hash + i + i * i) & mask.
@@ -5813,18 +5337,11 @@
     __ Cmp(entity_name, Handle<String>(name));
     __ j(equal, miss);
 
-    Label the_hole;
-    // Check for the hole and skip.
-    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
-    __ j(equal, &the_hole, Label::kNear);
-
     // Check if the entry name is not a symbol.
     __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
     __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
              Immediate(kIsSymbolMask));
     __ j(zero, miss);
-
-    __ bind(&the_hole);
   }
 
   StringDictionaryLookupStub stub(properties,
@@ -5833,10 +5350,12 @@
                                   StringDictionaryLookupStub::NEGATIVE_LOOKUP);
   __ Push(Handle<Object>(name));
   __ push(Immediate(name->Hash()));
-  __ CallStub(&stub);
+  MaybeObject* result = masm->TryCallStub(&stub);
+  if (result->IsFailure()) return result;
   __ testq(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
+  return result;
 }
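
Both the inlined probes and the fallback stub use the probe sequence described in the comment above. A minimal sketch of the masked-index computation (the table capacity is a power of two, so `mask = capacity - 1` keeps every probe in range):

    #include <cstdint>

    // i-th probe slot for a given hash, per the comment above. Sketch only;
    // the real code inlines kInlinedProbes of these before calling the stub.
    static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t mask) {
      return (hash + i + i * i) & mask;
    }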
 
 
@@ -5851,11 +5370,6 @@
                                                         Register name,
                                                         Register r0,
                                                         Register r1) {
-  ASSERT(!elements.is(r0));
-  ASSERT(!elements.is(r1));
-  ASSERT(!name.is(r0));
-  ASSERT(!name.is(r1));
-
   // Assert that name contains a string.
   if (FLAG_debug_code) __ AbortIfNotString(name);
 
@@ -5898,8 +5412,6 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
-  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
-  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -5985,369 +5497,6 @@
 }
 
 
-struct AheadOfTimeWriteBarrierStubList {
-  Register object, value, address;
-  RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
-  // Used in RegExpExecStub.
-  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
-  // Used in CompileArrayPushCall.
-  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
-  // Used in CompileStoreGlobal.
-  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
-  // Used in StoreStubCompiler::CompileStoreField and
-  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
-  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
-  // GenerateStoreField calls the stub with two different permutations of
-  // registers.  This is the second.
-  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
-  // StoreIC::GenerateNormal via GenerateDictionaryStore.
-  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
-  // KeyedStoreIC::GenerateGeneric.
-  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
-  // KeyedStoreStubCompiler::GenerateStoreFastElement.
-  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
-  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
-  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
-  // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
-  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
-  // and ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
-  // ElementsTransitionGenerator::GenerateDoubleToObject
-  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
-  // StoreArrayLiteralElementStub::Generate
-  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
-  // Null termination.
-  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-bool RecordWriteStub::IsPregenerated() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    if (object_.is(entry->object) &&
-        value_.is(entry->value) &&
-        address_.is(entry->address) &&
-        remembered_set_action_ == entry->action &&
-        save_fp_regs_mode_ == kDontSaveFPRegs) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
-  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
-  stub1.GetCode()->set_is_pregenerated(true);
-  StoreBufferOverflowStub stub2(kSaveFPRegs);
-  stub2.GetCode()->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
-  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
-       !entry->object.is(no_reg);
-       entry++) {
-    RecordWriteStub stub(entry->object,
-                         entry->value,
-                         entry->address,
-                         entry->action,
-                         kDontSaveFPRegs);
-    stub.GetCode()->set_is_pregenerated(true);
-  }
-}
-
-
-// Takes the input in 3 registers: address_, value_, and object_.  A pointer to
-// the value has just been written into the object, now this stub makes sure
-// we keep the GC informed.  The word in the object where the value has been
-// written is in the address register.
-void RecordWriteStub::Generate(MacroAssembler* masm) {
-  Label skip_to_incremental_noncompacting;
-  Label skip_to_incremental_compacting;
-
-  // The first two instructions are generated with labels so as to get the
-  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
-  // forth between a compare instructions (a nop in this position) and the
-  // real branch when we start and stop incremental heap marking.
-  // See RecordWriteStub::Patch for details.
-  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
-  __ jmp(&skip_to_incremental_compacting, Label::kFar);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&skip_to_incremental_noncompacting);
-  GenerateIncremental(masm, INCREMENTAL);
-
-  __ bind(&skip_to_incremental_compacting);
-  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
-
-  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
-  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-  masm->set_byte_at(0, kTwoByteNopInstruction);
-  masm->set_byte_at(2, kFiveByteNopInstruction);
-}
-
-
-void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
-  regs_.Save(masm);
-
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    Label dont_need_remembered_set;
-
-    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
-    __ JumpIfNotInNewSpace(regs_.scratch0(),
-                           regs_.scratch0(),
-                           &dont_need_remembered_set);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch0(),
-                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                     not_zero,
-                     &dont_need_remembered_set);
-
-    // First notify the incremental marker if necessary, then update the
-    // remembered set.
-    CheckNeedsToInformIncrementalMarker(
-        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
-    InformIncrementalMarker(masm, mode);
-    regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-
-    __ bind(&dont_need_remembered_set);
-  }
-
-  CheckNeedsToInformIncrementalMarker(
-      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
-  InformIncrementalMarker(masm, mode);
-  regs_.Restore(masm);
-  __ ret(0);
-}
-
-
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
-#ifdef _WIN64
-  Register arg3 = r8;
-  Register arg2 = rdx;
-  Register arg1 = rcx;
-#else
-  Register arg3 = rdx;
-  Register arg2 = rsi;
-  Register arg1 = rdi;
-#endif
-  Register address =
-      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(arg1));
-  __ Move(address, regs_.address());
-  __ Move(arg1, regs_.object());
-  if (mode == INCREMENTAL_COMPACTION) {
-    // TODO(gc) Can we just set address arg2 in the beginning?
-    __ Move(arg2, address);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ movq(arg2, Operand(address, 0));
-  }
-  __ LoadAddress(arg3, ExternalReference::isolate_address());
-  int argument_count = 3;
-
-  AllowExternalCallThatCantCauseGC scope(masm);
-  __ PrepareCallCFunction(argument_count);
-  if (mode == INCREMENTAL_COMPACTION) {
-    __ CallCFunction(
-        ExternalReference::incremental_evacuation_record_write_function(
-            masm->isolate()),
-        argument_count);
-  } else {
-    ASSERT(mode == INCREMENTAL);
-    __ CallCFunction(
-        ExternalReference::incremental_marking_record_write_function(
-            masm->isolate()),
-        argument_count);
-  }
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
-}
-
-
-void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
-    MacroAssembler* masm,
-    OnNoNeedToInformIncrementalMarker on_no_need,
-    Mode mode) {
-  Label on_black;
-  Label need_incremental;
-  Label need_incremental_pop_object;
-
-  // Let's look at the color of the object:  If it is not black we don't have
-  // to inform the incremental marker.
-  __ JumpIfBlack(regs_.object(),
-                 regs_.scratch0(),
-                 regs_.scratch1(),
-                 &on_black,
-                 Label::kNear);
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&on_black);
-
-  // Get the value from the slot.
-  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
-
-  if (mode == INCREMENTAL_COMPACTION) {
-    Label ensure_not_white;
-
-    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kEvacuationCandidateMask,
-                     zero,
-                     &ensure_not_white,
-                     Label::kNear);
-
-    __ CheckPageFlag(regs_.object(),
-                     regs_.scratch1(),  // Scratch.
-                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
-                     zero,
-                     &need_incremental);
-
-    __ bind(&ensure_not_white);
-  }
-
-  // We need an extra register for this, so we push the object register
-  // temporarily.
-  __ push(regs_.object());
-  __ EnsureNotWhite(regs_.scratch0(),  // The value.
-                    regs_.scratch1(),  // Scratch.
-                    regs_.object(),  // Scratch.
-                    &need_incremental_pop_object,
-                    Label::kNear);
-  __ pop(regs_.object());
-
-  regs_.Restore(masm);
-  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
-  } else {
-    __ ret(0);
-  }
-
-  __ bind(&need_incremental_pop_object);
-  __ pop(regs_.object());
-
-  __ bind(&need_incremental);
-
-  // Fall through when we need to inform the incremental marker.
-}
-
-
-void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : element value to store
-  //  -- rbx    : array literal
-  //  -- rdi    : map of array literal
-  //  -- rcx    : element index as smi
-  //  -- rdx    : array literal index in function
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  Label element_done;
-  Label double_elements;
-  Label smi_element;
-  Label slow_elements;
-  Label fast_elements;
-
-  __ CheckFastElements(rdi, &double_elements);
-
-  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
-  __ JumpIfSmi(rax, &smi_element);
-  __ CheckFastSmiOnlyElements(rdi, &fast_elements);
-
-  // Storing into the array literal requires an elements transition. Call into
-  // the runtime.
-
-  __ bind(&slow_elements);
-  __ pop(rdi);  // Pop return address and remember to put back later for tail
-                // call.
-  __ push(rbx);
-  __ push(rcx);
-  __ push(rax);
-  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
-  __ push(rdx);
-  __ push(rdi);  // Return return address so that tail call returns to right
-                 // place.
-  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
-
-  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
-  __ bind(&fast_elements);
-  __ SmiToInteger32(kScratchRegister, rcx);
-  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
-  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
-                           FixedArrayBase::kHeaderSize));
-  __ movq(Operand(rcx, 0), rax);
-  // Update the write barrier for the array store.
-  __ RecordWrite(rbx, rcx, rax,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
-  // FAST_ELEMENTS, and value is Smi.
-  __ bind(&smi_element);
-  __ SmiToInteger32(kScratchRegister, rcx);
-  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
-  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
-                       FixedArrayBase::kHeaderSize), rax);
-  __ ret(0);
-
-  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
-  __ bind(&double_elements);
-
-  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
-  __ SmiToInteger32(r11, rcx);
-  __ StoreNumberToDoubleElements(rax,
-                                 r9,
-                                 r11,
-                                 xmm0,
-                                 &slow_elements);
-  __ ret(0);
-}
-
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 6a1a18f..4058118 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -48,8 +48,6 @@
                                    ArgumentType argument_type)
       : type_(type), argument_type_(argument_type) {}
   void Generate(MacroAssembler* masm);
-  static void GenerateOperation(MacroAssembler* masm,
-                                TranscendentalCache::Type type);
  private:
   TranscendentalCache::Type type_;
   ArgumentType argument_type_;
@@ -57,32 +55,7 @@
   Major MajorKey() { return TranscendentalCache; }
   int MinorKey() { return type_ | argument_type_; }
   Runtime::FunctionId RuntimeFunction();
-};
-
-
-class StoreBufferOverflowStub: public CodeStub {
- public:
-  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
-      : save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  virtual bool IsPregenerated() { return true; }
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
-  NO_GENERIC_BINARY_FLAGS = 0,
-  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
+  void GenerateOperation(MacroAssembler* masm);
 };
 
 
@@ -151,7 +124,7 @@
     return UnaryOpIC::ToState(operand_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_unary_op_type(operand_type_);
   }
 };
@@ -237,7 +210,7 @@
     return BinaryOpIC::ToState(operands_type_);
   }
 
-  virtual void FinishCode(Handle<Code> code) {
+  virtual void FinishCode(Code* code) {
     code->set_binary_op_type(operands_type_);
     code->set_binary_op_result_type(result_type_);
   }
@@ -424,12 +397,13 @@
 
   void Generate(MacroAssembler* masm);
 
-  static void GenerateNegativeLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register properties,
-                                     Handle<String> name,
-                                     Register r0);
+  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
+      MacroAssembler* masm,
+      Label* miss,
+      Label* done,
+      Register properties,
+      String* name,
+      Register r0);
 
   static void GeneratePositiveLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -439,8 +413,6 @@
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -453,7 +425,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryLookup; }
+  Major MajorKey() { return StringDictionaryNegativeLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -474,246 +446,6 @@
 };
 
 
-class RecordWriteStub: public CodeStub {
- public:
-  RecordWriteStub(Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action,
-                  SaveFPRegsMode fp_mode)
-      : object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
-        regs_(object,   // An input reg.
-              address,  // An input reg.
-              value) {  // One scratch reg.
-  }
-
-  enum Mode {
-    STORE_BUFFER_ONLY,
-    INCREMENTAL,
-    INCREMENTAL_COMPACTION
-  };
-
-  virtual bool IsPregenerated();
-  static void GenerateFixedRegStubsAheadOfTime();
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
-  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
-  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
-
-  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
-  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
-
-  static Mode GetMode(Code* stub) {
-    byte first_instruction = stub->instruction_start()[0];
-    byte second_instruction = stub->instruction_start()[2];
-
-    if (first_instruction == kTwoByteJumpInstruction) {
-      return INCREMENTAL;
-    }
-
-    ASSERT(first_instruction == kTwoByteNopInstruction);
-
-    if (second_instruction == kFiveByteJumpInstruction) {
-      return INCREMENTAL_COMPACTION;
-    }
-
-    ASSERT(second_instruction == kFiveByteNopInstruction);
-
-    return STORE_BUFFER_ONLY;
-  }
-
-  static void Patch(Code* stub, Mode mode) {
-    switch (mode) {
-      case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
-               GetMode(stub) == INCREMENTAL_COMPACTION);
-        stub->instruction_start()[0] = kTwoByteNopInstruction;
-        stub->instruction_start()[2] = kFiveByteNopInstruction;
-        break;
-      case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        stub->instruction_start()[0] = kTwoByteJumpInstruction;
-        break;
-      case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
-        stub->instruction_start()[0] = kTwoByteNopInstruction;
-        stub->instruction_start()[2] = kFiveByteJumpInstruction;
-        break;
-    }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 7);
-  }
-
- private:
-  // This is a helper class for freeing up 3 scratch registers, where the third
-  // is always rcx (needed for shift operations).  The input is two registers
-  // that must be preserved and one scratch register provided by the caller.
-  class RegisterAllocation {
-   public:
-    RegisterAllocation(Register object,
-                       Register address,
-                       Register scratch0)
-        : object_orig_(object),
-          address_orig_(address),
-          scratch0_orig_(scratch0),
-          object_(object),
-          address_(address),
-          scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
-      scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
-      if (scratch0.is(rcx)) {
-        scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
-      }
-      if (object.is(rcx)) {
-        object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
-      }
-      if (address.is(rcx)) {
-        address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
-      }
-      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
-    }
-
-    void Save(MacroAssembler* masm) {
-      ASSERT(!address_orig_.is(object_));
-      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
-      // We don't have to save scratch0_orig_ because it was given to us as
-      // a scratch register.  But if we had to switch to a different reg then
-      // we should save the new scratch0_.
-      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
-      if (!rcx.is(scratch0_orig_) &&
-          !rcx.is(object_orig_) &&
-          !rcx.is(address_orig_)) {
-        masm->push(rcx);
-      }
-      masm->push(scratch1_);
-      if (!address_.is(address_orig_)) {
-        masm->push(address_);
-        masm->movq(address_, address_orig_);
-      }
-      if (!object_.is(object_orig_)) {
-        masm->push(object_);
-        masm->movq(object_, object_orig_);
-      }
-    }
-
-    void Restore(MacroAssembler* masm) {
-      // These will have been preserved the entire time, so we just need to move
-      // them back.  Only in one case is the orig_ reg different from the plain
-      // one, since only one of them can alias with rcx.
-      if (!object_.is(object_orig_)) {
-        masm->movq(object_orig_, object_);
-        masm->pop(object_);
-      }
-      if (!address_.is(address_orig_)) {
-        masm->movq(address_orig_, address_);
-        masm->pop(address_);
-      }
-      masm->pop(scratch1_);
-      if (!rcx.is(scratch0_orig_) &&
-          !rcx.is(object_orig_) &&
-          !rcx.is(address_orig_)) {
-        masm->pop(rcx);
-      }
-      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
-    }
-
-    // If we have to call into C then we need to save and restore all caller-
-    // saved registers that were not already preserved.
-
-    // The three scratch registers (incl. rcx) will be restored by other means
-    // so we don't bother pushing them here.  Rbx, rbp and r12-15 are callee
-    // save and don't need to be preserved.
-    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
-      masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
-    }
-
-    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
-                                           SaveFPRegsMode mode) {
-      masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
-    }
-
-    inline Register object() { return object_; }
-    inline Register address() { return address_; }
-    inline Register scratch0() { return scratch0_; }
-    inline Register scratch1() { return scratch1_; }
-
-   private:
-    Register object_orig_;
-    Register address_orig_;
-    Register scratch0_orig_;
-    Register object_;
-    Register address_;
-    Register scratch0_;
-    Register scratch1_;
-    // Third scratch register is always rcx.
-
-    Register GetRegThatIsNotRcxOr(Register r1,
-                                  Register r2,
-                                  Register r3) {
-      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
-        Register candidate = Register::FromAllocationIndex(i);
-        if (candidate.is(rcx)) continue;
-        if (candidate.is(r1)) continue;
-        if (candidate.is(r2)) continue;
-        if (candidate.is(r3)) continue;
-        return candidate;
-      }
-      UNREACHABLE();
-      return no_reg;
-    }
-    friend class RecordWriteStub;
-  };
-
-  enum OnNoNeedToInformIncrementalMarker {
-    kReturnOnNoNeedToInformIncrementalMarker,
-    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  };
-
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
-  void CheckNeedsToInformIncrementalMarker(
-      MacroAssembler* masm,
-      OnNoNeedToInformIncrementalMarker on_no_need,
-      Mode mode);
-  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
-
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
-  void Activate(Code* code) {
-    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-  }
-
-  class ObjectBits: public BitField<int, 0, 4> {};
-  class ValueBits: public BitField<int, 4, 4> {};
-  class AddressBits: public BitField<int, 8, 4> {};
-  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
-  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
-
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
-  Label slow_;
-  RegisterAllocation regs_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
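
The removed RecordWriteStub encodes its current mode in its own first bytes: 2- and 5-byte compare instructions act as nops in the STORE_BUFFER_ONLY state, and incremental marking patches them into short/near jumps. A hedged sketch of the decoding side, mirroring the removed GetMode():

    #include <cassert>
    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // The first instruction is either a 2-byte jump (0xeb) or the 2-byte nop
    // "cmpb al, imm8" (0x3c); the byte at offset 2 is either a 5-byte jump
    // (0xe9) or the 5-byte nop "cmpl eax, imm32" (0x3d).
    static Mode GetMode(const uint8_t* code) {
      if (code[0] == 0xeb) return INCREMENTAL;
      assert(code[0] == 0x3c);
      if (code[2] == 0xe9) return INCREMENTAL_COMPACTION;
      assert(code[2] == 0x3d);
      return STORE_BUFFER_ONLY;
    }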
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a8d39b2..507bbd4 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,7 +30,6 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "codegen.h"
-#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -39,90 +38,17 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
-  masm->set_has_frame(true);
+  masm->EnterInternalFrame();
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
-  masm->set_has_frame(false);
+  masm->LeaveInternalFrame();
 }
 
 
 #define __ masm.
 
-
-UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
-  size_t actual_size;
-  // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
-  if (buffer == NULL) {
-    // Fallback to library function if function cannot be created.
-    switch (type) {
-      case TranscendentalCache::SIN: return &sin;
-      case TranscendentalCache::COS: return &cos;
-      case TranscendentalCache::TAN: return &tan;
-      case TranscendentalCache::LOG: return &log;
-      default: UNIMPLEMENTED();
-    }
-  }
-
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // xmm0: raw double input.
-  // Move double input into registers.
-  __ push(rbx);
-  __ push(rdi);
-  __ movq(rbx, xmm0);
-  __ push(rbx);
-  __ fld_d(Operand(rsp, 0));
-  TranscendentalCacheStub::GenerateOperation(&masm, type);
-  // The return value is expected to be in xmm0.
-  __ fstp_d(Operand(rsp, 0));
-  __ pop(rbx);
-  __ movq(xmm0, rbx);
-  __ pop(rdi);
-  __ pop(rbx);
-  __ Ret();
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
-UnaryMathFunction CreateSqrtFunction() {
-  size_t actual_size;
-  // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
-  if (buffer == NULL) return &sqrt;
-
-  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-  // xmm0: raw double input.
-  // Move double input into registers.
-  __ sqrtsd(xmm0, xmm0);
-  __ Ret();
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  ASSERT(desc.reloc_size == 0);
-
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
-  return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
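
The removed CreateTranscendentalFunction and CreateSqrtFunction follow one pattern: assemble a tiny routine into an executable buffer, flush the instruction cache, and hand the buffer back as a C function pointer. A hedged, POSIX-only sketch of the sqrt case (the byte encoding is for the System V x64 ABI and is given for illustration, not taken from V8):

    #include <cstring>
    #include <sys/mman.h>

    typedef double (*UnaryMathFunction)(double);

    static UnaryMathFunction MakeSqrtFunction() {
      static const unsigned char kCode[] = {
          0xF2, 0x0F, 0x51, 0xC0,  // sqrtsd xmm0, xmm0  (arg and result in xmm0)
          0xC3                     // ret
      };
      void* buffer = mmap(NULL, sizeof(kCode),
                          PROT_READ | PROT_WRITE | PROT_EXEC,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (buffer == MAP_FAILED) return NULL;  // caller falls back to sqrt()
      std::memcpy(buffer, kCode, sizeof(kCode));
      return reinterpret_cast<UnaryMathFunction>(buffer);
    }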
 #ifdef _WIN64
 typedef double (*ModuloFunction)(double, double);
 // Define custom fmod implementation.
@@ -213,367 +139,6 @@
 
 #endif
 
-#undef __
-
-// -------------------------------------------------------------------------
-// Code generators
-
-#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  // Set transitioned map.
-  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  // The fail label is not actually used since we do not allocate.
-  Label allocated, new_backing_store, only_change_map, done;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
-  __ j(equal, &only_change_map);
-
-  // Check backing store for COW-ness.  For COW arrays we have to
-  // allocate a new backing store.
-  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
-  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
-                 Heap::kFixedCOWArrayMapRootIndex);
-  __ j(equal, &new_backing_store);
-  // Check if the backing store is in new-space. If not, we need to allocate
-  // a new one since the old one is in pointer-space.
-  // If in new space, we can reuse the old backing store because it is
-  // the same size.
-  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
-
-  __ movq(r14, r8);  // Destination array equals source array.
-
-  // r8 : source FixedArray
-  // r9 : elements array length
-  // r14: destination FixedDoubleArray
-  // Set backing store's map
-  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-
-  __ bind(&allocated);
-  // Set transitioned map.
-  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Convert smis to doubles and holes to hole NaNs.  The Array's length
-  // remains unchanged.
-  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
-  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
-  Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
-  // r15: the-hole NaN
-  __ jmp(&entry);
-
-  // Allocate new backing store.
-  __ bind(&new_backing_store);
-  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
-  __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
-  // Set backing store's map
-  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-  // Set receiver's backing store.
-  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
-  __ movq(r11, r14);
-  __ RecordWriteField(rdx,
-                      JSObject::kElementsOffset,
-                      r11,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Set backing store's length.
-  __ Integer32ToSmi(r11, r9);
-  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
-  __ jmp(&allocated);
-
-  __ bind(&only_change_map);
-  // Set transitioned map.
-  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&done);
-
-  // Conversion loop.
-  __ bind(&loop);
-  __ movq(rbx,
-          FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
-  // r9 : current element's index
-  // rbx: current element (smi-tagged)
-  __ JumpIfNotSmi(rbx, &convert_hole);
-  __ SmiToInteger32(rbx, rbx);
-  __ cvtlsi2sd(xmm0, rbx);
-  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ jmp(&entry);
-  __ bind(&convert_hole);
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-    __ Assert(equal, "object found in smi-only array");
-  }
-
-  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
-  __ bind(&entry);
-  __ decq(r9);
-  __ j(not_sign, &loop);
-
-  __ bind(&done);
-}
-
-
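
The conversion loop in the removed GenerateSmiOnlyToDouble turns each smi into a double in place and writes a canonical "hole NaN" bit pattern for holes, so the FixedDoubleArray can still represent missing elements. A hedged C++ sketch (on x64 a smi carries its payload in the upper 32 bits with tag bit 0 clear; the hole-NaN constant here is illustrative):

    #include <cstdint>
    #include <cstring>

    static const uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFull;  // illustrative

    static inline bool IsSmi(int64_t v) { return (v & 1) == 0; }  // kSmiTag == 0

    static void SmiArrayToDoubles(const int64_t* elems, double* out, int length) {
      for (int i = length - 1; i >= 0; i--) {  // the stub also walks downward
        if (IsSmi(elems[i])) {
          out[i] = static_cast<double>(elems[i] >> 32);  // x64 smi untag
        } else {
          std::memcpy(&out[i], &kHoleNanBits, sizeof(double));  // the-hole NaN
        }
      }
    }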
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label loop, entry, convert_hole, gc_required, only_change_map;
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
-  __ j(equal, &only_change_map);
-
-  __ push(rax);
-
-  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
-  // r8 : source FixedDoubleArray
-  // r9 : number of elements
-  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
-  __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
-  // r11: destination FixedArray
-  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
-  __ Integer32ToSmi(r14, r9);
-  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
-
-  // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
-  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
-  // rsi: the-hole NaN
-  // rdi: pointer to the-hole
-  __ jmp(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(rax);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ jmp(fail);
-
-  // Box doubles into heap numbers.
-  __ bind(&loop);
-  __ movq(r14, FieldOperand(r8,
-                            r9,
-                            times_pointer_size,
-                            FixedDoubleArray::kHeaderSize));
-  // r9 : current element's index
-  // r14: current element
-  __ cmpq(r14, rsi);
-  __ j(equal, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(rax, r15, &gc_required);
-  // rax: new heap number
-  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
-  __ movq(FieldOperand(r11,
-                       r9,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          rax);
-  __ movq(r15, r9);
-  __ RecordWriteArray(r11,
-                      rax,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&entry, Label::kNear);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ movq(FieldOperand(r11,
-                       r9,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          rdi);
-
-  __ bind(&entry);
-  __ decq(r9);
-  __ j(not_sign, &loop);
-
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
-  __ RecordWriteField(rdx,
-                      JSObject::kElementsOffset,
-                      r11,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(rax);
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
-  __ bind(&only_change_map);
-  // Set transitioned map.
-  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void StringCharLoadGenerator::Generate(MacroAssembler* masm,
-                                       Register string,
-                                       Register index,
-                                       Register result,
-                                       Label* call_runtime) {
-  // Fetch the instance type of the receiver into result register.
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ testb(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ testb(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ addq(index, result);
-  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle cons strings.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
-                 Heap::kEmptyStringRootIndex);
-  __ j(not_equal, call_runtime);
-  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Distinguish sequential and external strings. Only these two string
-  // representations can reach here (slices and flat cons strings have been
-  // reduced to the underlying sequential or external string).
-  Label seq_string;
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ testb(result, Immediate(kStringRepresentationMask));
-  __ j(zero, &seq_string, Label::kNear);
-
-  // Handle external strings.
-  Label ascii_external, done;
-  if (FLAG_debug_code) {
-    // Assert that we do not have a cons or slice (indirect strings) here.
-    // Sequential strings have already been ruled out.
-    __ testb(result, Immediate(kIsIndirectStringMask));
-    __ Assert(zero, "external string expected, but not found");
-  }
-  // Rule out short external strings.
-  STATIC_CHECK(kShortExternalStringTag != 0);
-  __ testb(result, Immediate(kShortExternalStringTag));
-  __ j(not_zero, call_runtime);
-  // Check encoding.
-  STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ testb(result, Immediate(kStringEncodingMask));
-  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
-  __ j(not_equal, &ascii_external, Label::kNear);
-  // Two-byte string.
-  __ movzxwl(result, Operand(result, index, times_2, 0));
-  __ jmp(&done, Label::kNear);
-  __ bind(&ascii_external);
-  // Ascii string.
-  __ movzxbl(result, Operand(result, index, times_1, 0));
-  __ jmp(&done, Label::kNear);
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii;
-  __ bind(&seq_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzxwl(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii);
-  __ movzxbl(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
-}
 
 #undef __
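
The removed StringCharLoadGenerator dispatches on two bit fields of the instance type: an "indirect" bit splits cons/sliced strings from the rest, and the representation bits split sequential from external. A hedged sketch of just the classification (the mask values are parameters here because the real kIs*Mask constants live in objects.h):

    #include <cstdint>

    enum StringShape { SEQUENTIAL, CONS, SLICED, EXTERNAL };

    // Classify a string from its instance-type byte, mirroring the branch
    // structure of the removed generator. Sketch only.
    static StringShape Classify(uint8_t type, uint8_t is_indirect_mask,
                                uint8_t sliced_not_cons_mask,
                                uint8_t representation_mask) {
      if (type & is_indirect_mask) {
        return (type & sliced_not_cons_mask) ? SLICED : CONS;
      }
      // kSeqStringTag == 0, so a zero representation field means sequential.
      return (type & representation_mask) ? EXTERNAL : SEQUENTIAL;
    }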
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 2e80751..a0648ce 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -69,21 +69,6 @@
 };
 
 
-class StringCharLoadGenerator : public AllStatic {
- public:
-  // Generates the code for handling different string types and loading the
-  // indexed character into |result|.  We expect |index| as untagged input and
-  // |result| as untagged output.
-  static void Generate(MacroAssembler* masm,
-                       Register string,
-                       Register index,
-                       Register result,
-                       Label* call_runtime);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 80e22c6..ae5045f 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,7 +27,7 @@
 
 // CPU specific code for x64 independent of OS goes here.
 
-#if defined(__GNUC__) && !defined(__MINGW64__)
+#ifdef __GNUC__
 #include "third_party/valgrind/valgrind.h"
 #endif
 
@@ -41,7 +41,7 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
+void CPU::Setup() {
   CpuFeatures::Probe();
 }
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index eec83d9..423e6f2 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -100,66 +100,65 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Store the registers containing live values on the expression stack to
-    // make sure that these are correctly updated during GC. Non-object values
-    // are stored as two smis, causing them to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      ASSERT(!reg.is(kScratchRegister));
-      if ((object_regs & (1 << r)) != 0) {
-        __ push(reg);
-      }
-      // Store the 64-bit value as two smis.
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ movq(kScratchRegister, reg);
-        __ Integer32ToSmi(reg, reg);
-        __ push(reg);
-        __ sar(kScratchRegister, Immediate(32));
-        __ Integer32ToSmi(kScratchRegister, kScratchRegister);
-        __ push(kScratchRegister);
-      }
+  // Store the registers containing live values on the expression stack to
+  // make sure that these are correctly updated during GC. Non-object values
+  // are stored as two smis, causing them to be untouched by GC.
+  ASSERT((object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+  ASSERT((object_regs & non_object_regs) == 0);
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    ASSERT(!reg.is(kScratchRegister));
+    if ((object_regs & (1 << r)) != 0) {
+      __ push(reg);
     }
+    // Store the 64-bit value as two smis.
+    if ((non_object_regs & (1 << r)) != 0) {
+      __ movq(kScratchRegister, reg);
+      __ Integer32ToSmi(reg, reg);
+      __ push(reg);
+      __ sar(kScratchRegister, Immediate(32));
+      __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+      __ push(kScratchRegister);
+    }
+  }
 
 #ifdef DEBUG
-    __ RecordComment("// Calling from debug break to runtime - come in - over");
+  __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-    __ Set(rax, 0);  // No arguments (argc == 0).
-    __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+  __ Set(rax, 0);  // No arguments (argc == 0).
+  __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
 
-    CEntryStub ceb(1);
-    __ CallStub(&ceb);
+  CEntryStub ceb(1);
+  __ CallStub(&ceb);
 
-    // Restore the register values from the expression stack.
-    for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if (FLAG_debug_code) {
-        __ Set(reg, kDebugZapValue);
-      }
-      if ((object_regs & (1 << r)) != 0) {
-        __ pop(reg);
-      }
-      // Reconstruct the 64-bit value from two smis.
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ pop(kScratchRegister);
-        __ SmiToInteger32(kScratchRegister, kScratchRegister);
-        __ shl(kScratchRegister, Immediate(32));
-        __ pop(reg);
-        __ SmiToInteger32(reg, reg);
-        __ or_(reg, kScratchRegister);
-      }
+  // Restore the register values from the expression stack.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    Register reg = { r };
+    if (FLAG_debug_code) {
+      __ Set(reg, kDebugZapValue);
     }
-
-    // Get rid of the internal frame.
+    if ((object_regs & (1 << r)) != 0) {
+      __ pop(reg);
+    }
+    // Reconstruct the 64-bit value from two smis.
+    if ((non_object_regs & (1 << r)) != 0) {
+      __ pop(kScratchRegister);
+      __ SmiToInteger32(kScratchRegister, kScratchRegister);
+      __ shl(kScratchRegister, Immediate(32));
+      __ pop(reg);
+      __ SmiToInteger32(reg, reg);
+      __ or_(reg, kScratchRegister);
+    }
   }
 
+  // Get rid of the internal frame.
+  __ LeaveInternalFrame();
+
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
@@ -229,36 +228,8 @@
 }
 
 
-void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
   // Register state just before return from JS function (from codegen-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rax: return value
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
-}
-
-
-void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rdi : function
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallFunctionStub (from code-stubs-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rdi : function
-  //  -- rbx: cache cell for call target
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
-}
-
-
-void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-x64.cc).
   // rax is the actual number of arguments not encoded as a smi, see comment
   // above IC call.
   // ----------- S t a t e -------------
@@ -269,16 +240,21 @@
 }
 
 
-void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
-  // Register state for CallConstructStub (from code-stubs-x64.cc).
-  // rax is the actual number of arguments not encoded as a smi, see comment
-  // above IC call.
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // Register state just before return from JS function (from codegen-x64.cc).
   // ----------- S t a t e -------------
-  //  -- rax: number of arguments
-  //  -- rbx: cache cell for call target
+  //  -- rax: return value
   // -----------------------------------
-  // The number of arguments in rax is not smi encoded.
-  Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+  Generate_DebugBreakCallHelper(masm, rax.bit(), 0, true);
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Register state for stub CallFunction (from CallFunctionStub in ic-x64.cc).
+  // ----------- S t a t e -------------
+  //  No registers used on entry.
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, 0, 0, false);
 }
 
 
@@ -287,7 +263,9 @@
   Label check_codesize;
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
-  __ Nop(Assembler::kDebugBreakSlotLength);
+  for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
+    __ nop();
+  }
   ASSERT_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
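
For reference (not part of the patch): the restored loop above spills each caller-saved register holding a raw 64-bit value as two smis, so the GC only ever sees tagged words on the expression stack, and the pop sequence reassembles the value. A minimal standalone C++ sketch of that round trip, assuming x64's smi encoding (32-bit payload stored in the upper half of the word); the helper names are hypothetical, not V8's:

#include <cstdint>

// Hypothetical model of x64 smi tagging: the 32-bit payload lives in
// the upper half of the word, the low 32 tag bits stay zero.
static inline int64_t Int32ToSmi(int32_t v) {
  return static_cast<int64_t>(v) << 32;
}
static inline int32_t SmiToInt32(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

// Mirrors the pop/SmiToInteger32/shl/or sequence above: the high half
// was pushed last, so it is popped first (into the scratch register).
int64_t ReconstructFromTwoSmis(int64_t high_smi, int64_t low_smi) {
  uint64_t high = static_cast<uint32_t>(SmiToInt32(high_smi));
  uint64_t low = static_cast<uint32_t>(SmiToInt32(low_smi));
  return static_cast<int64_t>((high << 32) | low);
}

// Round trip: for any uint64_t v,
// ReconstructFromTwoSmis(Int32ToSmi(int32_t(v >> 32)),
//                        Int32ToSmi(int32_t(v))) == v.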
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 2adf587..f322312 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -87,19 +87,13 @@
 #endif
   }
 
-  Isolate* isolate = code->GetIsolate();
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = isolate->deoptimizer_data();
+  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
-  // We might be in the middle of incremental marking with compaction.
-  // Tell collector to treat this code object in a special way and
-  // ignore all slots that might have been recorded on it.
-  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
-
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -111,8 +105,7 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -138,18 +131,14 @@
   ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
          *(call_target_address - 2) == 0x07 &&  // offset
          *(call_target_address - 1) == 0xe8);   // call
-  *(call_target_address - 3) = 0x66;  // 2 byte nop part 1
-  *(call_target_address - 2) = 0x90;  // 2 byte nop part 2
+  *(call_target_address - 3) = 0x90;  // nop
+  *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
 }
 
 
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
-                                         Address pc_after,
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
                                          Code* check_code,
                                          Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -157,16 +146,13 @@
          Assembler::target_address_at(call_target_address));
   // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
   // restore the conditional branch.
-  ASSERT(*(call_target_address - 3) == 0x66 &&  // 2 byte nop part 1
-         *(call_target_address - 2) == 0x90 &&  // 2 byte nop part 2
+  ASSERT(*(call_target_address - 3) == 0x90 &&  // nop
+         *(call_target_address - 2) == 0x90 &&  // nop
          *(call_target_address - 1) == 0xe8);   // call
   *(call_target_address - 3) = 0x73;  // jae
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
-
-  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, check_code);
 }
 
 
@@ -206,13 +192,12 @@
   ASSERT(Translation::BEGIN == opcode);
   USE(opcode);
   int count = iterator.Next();
-  iterator.Skip(1);  // Drop JS frame count.
   ASSERT(count == 1);
   USE(count);
 
   opcode = static_cast<Translation::Opcode>(iterator.Next());
   USE(opcode);
-  ASSERT(Translation::JS_FRAME == opcode);
+  ASSERT(Translation::FRAME == opcode);
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
@@ -248,7 +233,9 @@
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
 
   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -313,7 +300,7 @@
     output_[0] = input_;
     output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
   } else {
-    // Set up the frame pointer and the context pointer.
+    // Setup the frame pointer and the context pointer.
     output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
     output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));
 
@@ -337,219 +324,13 @@
 }
 
 
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
-                                                 int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
-  // Arguments adaptor can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  intptr_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // A marker value is used in place of the context.
-  output_offset -= kPointerSize;
-  intptr_t context = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; context (adaptor sentinel)\n",
-           top_address + output_offset, output_offset, context);
-  }
-
-  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(function);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; function\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* adaptor_trampoline =
-      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
-  intptr_t pc_value = reinterpret_cast<intptr_t>(
-      adaptor_trampoline->instruction_start() +
-      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
-  output_frame->SetPc(pc_value);
-}
-
-
-void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
-                                              int frame_index) {
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
-  unsigned height = iterator->Next();
-  unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
-    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
-  }
-
-  unsigned fixed_frame_size = 6 * kPointerSize;
-  unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
-  // Allocate and store the output frame description.
-  FrameDescription* output_frame =
-      new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::CONSTRUCT);
-
-  // Construct stub can not be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
-  output_[frame_index] = output_frame;
-
-  // The top address of the frame is computed from the previous
-  // frame's top and this frame's size.
-  intptr_t top_address;
-  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
-  output_frame->SetTop(top_address);
-
-  // Compute the incoming parameter translation.
-  int parameter_count = height;
-  unsigned output_offset = output_frame_size;
-  for (int i = 0; i < parameter_count; ++i) {
-    output_offset -= kPointerSize;
-    DoTranslateCommand(iterator, frame_index, output_offset);
-  }
-
-  // Read caller's PC from the previous frame.
-  output_offset -= kPointerSize;
-  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
-  output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; caller's pc\n",
-           top_address + output_offset, output_offset, callers_pc);
-  }
-
-  // Read caller's FP from the previous frame, and set this frame's FP.
-  output_offset -= kPointerSize;
-  intptr_t value = output_[frame_index - 1]->GetFp();
-  output_frame->SetFrameSlot(output_offset, value);
-  intptr_t fp_value = top_address + output_offset;
-  output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; caller's fp\n",
-           fp_value, output_offset, value);
-  }
-
-  // The context can be gotten from the previous frame.
-  output_offset -= kPointerSize;
-  value = output_[frame_index - 1]->GetContext();
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; context\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // A marker value is used in place of the function.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; function (construct sentinel)\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  // Number of incoming arguments.
-  output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; argc (%d)\n",
-           top_address + output_offset, output_offset, value, height - 1);
-  }
-
-  // The newly allocated object was passed as receiver in the artificial
-  // constructor stub environment created by HEnvironment::CopyForInlining().
-  output_offset -= kPointerSize;
-  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
-    PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
-           V8PRIxPTR " ; allocated receiver\n",
-           top_address + output_offset, output_offset, value);
-  }
-
-  ASSERT(0 == output_offset);
-
-  Builtins* builtins = isolate_->builtins();
-  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
-  intptr_t pc = reinterpret_cast<intptr_t>(
-      construct_stub->instruction_start() +
-      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
-}
-
-
-void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
-                                   int frame_index) {
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+                                 int frame_index) {
+  // Read the ast node id, function, and frame height for this output frame.
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+  USE(opcode);
+  ASSERT(Translation::FRAME == opcode);
   int node_id = iterator->Next();
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
@@ -569,7 +350,9 @@
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
+#ifdef DEBUG
+  output_frame->SetKind(Code::FUNCTION);
+#endif
 
   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -657,7 +440,6 @@
     value = reinterpret_cast<intptr_t>(function->context());
   }
   output_frame->SetFrameSlot(output_offset, value);
-  output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(rsi.code(), value);
   if (FLAG_trace_deopt) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
@@ -816,10 +598,7 @@
 
   Isolate* isolate = masm()->isolate();
 
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
-  }
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -865,11 +644,8 @@
   __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
   __ LoadAddress(arg2, ExternalReference::isolate_address());
-  {
-    AllowExternalCallThatCantCauseGC scope(masm());
-    __ CallCFunction(
-        ExternalReference::compute_output_frames_function(isolate), 2);
-  }
+  __ CallCFunction(
+      ExternalReference::compute_output_frames_function(isolate), 2);
   __ pop(rax);
 
   // Replace the current frame with the output frames.
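
For reference (not part of the patch): PatchStackCheckCodeAt rewrites the two-byte conditional branch guarding the stack-check call (0x73 0x07, a jae that skips the call) into two one-byte nops, so the replacement code is called unconditionally; RevertStackCheckCodeAt restores the branch. A standalone sketch of the byte-level patching, with a raw rel32 write standing in for Assembler::set_target_address_at:

#include <cassert>
#include <cstdint>
#include <cstring>

// call_target_address points at the rel32 field of "e8 <rel32>"; the
// two bytes before the e8 hold either the jae or the nop pair.
void PatchStackCheck(uint8_t* call_target_address, int32_t new_rel32) {
  assert(call_target_address[-3] == 0x73 &&  // jae
         call_target_address[-2] == 0x07 &&  // offset (skips the call)
         call_target_address[-1] == 0xe8);   // call
  call_target_address[-3] = 0x90;  // nop
  call_target_address[-2] = 0x90;  // nop: the call is now unconditional
  std::memcpy(call_target_address, &new_rel32, sizeof(new_rel32));
}

void RevertStackCheck(uint8_t* call_target_address, int32_t old_rel32) {
  assert(call_target_address[-3] == 0x90 &&  // nop
         call_target_address[-2] == 0x90 &&  // nop
         call_target_address[-1] == 0xe8);   // call
  call_target_address[-3] = 0x73;  // jae
  call_target_address[-2] = 0x07;  // offset
  std::memcpy(call_target_address, &old_rel32, sizeof(old_rel32));
}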
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index adeda0b..1b8871f 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -34,7 +34,6 @@
 #if defined(V8_TARGET_ARCH_X64)
 
 #include "disasm.h"
-#include "lazy-instance.h"
 
 namespace disasm {
 
@@ -110,7 +109,6 @@
   { 0xC3, UNSET_OP_ORDER, "ret" },
   { 0xC9, UNSET_OP_ORDER, "leave" },
   { 0xF4, UNSET_OP_ORDER, "hlt" },
-  { 0xFC, UNSET_OP_ORDER, "cld" },
   { 0xCC, UNSET_OP_ORDER, "int3" },
   { 0x60, UNSET_OP_ORDER, "pushad" },
   { 0x61, UNSET_OP_ORDER, "popad" },
@@ -270,8 +268,7 @@
 }
 
 
-static v8::internal::LazyInstance<InstructionTable>::type instruction_table =
-    LAZY_INSTANCE_INITIALIZER;
+static InstructionTable instruction_table;
 
 
 static InstructionDesc cmov_instructions[16] = {
@@ -913,19 +910,15 @@
           switch (modrm_byte) {
             case 0xE0: mnem = "fchs"; break;
             case 0xE1: mnem = "fabs"; break;
-            case 0xE3: mnem = "fninit"; break;
             case 0xE4: mnem = "ftst"; break;
             case 0xE8: mnem = "fld1"; break;
             case 0xEB: mnem = "fldpi"; break;
             case 0xED: mnem = "fldln2"; break;
             case 0xEE: mnem = "fldz"; break;
-            case 0xF0: mnem = "f2xm1"; break;
             case 0xF1: mnem = "fyl2x"; break;
-            case 0xF2: mnem = "fptan"; break;
             case 0xF5: mnem = "fprem1"; break;
             case 0xF7: mnem = "fincstp"; break;
             case 0xF8: mnem = "fprem"; break;
-            case 0xFD: mnem = "fscale"; break;
             case 0xFE: mnem = "fsin"; break;
             case 0xFF: mnem = "fcos"; break;
             default: UnimplementedInstruction();
@@ -1041,18 +1034,7 @@
       }
     } else {
       get_modrm(*current, &mod, &regop, &rm);
-      if (opcode == 0x1f) {
-        current++;
-        if (rm == 4) {  // SIB byte present.
-          current++;
-        }
-        if (mod == 1) {  // Byte displacement.
-          current += 1;
-        } else if (mod == 2) {  // 32-bit displacement.
-          current += 4;
-        }  // else no immediate displacement.
-        AppendToBuffer("nop");
-      } else if (opcode == 0x28) {
+      if (opcode == 0x28) {
         AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
         current += PrintRightXMMOperand(current);
       } else if (opcode == 0x29) {
@@ -1196,7 +1178,7 @@
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
     current++;
-    if (rm == 4) {  // SIB byte present.
+    if (regop == 4) {  // SIB byte present.
       current++;
     }
     if (mod == 1) {  // Byte displacement.
@@ -1340,7 +1322,7 @@
     data++;
   }
 
-  const InstructionDesc& idesc = instruction_table.Get().Get(current);
+  const InstructionDesc& idesc = instruction_table.Get(current);
   byte_size_operand_ = idesc.byte_size_operation;
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
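
For reference (not part of the patch): the last hunk above reverts a ModRM decoding fix. In x64 instruction encoding a SIB byte follows the ModRM byte only when mod != 3 and rm == 4; the 3.6 code being restored tests regop instead. A standalone sketch of the field split and operand sizing (helper names hypothetical):

#include <cstdint>

// Split a ModRM byte into its fields: mod (bits 7..6), reg/opcode
// (bits 5..3) and rm (bits 2..0).
static void GetModRM(uint8_t data, int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

// Bytes occupied by a memory operand, counting the ModRM byte itself
// (SIB base == 5 special cases omitted for brevity).
int MemOperandLength(uint8_t modrm) {
  int mod, regop, rm;
  GetModRM(modrm, &mod, &regop, &rm);
  int length = 1;                        // the ModRM byte
  if (mod != 3 && rm == 4) length++;     // SIB byte present
  if (mod == 0 && rm == 5) length += 4;  // disp32 (RIP-relative on x64)
  else if (mod == 1) length += 1;        // 8-bit displacement
  else if (mod == 2) length += 4;        // 32-bit displacement
  return length;
}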
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 3e3d63d..7012c76 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,32 +31,32 @@
 namespace v8 {
 namespace internal {
 
-const int kNumRegs = 16;
-const RegList kJSCallerSaved =
+static const int kNumRegs = 16;
+static const RegList kJSCallerSaved =
     1 << 0 |  // rax
     1 << 1 |  // rcx
     1 << 2 |  // rdx
     1 << 3 |  // rbx - used as a caller-saved register in JavaScript code
     1 << 7;   // rdi - callee function
 
-const int kNumJSCallerSaved = 5;
+static const int kNumJSCallerSaved = 5;
 
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 // Number of registers for which space is reserved in safepoints.
-const int kNumSafepointRegisters = 16;
+static const int kNumSafepointRegisters = 16;
 
 // ----------------------------------------------------
 
 class StackHandlerConstants : public AllStatic {
  public:
-  static const int kNextOffset     = 0 * kPointerSize;
-  static const int kCodeOffset     = 1 * kPointerSize;
-  static const int kStateOffset    = 2 * kPointerSize;
-  static const int kContextOffset  = 3 * kPointerSize;
-  static const int kFPOffset       = 4 * kPointerSize;
+  static const int kNextOffset    = 0 * kPointerSize;
+  static const int kContextOffset = 1 * kPointerSize;
+  static const int kFPOffset      = 2 * kPointerSize;
+  static const int kStateOffset   = 3 * kPointerSize;
+  static const int kPCOffset      = 4 * kPointerSize;
 
-  static const int kSize = kFPOffset + kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
 };
 
 
@@ -87,9 +87,6 @@
 
 class StandardFrameConstants : public AllStatic {
  public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
   static const int kExpressionsOffset = -3 * kPointerSize;
   static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
@@ -115,8 +112,6 @@
 class ArgumentsAdaptorFrameConstants : public AllStatic {
  public:
   static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
-  static const int kFrameSize =
-      StandardFrameConstants::kFixedFrameSize + kPointerSize;
 };
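
For reference (not part of the patch): the restored StackHandlerConstants describe a five-slot handler ending with the saved PC. A sketch modeling that layout as a plain struct; the type is hypothetical, but the field order matches the offsets above:

// On x64, kPointerSize is 8, so the handler occupies five words and
// kSize = kPCOffset + kPointerSize = 40 bytes.
struct StackHandlerLayout36 {
  void* next;     // kNextOffset    = 0 * kPointerSize
  void* context;  // kContextOffset = 1 * kPointerSize
  void* fp;       // kFPOffset      = 2 * kPointerSize
  void* state;    // kStateOffset   = 3 * kPointerSize
  void* pc;       // kPCOffset      = 4 * kPointerSize
};
static_assert(sizeof(StackHandlerLayout36) == 5 * sizeof(void*),
              "kSize == kPCOffset + kPointerSize");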
 
 
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 85c5e75..556523f 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,6 +44,11 @@
 #define __ ACCESS_MASM(masm_)
 
 
+static unsigned GetPropertyId(Property* property) {
+  return property->id();
+}
+
+
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -100,54 +105,26 @@
 };
 
 
-int FullCodeGenerator::self_optimization_header_size() {
-  return 20;
-}
-
-
 // Generate code for a JS function.  On entry to the function the receiver
 // and arguments have been pushed on the stack left to right, with the
 // return address on top of them.  The actual argument count matches the
 // formal parameter count expected by the function.
 //
 // The live registers are:
-//   o rdi: the JS function object being called (i.e. ourselves)
+//   o rdi: the JS function object being called (i.e., ourselves)
 //   o rsi: our context
 //   o rbp: our caller's frame pointer
 //   o rsp: stack pointer (pointing to return address)
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-x64.h for its layout.
-void FullCodeGenerator::Generate() {
-  CompilationInfo* info = info_;
-  handler_table_ =
-      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+  ASSERT(info_ == NULL);
+  info_ = info;
+  scope_ = info->scope();
   SetFunctionPosition(function());
   Comment cmnt(masm_, "[ function compiled by full code generator");
 
-  // We can optionally optimize based on counters rather than statistical
-  // sampling.
-  if (info->ShouldSelfOptimize()) {
-    if (FLAG_trace_opt_verbose) {
-      PrintF("[adding self-optimization header to %s]\n",
-             *info->function()->debug_name()->ToCString());
-    }
-    has_self_optimization_header_ = true;
-    MaybeObject* maybe_cell = isolate()->heap()->AllocateJSGlobalPropertyCell(
-        Smi::FromInt(Compiler::kCallsUntilPrimitiveOpt));
-    JSGlobalPropertyCell* cell;
-    if (maybe_cell->To(&cell)) {
-      __ movq(rax, Handle<JSGlobalPropertyCell>(cell),
-              RelocInfo::EMBEDDED_OBJECT);
-      __ SmiAddConstant(FieldOperand(rax, JSGlobalPropertyCell::kValueOffset),
-                        Smi::FromInt(-1));
-      Handle<Code> compile_stub(
-          isolate()->builtins()->builtin(Builtins::kLazyRecompile));
-      __ j(zero, compile_stub, RelocInfo::CODE_TARGET);
-      ASSERT(masm_->pc_offset() == self_optimization_header_size());
-    }
-  }
-
 #ifdef DEBUG
   if (strlen(FLAG_stop_at) > 0 &&
       info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
@@ -159,7 +136,7 @@
   // with undefined when called as functions (without an explicit
   // receiver object). rcx is zero for method calls and non-zero for
   // function calls.
-  if (!info->is_classic_mode() || info->is_native()) {
+  if (info->is_strict_mode() || info->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -170,11 +147,6 @@
     __ bind(&ok);
   }
 
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
@@ -223,9 +195,11 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier.  This clobbers rax and rbx.
-        __ RecordWriteContextSlot(
-            rsi, context_offset, rax, rbx, kDontSaveFPRegs);
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering rsi.
+        __ movq(rcx, rsi);
+        __ RecordWrite(rcx, context_offset, rax, rbx);
       }
     }
   }
@@ -252,15 +226,9 @@
     //   function, receiver address, parameter count.
     // The stub will rewrite receiver and parameter count if the previous
     // stack frame was an arguments adapter frame.
-    ArgumentsAccessStub::Type type;
-    if (!is_classic_mode()) {
-      type = ArgumentsAccessStub::NEW_STRICT;
-    } else if (function()->has_duplicate_parameters()) {
-      type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
-    } else {
-      type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
-    }
-    ArgumentsAccessStub stub(type);
+    ArgumentsAccessStub stub(
+        is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
+                         : ArgumentsAccessStub::NEW_NON_STRICT_SLOW);
     __ CallStub(&stub);
 
     SetVar(arguments, rax, rbx, rdx);
@@ -282,11 +250,8 @@
       // For named function expressions, declare the function name as a
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        VariableProxy* proxy = scope()->function();
-        ASSERT(proxy->var()->mode() == CONST ||
-               proxy->var()->mode() == CONST_HARMONY);
-        ASSERT(proxy->var()->location() != Variable::UNALLOCATED);
-        EmitDeclaration(proxy, proxy->var()->mode(), NULL);
+        int ignored = 0;
+        EmitDeclaration(scope()->function(), Variable::CONST, NULL, &ignored);
       }
       VisitDeclarations(scope()->declarations());
     }
@@ -322,8 +287,7 @@
 }
 
 
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
-                                       Label* back_edge_target) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
@@ -413,7 +377,7 @@
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
   codegen()->GetVar(result_register(), var);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -435,7 +399,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -468,7 +432,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -527,7 +491,7 @@
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
-  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
   codegen()->DoTest(this);
 }
 
@@ -591,7 +555,7 @@
 
 
 void FullCodeGenerator::TestContext::Plug(bool flag) const {
-  codegen()->PrepareForBailoutBeforeSplit(condition(),
+  codegen()->PrepareForBailoutBeforeSplit(TOS_REG,
                                           true,
                                           true_label_,
                                           false_label_);
@@ -674,16 +638,15 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ movq(location, src);
-
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
+    __ RecordWrite(scratch0, offset, src, scratch1);
   }
 }
 
 
-void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
                                                      bool should_normalize,
                                                      Label* if_true,
                                                      Label* if_false) {
@@ -694,7 +657,13 @@
 
   Label skip;
   if (should_normalize) __ jmp(&skip, Label::kNear);
-  PrepareForBailout(expr, TOS_REG);
+
+  ForwardBailoutStack* current = forward_bailout_stack_;
+  while (current != NULL) {
+    PrepareForBailout(current->expr(), state);
+    current = current->parent();
+  }
+
   if (should_normalize) {
     __ CompareRoot(rax, Heap::kTrueValueRootIndex);
     Split(equal, if_true, if_false, NULL);
@@ -704,17 +673,16 @@
 
 
 void FullCodeGenerator::EmitDeclaration(VariableProxy* proxy,
-                                        VariableMode mode,
-                                        FunctionLiteral* function) {
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function,
+                                        int* global_count) {
   // If it was not possible to allocate the variable at compile time, we
   // need to "declare" it at runtime to make sure it actually exists in the
   // local context.
   Variable* variable = proxy->var();
-  bool binding_needs_init = (function == NULL) &&
-      (mode == CONST || mode == CONST_HARMONY || mode == LET);
   switch (variable->location()) {
     case Variable::UNALLOCATED:
-      ++global_count_;
+      ++(*global_count);
       break;
 
     case Variable::PARAMETER:
@@ -723,7 +691,7 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ movq(StackOperand(variable), result_register());
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(StackOperand(variable), kScratchRegister);
@@ -747,16 +715,10 @@
         VisitForAccumulatorValue(function);
         __ movq(ContextOperand(rsi, variable->index()), result_register());
         int offset = Context::SlotOffset(variable->index());
-        // We know that we have written a function, which is not a smi.
-        __ RecordWriteContextSlot(rsi,
-                                  offset,
-                                  result_register(),
-                                  rcx,
-                                  kDontSaveFPRegs,
-                                  EMIT_REMEMBERED_SET,
-                                  OMIT_SMI_CHECK);
+        __ movq(rbx, rsi);
+        __ RecordWrite(rbx, offset, result_register(), rcx);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
         __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
         __ movq(ContextOperand(rsi, variable->index()), kScratchRegister);
@@ -769,13 +731,11 @@
       Comment cmnt(masm_, "[ Declaration");
       __ push(rsi);
       __ Push(variable->name());
-      // Declaration nodes are always introduced in one of four modes.
-      ASSERT(mode == VAR ||
-             mode == CONST ||
-             mode == CONST_HARMONY ||
-             mode == LET);
-      PropertyAttributes attr =
-          (mode == CONST || mode == CONST_HARMONY) ? READ_ONLY : NONE;
+      // Declaration nodes are always introduced in one of three modes.
+      ASSERT(mode == Variable::VAR ||
+             mode == Variable::CONST ||
+             mode == Variable::LET);
+      PropertyAttributes attr = (mode == Variable::CONST) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
       // Push initial value, if any.
       // Note: For variables we must not push an initial value (such as
@@ -783,7 +743,7 @@
       // must not destroy the current value.
       if (function != NULL) {
         VisitForStackValue(function);
-      } else if (binding_needs_init) {
+      } else if (mode == Variable::CONST || mode == Variable::LET) {
         __ PushRoot(Heap::kTheHoleValueRootIndex);
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
@@ -795,6 +755,9 @@
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) { }
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
@@ -908,8 +871,6 @@
   __ cmpq(rax, null_value);
   __ j(equal, &exit);
 
-  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
   // Convert the object to a JS object.
   Label convert, done_convert;
   __ JumpIfSmi(rax, &convert);
@@ -921,17 +882,51 @@
   __ bind(&done_convert);
   __ push(rax);
 
-  // Check for proxies.
-  Label call_runtime;
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
-  __ j(below_equal, &call_runtime);
-
   // Check cache validity in generated code. This is a fast case for
   // the JSObject::IsSimpleEnum cache validity checks. If we cannot
   // guarantee cache validity, call the runtime system to check cache
   // validity or get the property names in a fixed array.
-  __ CheckEnumCache(null_value, &call_runtime);
+  Label next, call_runtime;
+  Register empty_fixed_array_value = r8;
+  __ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Register empty_descriptor_array_value = r9;
+  __ LoadRoot(empty_descriptor_array_value,
+              Heap::kEmptyDescriptorArrayRootIndex);
+  __ movq(rcx, rax);
+  __ bind(&next);
+
+  // Check that there are no elements.  Register rcx contains the
+  // current JS object we've reached through the prototype chain.
+  __ cmpq(empty_fixed_array_value,
+          FieldOperand(rcx, JSObject::kElementsOffset));
+  __ j(not_equal, &call_runtime);
+
+  // Check that instance descriptors are not empty so that we can
+  // check for an enum cache.  Leave the map in rbx for the subsequent
+  // prototype load.
+  __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
+  __ JumpIfSmi(rdx, &call_runtime);
+
+  // Check that there is an enum cache in the non-empty instance
+  // descriptors (rdx).  This is the case if the next enumeration
+  // index field does not contain a smi.
+  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
+  __ JumpIfSmi(rdx, &call_runtime);
+
+  // For all objects but the receiver, check that the cache is empty.
+  Label check_prototype;
+  __ cmpq(rcx, rax);
+  __ j(equal, &check_prototype, Label::kNear);
+  __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+  __ cmpq(rdx, empty_fixed_array_value);
+  __ j(not_equal, &call_runtime);
+
+  // Load the prototype from the map and loop if non-null.
+  __ bind(&check_prototype);
+  __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
+  __ cmpq(rcx, null_value);
+  __ j(not_equal, &next);
 
   // The enum cache is valid.  Load the map of the object being
   // iterated over and use the cache for the iteration.
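
For reference (not part of the patch): the restored block open-codes the JSObject::IsSimpleEnum fast-path test: walk the prototype chain, requiring every object to have no elements and a usable enum cache, and every object except the receiver to have an empty cache. A standalone model of that walk with hypothetical fields rather than V8 types:

// Hypothetical stand-in type; the real code inspects maps, elements
// and descriptor arrays, as the assembly above does.
struct EnumCheckObject {
  bool has_elements;       // elements != empty fixed array
  bool has_descriptors;    // instance descriptors are non-empty
  bool has_enum_cache;     // enumeration index holds a cache, not a smi
  bool enum_cache_empty;   // bridge cache == empty fixed array
  EnumCheckObject* prototype;  // nullptr plays the role of null_value
};

bool IsSimpleEnum(EnumCheckObject* receiver) {
  for (EnumCheckObject* o = receiver; o != nullptr; o = o->prototype) {
    if (o->has_elements) return false;  // bail out to the runtime
    if (!o->has_descriptors || !o->has_enum_cache) return false;
    // Every object except the receiver must have an empty cache.
    if (o != receiver && !o->enum_cache_empty) return false;
  }
  return true;
}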
@@ -958,7 +953,7 @@
   __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
   __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
-  // Set up the four remaining stack slots.
+  // Setup the four remaining stack slots.
   __ push(rax);  // Map.
   __ push(rdx);  // Enumeration cache.
   __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
@@ -967,33 +962,14 @@
   __ jmp(&loop);
 
   // We got a fixed array in register rax. Iterate through that.
-  Label non_proxy;
   __ bind(&fixed_array);
-
-  Handle<JSGlobalPropertyCell> cell =
-      isolate()->factory()->NewJSGlobalPropertyCell(
-          Handle<Object>(
-              Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
-  RecordTypeFeedbackCell(stmt->PrepareId(), cell);
-  __ LoadHeapObject(rbx, cell);
-  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-          Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
-
-  __ Move(rbx, Smi::FromInt(1));  // Smi indicates slow check
-  __ movq(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(rcx, LAST_JS_PROXY_TYPE, rcx);
-  __ j(above, &non_proxy);
-  __ Move(rbx, Smi::FromInt(0));  // Zero indicates proxy
-  __ bind(&non_proxy);
-  __ push(rbx);  // Smi
-  __ push(rax);  // Array
+  __ Push(Smi::FromInt(0));  // Map (0) - force slow check.
+  __ push(rax);
   __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ push(rax);  // Fixed array length (as smi).
   __ Push(Smi::FromInt(0));  // Initial index.
 
   // Generate code for doing the condition check.
-  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
   __ movq(rax, Operand(rsp, 0 * kPointerSize));  // Get the current index.
   __ cmpq(rax, Operand(rsp, 1 * kPointerSize));  // Compare to the array length.
@@ -1007,22 +983,17 @@
                             index.scale,
                             FixedArray::kHeaderSize));
 
-  // Get the expected map from the stack or a smi in the
+  // Get the expected map from the stack or a zero map in the
   // permanent slow case into register rdx.
   __ movq(rdx, Operand(rsp, 3 * kPointerSize));
 
   // Check if the expected map still matches that of the enumerable.
-  // If not, we may have to filter the key.
+  // If not, we have to filter the key.
   Label update_each;
   __ movq(rcx, Operand(rsp, 4 * kPointerSize));
   __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
   __ j(equal, &update_each, Label::kNear);
 
-  // For proxies, no filtering is done.
-  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  __ Cmp(rdx, Smi::FromInt(0));
-  __ j(equal, &update_each, Label::kNear);
-
   // Convert the entry to a string or null if it isn't a property
   // anymore. If the property has been removed while iterating, we
   // just skip it.
@@ -1039,7 +1010,7 @@
   __ movq(result_register(), rbx);
   // Perform the assignment as if via '='.
   { EffectContext context(this);
-    EmitAssignment(stmt->each());
+    EmitAssignment(stmt->each(), stmt->AssignmentId());
   }
 
   // Generate code for the body of the loop.
@@ -1050,7 +1021,7 @@
   __ bind(loop_statement.continue_label());
   __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
 
-  EmitStackCheck(stmt, &loop);
+  EmitStackCheck(stmt);
   __ jmp(&loop);
 
   // Remove the pointers stored on the stack.
@@ -1058,7 +1029,6 @@
   __ addq(rsp, Immediate(5 * kPointerSize));
 
   // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(&exit);
   decrement_loop_depth();
 }
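
For reference (not part of the patch): the restored for-in loop keeps five words on the stack, and the Operand(rsp, n * kPointerSize) reads above index into them as follows (slot names hypothetical):

// Slot n is read as Operand(rsp, n * kPointerSize) in the loop body,
// and all five are dropped at once by the addq above.
enum ForInStackSlot {
  kIndexSlot = 0,       // current index (smi), bumped each iteration
  kLengthSlot = 1,      // fixed-array length (smi), the loop bound
  kKeysSlot = 2,        // fixed array holding the enumerable keys
  kMapSlot = 3,         // expected map, or Smi 0 to force the slow path
  kEnumerableSlot = 4   // the object being iterated over
};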
@@ -1077,7 +1047,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(info->language_mode());
+    FastNewClosureStub stub(info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ Push(info);
     __ CallStub(&stub);
   } else {
@@ -1107,7 +1077,7 @@
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1121,7 +1091,7 @@
     // If no outer scope calls eval, we do not need to check more
     // context extensions.  If we have reached an eval scope, we check
     // all extensions from this point.
-    if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
     s = s->outer_scope();
   }
 
@@ -1167,7 +1137,7 @@
 
   for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
     if (s->num_heap_slots() > 0) {
-      if (s->calls_non_strict_eval()) {
+      if (s->calls_eval()) {
         // Check that extension is NULL.
         __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                 Immediate(0));
@@ -1198,23 +1168,16 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
-  if (var->mode() == DYNAMIC_GLOBAL) {
+  if (var->mode() == Variable::DYNAMIC_GLOBAL) {
     EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
     __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
+  } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
     __ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->mode() == CONST ||
-        local->mode() == CONST_HARMONY ||
-        local->mode() == LET) {
+    if (local->mode() == Variable::CONST) {
       __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
       __ j(not_equal, done);
-      if (local->mode() == CONST) {
-        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-      } else {  // LET || CONST_HARMONY
-        __ Push(var->name());
-        __ CallRuntime(Runtime::kThrowReferenceError, 1);
-      }
+      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
     }
     __ jmp(done);
   }
@@ -1245,63 +1208,23 @@
     case Variable::LOCAL:
     case Variable::CONTEXT: {
       Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
-      if (var->binding_needs_init()) {
-        // var->scope() may be NULL when the proxy is located in eval code and
-        // refers to a potential outside binding. Currently those bindings are
-        // always looked up dynamically, i.e. in that case
-        //     var->location() == LOOKUP.
-        // always holds.
-        ASSERT(var->scope() != NULL);
-
-        // Check if the binding really needs an initialization check. The check
-        // can be skipped in the following situation: we have a LET or CONST
-        // binding in harmony mode, both the Variable and the VariableProxy have
-        // the same declaration scope (i.e. they are both in global code, in the
-        // same function or in the same eval code) and the VariableProxy is in
-        // the source physically located after the initializer of the variable.
-        //
-        // We cannot skip any initialization checks for CONST in non-harmony
-        // mode because const variables may be declared but never initialized:
-        //   if (false) { const x; }; var y = x;
-        //
-        // The condition on the declaration scopes is a conservative check for
-        // nested functions that access a binding and are called before the
-        // binding is initialized:
-        //   function() { f(); let x = 1; function f() { x = 2; } }
-        //
-        bool skip_init_check;
-        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
-          skip_init_check = false;
-        } else {
-          // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
-          skip_init_check = var->mode() != CONST &&
-              var->initializer_position() < proxy->position();
+      if (var->mode() != Variable::LET && var->mode() != Variable::CONST) {
+        context()->Plug(var);
+      } else {
+        // Let and const need a read barrier.
+        Label done;
+        GetVar(rax, var);
+        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+        __ j(not_equal, &done, Label::kNear);
+        if (var->mode() == Variable::LET) {
+          __ Push(var->name());
+          __ CallRuntime(Runtime::kThrowReferenceError, 1);
+        } else {  // Variable::CONST
+          __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
         }
-
-        if (!skip_init_check) {
-          // Let and const need a read barrier.
-          Label done;
-          GetVar(rax, var);
-          __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-          __ j(not_equal, &done, Label::kNear);
-          if (var->mode() == LET || var->mode() == CONST_HARMONY) {
-            // Throw a reference error when using an uninitialized let/const
-            // binding in harmony mode.
-            __ Push(var->name());
-            __ CallRuntime(Runtime::kThrowReferenceError, 1);
-          } else {
-            // Uninitalized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST);
-            __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-          }
-          __ bind(&done);
-          context()->Plug(rax);
-          break;
-        }
+        __ bind(&done);
+        context()->Plug(rax);
       }
-      context()->Plug(var);
       break;
     }
 
@@ -1377,22 +1300,12 @@
 }
 
 
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
-  if (expression == NULL) {
-    __ PushRoot(Heap::kNullValueRootIndex);
-  } else {
-    VisitForStackValue(expression);
-  }
-}
-
-
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
-  Handle<FixedArray> constant_properties = expr->constant_properties();
   __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(constant_properties);
+  __ Push(expr->constant_properties());
   int flags = expr->fast_elements()
       ? ObjectLiteral::kFastElements
       : ObjectLiteral::kNoFlags;
@@ -1400,15 +1313,10 @@
       ? ObjectLiteral::kHasFunction
       : ObjectLiteral::kNoFlags;
   __ Push(Smi::FromInt(flags));
-  int properties_count = constant_properties->length() / 2;
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    __ CallStub(&stub);
+    __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
   }
 
   // If result_saved is true the result is on top of the stack.  If
@@ -1420,7 +1328,6 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore();
 
-  AccessorTable accessor_table(isolate()->zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
@@ -1443,9 +1350,9 @@
             VisitForAccumulatorValue(value);
             __ Move(rcx, key->handle());
             __ movq(rdx, Operand(rsp, 0));
-            Handle<Code> ic = is_classic_mode()
-                ? isolate()->builtins()->StoreIC_Initialize()
-                : isolate()->builtins()->StoreIC_Initialize_Strict();
+            Handle<Code> ic = is_strict_mode()
+                ? isolate()->builtins()->StoreIC_Initialize_Strict()
+                : isolate()->builtins()->StoreIC_Initialize();
             __ call(ic, RelocInfo::CODE_TARGET, key->id());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1465,28 +1372,19 @@
           __ Drop(3);
         }
         break;
-      case ObjectLiteral::Property::GETTER:
-        accessor_table.lookup(key)->second->getter = value;
-        break;
       case ObjectLiteral::Property::SETTER:
-        accessor_table.lookup(key)->second->setter = value;
+      case ObjectLiteral::Property::GETTER:
+        __ push(Operand(rsp, 0));  // Duplicate receiver.
+        VisitForStackValue(key);
+        __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+                Smi::FromInt(1) :
+                Smi::FromInt(0));
+        VisitForStackValue(value);
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
         break;
     }
   }
 
-  // Emit code to define accessors, using only a single call to the runtime for
-  // each pair of corresponding getters and setters.
-  for (AccessorTable::Iterator it = accessor_table.begin();
-       it != accessor_table.end();
-       ++it) {
-    __ push(Operand(rsp, 0));  // Duplicate receiver.
-    VisitForStackValue(it->first);
-    EmitAccessor(it->second->getter);
-    EmitAccessor(it->second->setter);
-    __ Push(Smi::FromInt(NONE));
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
-  }
-
   if (expr->has_function()) {
     ASSERT(result_saved);
     __ push(Operand(rsp, 0));
@@ -1506,42 +1404,24 @@
 
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
-  ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
 
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
-  __ Push(constant_elements);
-  Heap* heap = isolate()->heap();
-  if (has_constant_fast_elements &&
-      constant_elements_values->map() == heap->fixed_cow_array_map()) {
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
-    // change, so it's possible to specialize the stub in advance.
-    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+  __ Push(expr->constant_elements());
+  if (expr->constant_elements()->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
     FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
-        length);
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
     __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
   } else {
-    ASSERT(constant_elements_kind == FAST_ELEMENTS ||
-           constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
-           FLAG_smi_only_arrays);
-    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
-    // change, so it's possible to specialize the stub in advance.
-    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
-        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
-    FastCloneShallowArrayStub stub(mode, length);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::CLONE_ELEMENTS, length);
     __ CallStub(&stub);
   }
 
@@ -1564,28 +1444,14 @@
     }
     VisitForAccumulatorValue(subexpr);
 
-    if (constant_elements_kind == FAST_ELEMENTS) {
-      // Fast-case array literal with ElementsKind of FAST_ELEMENTS, they cannot
-      // transition and don't need to call the runtime stub.
-      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
-      __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
-      // Store the subexpression value in the array's elements.
-      __ movq(FieldOperand(rbx, offset), result_register());
-      // Update the write barrier for the array store.
-      __ RecordWriteField(rbx, offset, result_register(), rcx,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          INLINE_SMI_CHECK);
-    } else {
-      // Store the subexpression value in the array's elements.
-      __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
-      __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
-      __ Move(rcx, Smi::FromInt(i));
-      __ Move(rdx, Smi::FromInt(expr->literal_index()));
-      StoreArrayLiteralElementStub stub;
-      __ CallStub(&stub);
-    }
+    // Store the subexpression value in the array's elements.
+    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ movq(FieldOperand(rbx, offset), result_register());
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(rbx, offset, result_register(), rcx);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1716,14 +1582,14 @@
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET, prop->id());
+  __ call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
 }
 
 
@@ -1800,7 +1666,7 @@
 }
 
 
-void FullCodeGenerator::EmitAssignment(Expression* expr) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
   // Invalid left-hand sides are rewritten to have a 'throw
   // ReferenceError' on the left-hand side.
   if (!expr->IsValidLeftHandSide()) {
@@ -1832,9 +1698,9 @@
       __ movq(rdx, rax);
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
       __ call(ic);
       break;
     }
@@ -1845,13 +1711,14 @@
       __ movq(rcx, rax);
       __ pop(rdx);
       __ pop(rax);  // Restore value.
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
       __ call(ic);
       break;
     }
   }
+  PrepareForBailoutForId(bailout_ast_id, TOS_REG);
   context()->Plug(rax);
 }
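
NOTE: every is_classic_mode() test in these hunks becomes is_strict_mode()
because 3.6 carries a two-valued strict-mode flag while 3.7 introduced a
three-valued language mode (classic / strict / extended). The two selections
agree because strict and extended mode both take the strict IC stub. A sketch
of that mapping, names illustrative:

    enum LanguageMode { CLASSIC_MODE, STRICT_MODE, EXTENDED_MODE };  // 3.7 view
    enum StrictModeFlag { kNonStrictMode, kStrictMode };             // 3.6 view

    // Classic mode is the only "non-strict" case, which is why the 3.7 code
    // tests is_classic_mode() where 3.6 tests is_strict_mode().
    StrictModeFlag ToStrictModeFlag(LanguageMode mode) {
      return mode == CLASSIC_MODE ? kNonStrictMode : kStrictMode;
    }
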
 
@@ -1862,9 +1729,9 @@
     // Global var, const, or let.
     __ Move(rcx, var->name());
     __ movq(rdx, GlobalObjectOperand());
-    Handle<Code> ic = is_classic_mode()
-        ? isolate()->builtins()->StoreIC_Initialize()
-        : isolate()->builtins()->StoreIC_Initialize_Strict();
+    Handle<Code> ic = is_strict_mode()
+        ? isolate()->builtins()->StoreIC_Initialize_Strict()
+        : isolate()->builtins()->StoreIC_Initialize();
     __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
   } else if (op == Token::INIT_CONST) {
     // Const initializers need a write barrier.
@@ -1889,13 +1756,13 @@
       __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
     }
 
-  } else if (var->mode() == LET && op != Token::INIT_LET) {
+  } else if (var->mode() == Variable::LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
     if (var->IsLookupSlot()) {
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(language_mode()));
+      __ Push(Smi::FromInt(strict_mode_flag()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     } else {
       ASSERT(var->IsStackAllocated() || var->IsContextSlot());
@@ -1910,14 +1777,12 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWriteContextSlot(
-            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
       }
     }
 
-  } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
+  } else if (var->mode() != Variable::CONST) {
+    // Assignment to var or initializing assignment to let.
     if (var->IsStackAllocated() || var->IsContextSlot()) {
       MemOperand location = VarOperand(var, rcx);
       if (FLAG_debug_code && op == Token::INIT_LET) {
@@ -1930,15 +1795,14 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWriteContextSlot(
-            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
       }
     } else {
       ASSERT(var->IsLookupSlot());
       __ push(rax);  // Value.
       __ push(rsi);  // Context.
       __ Push(var->name());
-      __ Push(Smi::FromInt(language_mode()));
+      __ Push(Smi::FromInt(strict_mode_flag()));
       __ CallRuntime(Runtime::kStoreContextSlot, 4);
     }
   }
@@ -1970,9 +1834,9 @@
   } else {
     __ pop(rdx);
   }
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->StoreIC_Initialize()
-      : isolate()->builtins()->StoreIC_Initialize_Strict();
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->StoreIC_Initialize_Strict()
+      : isolate()->builtins()->StoreIC_Initialize();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2010,9 +1874,9 @@
   }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = is_classic_mode()
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = is_strict_mode()
+      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+      : isolate()->builtins()->KeyedStoreIC_Initialize();
   __ call(ic, RelocInfo::CODE_TARGET, expr->id());
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2117,7 +1981,6 @@
   // Record source position for debugger.
   SetSourcePosition(expr->position());
   CallFunctionStub stub(arg_count, flags);
-  __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ CallStub(&stub);
   RecordJSReturnSite(expr);
   // Restore context register.
@@ -2127,7 +1990,8 @@
 }
 
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
+                                                      int arg_count) {
   // Push copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
     __ push(Operand(rsp, arg_count * kPointerSize));
@@ -2138,14 +2002,17 @@
   // Push the receiver of the enclosing function and do runtime call.
   __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize));
 
-  // Push the language mode.
-  __ Push(Smi::FromInt(language_mode()));
+  // Push the strict mode flag. In harmony mode every eval call
+  // is a strict mode eval call.
+  StrictModeFlag strict_mode = strict_mode_flag();
+  if (FLAG_harmony_block_scoping) {
+    strict_mode = kStrictMode;
+  }
+  __ Push(Smi::FromInt(strict_mode));
 
-  // Push the start position of the scope the calls resides in.
-  __ Push(Smi::FromInt(scope()->start_position()));
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+  __ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
+                 ? Runtime::kResolvePossiblyDirectEvalNoLookup
+                 : Runtime::kResolvePossiblyDirectEval, 4);
 }
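
NOTE: this hunk and the next restore the 3.6 eval-resolution scheme. The
runtime call takes four arguments again (the scope start position is gone,
and the strict-mode flag is forced under harmony block scoping), and call
sites whose 'eval' binding is DYNAMIC_GLOBAL regain a fast path that skips
the context lookup entirely. A hypothetical model of the two strategies, not
the real runtime:

    #include <string>
    #include <unordered_map>
    #include <vector>

    enum ResolveEvalFlag { SKIP_CONTEXT_LOOKUP, PERFORM_CONTEXT_LOOKUP };

    using Scope = std::unordered_map<std::string, int>;  // name -> function id

    int ResolveEval(ResolveEvalFlag flag,
                    const std::vector<Scope>& context_chain,
                    int global_eval) {
      // Fast path: static analysis proved only eval-introduced bindings
      // could shadow the global eval, so use the cached global directly.
      if (flag == SKIP_CONTEXT_LOOKUP) return global_eval;
      // Slow path: innermost scope first, looking for a shadowing 'eval'.
      for (auto it = context_chain.rbegin(); it != context_chain.rend(); ++it) {
        auto found = it->find("eval");
        if (found != it->end()) return found->second;
      }
      return global_eval;
    }
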
 
 
@@ -2176,10 +2043,27 @@
         VisitForStackValue(args->at(i));
       }
 
+      // If we know that eval can only be shadowed by eval-introduced
+      // variables we attempt to load the global eval function directly in
+      // generated code. If we succeed, there is no need to perform a
+      // context lookup in the runtime system.
+      Label done;
+      Variable* var = proxy->var();
+      if (!var->IsUnallocated() && var->mode() == Variable::DYNAMIC_GLOBAL) {
+        Label slow;
+        EmitLoadGlobalCheckExtensions(var, NOT_INSIDE_TYPEOF, &slow);
+        // Push the function and resolve eval.
+        __ push(rax);
+        EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
+        __ jmp(&done);
+        __ bind(&slow);
+      }
+
       // Push a copy of the function (found below the arguments) and resolve
       // eval.
       __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
-      EmitResolvePossiblyDirectEval(arg_count);
+      EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
+      __ bind(&done);
 
       // The runtime call returns a pair of values in rax (function) and
       // rdx (receiver). Touch up the stack with the right values.
@@ -2189,7 +2073,6 @@
     // Record source position for debugger.
     SetSourcePosition(expr->position());
     CallFunctionStub stub(arg_count, RECEIVER_MIGHT_BE_IMPLICIT);
-    __ movq(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
     __ CallStub(&stub);
     RecordJSReturnSite(expr);
     // Restore context register.
@@ -2292,29 +2175,14 @@
   __ Set(rax, arg_count);
   __ movq(rdi, Operand(rsp, arg_count * kPointerSize));
 
-  // Record call targets in unoptimized code, but not in the snapshot.
-  CallFunctionFlags flags;
-  if (!Serializer::enabled()) {
-    flags = RECORD_CALL_TARGET;
-    Handle<Object> uninitialized =
-        TypeFeedbackCells::UninitializedSentinel(isolate());
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
-    RecordTypeFeedbackCell(expr->id(), cell);
-    __ Move(rbx, cell);
-  } else {
-    flags = NO_CALL_FUNCTION_FLAGS;
-  }
-
-  CallConstructStub stub(flags);
-  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  Handle<Code> construct_builtin =
+      isolate()->builtins()->JSConstructCall();
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
   context()->Plug(rax);
 }
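
NOTE: the restored `new` path calls the generic JSConstructCall builtin. The
reverted 3.7 code instead allocated a JSGlobalPropertyCell per construct site
(outside the snapshot) so unoptimized code could record call targets for
later specialization. A sketch of that kind of feedback cell, names invented:

    #include <cstdint>

    constexpr std::intptr_t kUninitialized = -1;
    constexpr std::intptr_t kMegamorphic = -2;

    struct FeedbackCell { std::intptr_t target = kUninitialized; };

    void RecordConstructTarget(FeedbackCell* cell, std::intptr_t target) {
      if (cell->target == kUninitialized) {
        cell->target = target;        // first observed target: monomorphic
      } else if (cell->target != target) {
        cell->target = kMegamorphic;  // conflicting targets: stop tracking
      }
    }
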
 
 
-void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2326,7 +2194,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ JumpIfSmi(rax, if_true);
   __ jmp(if_false);
 
@@ -2334,8 +2202,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2347,7 +2214,7 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
   Split(non_negative_smi, if_true, if_false, fall_through);
 
@@ -2355,8 +2222,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2380,15 +2246,14 @@
   __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   __ j(below, if_false);
   __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(below_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
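
NOTE: from here down, every inline-runtime emitter changes signature from
CallRuntime* back to ZoneList<Expression*>*, and PrepareForBailoutBeforeSplit
takes the raw TOS_REG state instead of the expression whose AST id the 3.7
deoptimizer recorded. A miniature of the two dispatch shapes, purely
illustrative:

    #include <vector>

    struct Expression {};
    struct CallRuntimeNode {
      std::vector<Expression*> args;
      int ast_id;  // only needed by the 3.7-era deopt bookkeeping
    };

    typedef void (*Emitter36)(std::vector<Expression*>* args);  // 3.6 shape
    typedef void (*Emitter37)(CallRuntimeNode* node);           // 3.7 shape

    // 3.6 emitters see only the argument list, never the call node.
    void Dispatch36(Emitter36 emit, CallRuntimeNode* node) { emit(&node->args); }
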
 
 
-void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2402,15 +2267,14 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(above_equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2426,7 +2290,7 @@
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(not_zero, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2434,8 +2298,7 @@
 
 
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
-    CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+    ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2511,13 +2374,12 @@
          Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
   __ jmp(if_true);
 
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2531,15 +2393,14 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2553,15 +2414,14 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2575,7 +2435,7 @@
 
   __ JumpIfSmi(rax, if_false);
   __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
@@ -2583,8 +2443,8 @@
 
 
 
-void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2607,15 +2467,14 @@
   __ bind(&check_frame_marker);
   __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
          Smi::FromInt(StackFrame::CONSTRUCT));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
@@ -2631,15 +2490,14 @@
 
   __ pop(rbx);
   __ cmpq(rax, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
 
 
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
@@ -2653,8 +2511,8 @@
 }
 
 
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -2676,8 +2534,7 @@
 }
 
 
-void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
@@ -2688,24 +2545,20 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  // Assume that there are only two callable types, and one of them is at
-  // either end of the type range for JS object types. Saves extra comparisons.
-  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
   // Map is now in rax.
   __ j(below, &null);
-  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                FIRST_SPEC_OBJECT_TYPE + 1);
-  __ j(equal, &function);
 
-  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_SPEC_OBJECT_TYPE - 1);
-  __ j(equal, &function);
-  // Assume that there is no larger type.
-  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+  __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
+  __ j(above_equal, &function);
 
-  // Check if the constructor in the map is a JS function.
+  // Check if the constructor in the map is a function.
   __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &non_function_constructor);
@@ -2737,7 +2590,7 @@
 }
 
 
-void FullCodeGenerator::EmitLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
   // Conditionally generate a log call.
   // Args:
   //   0 (literal string): The type of logging (corresponds to the flags).
@@ -2745,7 +2598,6 @@
   //   1 (string): Format string.  Access the string at argument index 2
   //     with '%2s' (see Logger::LogRuntime for all the formats).
   //   2 (array): Arguments to the format string.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT_EQ(args->length(), 3);
   if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
     VisitForStackValue(args->at(1));
@@ -2758,8 +2610,8 @@
 }
 
 
-void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
 
   Label slow_allocate_heapnumber;
   Label heapnumber_allocated;
@@ -2778,12 +2630,9 @@
   // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
   __ PrepareCallCFunction(1);
 #ifdef _WIN64
-  __ movq(rcx, ContextOperand(context_register(), Context::GLOBAL_INDEX));
-  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
-
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
 #else
-  __ movq(rdi, ContextOperand(context_register(), Context::GLOBAL_INDEX));
-  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
 #endif
   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
 
@@ -2803,10 +2652,9 @@
 }
 
 
-void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   SubStringStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2816,10 +2664,9 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -2830,8 +2677,7 @@
 }
 
 
-void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -2849,68 +2695,18 @@
 }
 
 
-void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
-  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->handle()));
-
-  VisitForAccumulatorValue(args->at(0));  // Load the object.
-
-  Label runtime, done;
-  Register object = rax;
-  Register result = rax;
-  Register scratch = rcx;
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  __ Assert(equal, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ movq(scratch, stamp);
-      __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
-                                           kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2);
-#ifdef _WIN64
-  __ movq(rcx, object);
-  __ movq(rdx, index, RelocInfo::NONE);
-#else
-  __ movq(rdi, object);
-  __ movq(rsi, index, RelocInfo::NONE);
-#endif
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    __ bind(&done);
-  }
-  context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
   // Load the arguments on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
-  MathPowStub stub(MathPowStub::ON_STACK);
+  MathPowStub stub;
   __ CallStub(&stub);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
@@ -2930,15 +2726,14 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ movq(rdx, rax);
-  __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
+  __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
 
   __ bind(&done);
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
   ASSERT_EQ(args->length(), 1);
 
   // Load the argument on the stack and call the stub.
@@ -2950,8 +2745,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -2969,8 +2763,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -2978,6 +2771,7 @@
 
   Register object = rbx;
   Register index = rax;
+  Register scratch = rcx;
   Register result = rdx;
 
   __ pop(object);
@@ -2987,6 +2781,7 @@
   Label done;
   StringCharCodeAtGenerator generator(object,
                                       index,
+                                      scratch,
                                       result,
                                       &need_conversion,
                                       &need_conversion,
@@ -3015,8 +2810,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 2);
 
   VisitForStackValue(args->at(0));
@@ -3024,7 +2818,8 @@
 
   Register object = rbx;
   Register index = rax;
-  Register scratch = rdx;
+  Register scratch1 = rcx;
+  Register scratch2 = rdx;
   Register result = rax;
 
   __ pop(object);
@@ -3034,7 +2829,8 @@
   Label done;
   StringCharAtGenerator generator(object,
                                   index,
-                                  scratch,
+                                  scratch1,
+                                  scratch2,
                                   result,
                                   &need_conversion,
                                   &need_conversion,
@@ -3063,8 +2859,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3076,8 +2871,7 @@
 }
 
 
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
@@ -3089,11 +2883,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathSin(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3101,11 +2894,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathCos(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3113,23 +2905,10 @@
 }
 
 
-void FullCodeGenerator::EmitMathTan(CallRuntime* expr) {
-  // Load the argument on the stack and call the stub.
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
-  VisitForStackValue(args->at(0));
-  __ CallStub(&stub);
-  context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitMathLog(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the stub.
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::TAGGED);
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallStub(&stub);
@@ -3137,9 +2916,8 @@
 }
 
 
-void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) {
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
   // Load the argument on the stack and call the runtime function.
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 1);
   VisitForStackValue(args->at(0));
   __ CallRuntime(Runtime::kMath_sqrt, 1);
@@ -3147,8 +2925,7 @@
 }
 
 
-void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
   ASSERT(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
@@ -3157,31 +2934,18 @@
   }
   VisitForAccumulatorValue(args->last());  // Function.
 
-  // Check for proxy.
-  Label proxy, done;
-  __ CmpObjectType(rax, JS_FUNCTION_PROXY_TYPE, rbx);
-  __ j(equal, &proxy);
-
   // InvokeFunction requires the function in rdi. Move it in there.
   __ movq(rdi, result_register());
   ParameterCount count(arg_count);
   __ InvokeFunction(rdi, count, CALL_FUNCTION,
                     NullCallWrapper(), CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ jmp(&done);
-
-  __ bind(&proxy);
-  __ push(rax);
-  __ CallRuntime(Runtime::kCall, args->length());
-  __ bind(&done);
-
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
   RegExpConstructResultStub stub;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3191,8 +2955,7 @@
 }
 
 
-void FullCodeGenerator::EmitSwapElements(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3247,33 +3010,14 @@
   __ movq(Operand(index_2, 0), object);
   __ movq(Operand(index_1, 0), temp);
 
-  Label no_remembered_set;
-  __ CheckPageFlag(elements,
-                   temp,
-                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
-                   not_zero,
-                   &no_remembered_set,
-                   Label::kNear);
-  // Possible optimization: do a check that both values are Smis
-  // (or them and test against Smi mask.)
+  Label new_space;
+  __ InNewSpace(elements, temp, equal, &new_space);
 
-  // We are swapping two objects in an array and the incremental marker never
-  // pauses in the middle of scanning a single object.  Therefore the
-  // incremental marker is not disturbed, so we don't need to call the
-  // RecordWrite stub that notifies the incremental marker.
-  __ RememberedSetHelper(elements,
-                         index_1,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
-  __ RememberedSetHelper(elements,
-                         index_2,
-                         temp,
-                         kDontSaveFPRegs,
-                         MacroAssembler::kFallThroughAtEnd);
+  __ movq(object, elements);
+  __ RecordWriteHelper(object, index_1, temp);
+  __ RecordWriteHelper(elements, index_2, temp);
 
-  __ bind(&no_remembered_set);
-
+  __ bind(&new_space);
   // We are done. Drop elements from the stack, and return undefined.
   __ addq(rsp, Immediate(3 * kPointerSize));
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
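
NOTE on the swap above: 3.7 skipped remembering only for SCAN_ON_SCAVENGE
pages and fed each slot through RememberedSetHelper; the restored code bails
out early when the elements array is in new space and otherwise re-remembers
both slots via RecordWriteHelper. Sketch, with an invented address-range
InNewSpace, not V8 code:

    #include <cstdint>
    #include <unordered_set>

    struct Heap {
      std::unordered_set<void**> remembered_set;
      static bool InNewSpace(const void* p) {
        return reinterpret_cast<std::uintptr_t>(p) < 0x10000000;  // invented bound
      }
    };

    void SwapWithBarrier(Heap* heap, void* array, void** a, void** b) {
      void* tmp = *a;
      *a = *b;
      *b = tmp;
      if (Heap::InNewSpace(array)) return;  // new-space arrays need no barrier
      heap->remembered_set.insert(a);       // both slots may now be old->new
      heap->remembered_set.insert(b);
    }
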
@@ -3287,8 +3031,7 @@
 }
 
 
-void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   ASSERT_NE(NULL, args->at(0)->AsLiteral());
@@ -3344,8 +3087,7 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExpEquivalent(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
   ASSERT_EQ(2, args->length());
 
   Register right = rax;
@@ -3383,8 +3125,7 @@
 }
 
 
-void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
@@ -3398,7 +3139,7 @@
 
   __ testl(FieldOperand(rax, String::kHashFieldOffset),
            Immediate(String::kContainsCachedArrayIndexMask));
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   __ j(zero, if_true);
   __ jmp(if_false);
 
@@ -3406,8 +3147,7 @@
 }
 
 
-void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
@@ -3423,11 +3163,10 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
   Label bailout, return_result, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-  ZoneList<Expression*>* args = expr->arguments();
   ASSERT(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
@@ -3613,7 +3352,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Get the separator ASCII character value.
+  // Get the separator ascii character value.
   // Register "string" holds the separator.
   __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ Set(index, 0);
@@ -3757,16 +3496,14 @@
       if (property != NULL) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
-            ? kNonStrictMode : kStrictMode;
-        __ Push(Smi::FromInt(strict_mode_flag));
+        __ Push(Smi::FromInt(strict_mode_flag()));
         __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
         context()->Plug(rax);
       } else if (proxy != NULL) {
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+        ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ Push(var->name());
@@ -3808,41 +3545,17 @@
         // Unary NOT has no side effects so it's only necessary to visit the
         // subexpression.  Match the optimizing compiler by not branching.
         VisitForEffect(expr->expression());
-      } else if (context()->IsTest()) {
-        const TestContext* test = TestContext::cast(context());
-        // The labels are swapped for the recursive call.
-        VisitForControl(expr->expression(),
-                        test->false_label(),
-                        test->true_label(),
-                        test->fall_through());
-        context()->Plug(test->true_label(), test->false_label());
       } else {
-        // We handle value contexts explicitly rather than simply visiting
-        // for control and plugging the control flow into the context,
-        // because we need to prepare a pair of extra administrative AST ids
-        // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
-        Label materialize_true, materialize_false, done;
-        VisitForControl(expr->expression(),
-                        &materialize_false,
-                        &materialize_true,
-                        &materialize_true);
-        __ bind(&materialize_true);
-        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ LoadRoot(rax, Heap::kTrueValueRootIndex);
-        } else {
-          __ PushRoot(Heap::kTrueValueRootIndex);
-        }
-        __ jmp(&done, Label::kNear);
-        __ bind(&materialize_false);
-        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
-        if (context()->IsAccumulatorValue()) {
-          __ LoadRoot(rax, Heap::kFalseValueRootIndex);
-        } else {
-          __ PushRoot(Heap::kFalseValueRootIndex);
-        }
-        __ bind(&done);
+        Label materialize_true, materialize_false;
+        Label* if_true = NULL;
+        Label* if_false = NULL;
+        Label* fall_through = NULL;
+        // Notice that the labels are swapped.
+        context()->PrepareTest(&materialize_true, &materialize_false,
+                               &if_false, &if_true, &fall_through);
+        if (context()->IsTest()) ForwardBailoutToChild(expr);
+        VisitForControl(expr->expression(), if_true, if_false, fall_through);
+        context()->Plug(if_false, if_true);  // Labels swapped.
       }
       break;
     }
@@ -4047,9 +3760,9 @@
     case NAMED_PROPERTY: {
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->StoreIC_Initialize()
-          : isolate()->builtins()->StoreIC_Initialize_Strict();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->StoreIC_Initialize_Strict()
+          : isolate()->builtins()->StoreIC_Initialize();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4064,9 +3777,9 @@
     case KEYED_PROPERTY: {
       __ pop(rcx);
       __ pop(rdx);
-      Handle<Code> ic = is_classic_mode()
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      Handle<Code> ic = is_strict_mode()
+          ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
+          : isolate()->builtins()->KeyedStoreIC_Initialize();
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4114,25 +3827,20 @@
     context()->Plug(rax);
   } else {
     // This expression cannot throw a reference error at the top level.
-    VisitInDuplicateContext(expr);
+    VisitInCurrentContext(expr);
   }
 }
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Expression* sub_expr,
-                                                 Handle<String> check) {
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
+                                                 Handle<String> check,
+                                                 Label* if_true,
+                                                 Label* if_false,
+                                                 Label* fall_through) {
   { AccumulatorValueContext context(this);
-    VisitForTypeofValue(sub_expr);
+    VisitForTypeofValue(expr);
   }
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
 
   if (check->Equals(isolate()->heap()->number_symbol())) {
     __ JumpIfSmi(rax, if_true);
@@ -4167,11 +3875,9 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
-    __ j(equal, if_true);
-    __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
-    Split(equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
+    Split(above_equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(rax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4189,7 +3895,18 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+                                                    Label* if_true,
+                                                    Label* if_false,
+                                                    Label* fall_through) {
+  VisitForAccumulatorValue(expr);
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  Split(equal, if_true, if_false, fall_through);
 }
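
NOTE: the restored typeof "function" check relies on the 3.6 instance-type
layout, where all callable types form a contiguous range at the top of the
enum, so one above_equal compare suffices; 3.7 moved JS_FUNCTION_TYPE and
JS_FUNCTION_PROXY_TYPE apart and needed two compares. Sketch with invented
enum values:

    enum InstanceType {
      HEAP_NUMBER_TYPE = 10,                 // values invented for illustration
      JS_OBJECT_TYPE = 20,
      FIRST_CALLABLE_SPEC_OBJECT_TYPE = 30,
      JS_FUNCTION_PROXY_TYPE = 30,
      JS_FUNCTION_TYPE = 31,
      LAST_TYPE = 31
    };

    bool TypeofIsFunction(InstanceType type) {
      static_assert(LAST_TYPE == JS_FUNCTION_TYPE,
                    "callables must sit at the top of the range");
      return type >= FIRST_CALLABLE_SPEC_OBJECT_TYPE;  // one compare, like the asm
    }
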
 
 
@@ -4197,10 +3914,6 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr)) return;
-
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
   Label materialize_true, materialize_false;
@@ -4210,13 +3923,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
+    context()->Plug(if_true, if_false);
+    return;
+  }
+
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
-      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
       break;
@@ -4225,7 +3945,7 @@
       VisitForStackValue(expr->right());
       InstanceofStub stub(InstanceofStub::kNoFlags);
       __ CallStub(&stub);
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
        // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
@@ -4239,25 +3959,33 @@
         case Token::EQ_STRICT:
         case Token::EQ:
           cc = equal;
+          __ pop(rdx);
           break;
         case Token::LT:
           cc = less;
+          __ pop(rdx);
           break;
         case Token::GT:
-          cc = greater;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = less;
+          __ movq(rdx, result_register());
+          __ pop(rax);
           break;
         case Token::LTE:
-          cc = less_equal;
+          // Reverse left and right sides to obtain ECMA-262 conversion order.
+          cc = greater_equal;
+          __ movq(rdx, result_register());
+          __ pop(rax);
           break;
         case Token::GTE:
           cc = greater_equal;
+          __ pop(rdx);
           break;
         case Token::IN:
         case Token::INSTANCEOF:
         default:
           UNREACHABLE();
       }
-      __ pop(rdx);
 
       bool inline_smi_code = ShouldInlineSmiCase(op);
       JumpPatchSite patch_site(masm_);
@@ -4277,7 +4005,7 @@
       __ call(ic, RelocInfo::CODE_TARGET, expr->id());
       patch_site.EmitPatchInfo();
 
-      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
       __ testq(rax, rax);
       Split(cc, if_true, if_false, fall_through);
     }
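
NOTE: for GT and LTE the restored code swaps the operands and mirrors the
condition code, so the compare stub keeps its canonical operand order while
the side-effecting ToPrimitive conversions still run left-to-right as
ECMA-262 requires. The identity it relies on, as a runnable check:

    #include <cassert>

    int main() {
      for (int a = -2; a <= 2; ++a) {
        for (int b = -2; b <= 2; ++b) {
          assert((a > b) == (b < a));    // greater   -> swap operands + less
          assert((a <= b) == (b >= a));  // less_eq   -> swap + greater_equal
        }
      }
      return 0;
    }
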
@@ -4289,9 +4017,8 @@
 }
 
 
-void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
-                                              Expression* sub_expr,
-                                              NilValue nil) {
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+  Comment cmnt(masm_, "[ CompareToNull");
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4299,20 +4026,14 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(sub_expr);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Heap::RootListIndex nil_value = nil == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
-  __ CompareRoot(rax, nil_value);
-  if (expr->op() == Token::EQ_STRICT) {
+  VisitForAccumulatorValue(expr->expression());
+  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  if (expr->is_strict()) {
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Heap::RootListIndex other_nil_value = nil == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
     __ j(equal, if_true);
-    __ CompareRoot(rax, other_nil_value);
+    __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
     __ j(equal, if_true);
     __ JumpIfSmi(rax, if_false);
     // It can be an undetectable object.
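
NOTE: VisitCompareToNull, removed in 3.7 in favor of the generic
EmitLiteralCompareNil, special-cases comparisons against null: loosely,
null, undefined, and undetectable objects all match; strictly, only null
itself does. The semantics in plain C++, tags invented:

    enum class Tag { kNull, kUndefined, kSmi, kObject };

    struct Value {
      Tag tag;
      bool undetectable;  // stands in for the map's kIsUndetectable bit
    };

    bool CompareToNull(const Value& v, bool is_strict) {
      if (is_strict) return v.tag == Tag::kNull;  // x === null
      if (v.tag == Tag::kNull || v.tag == Tag::kUndefined) return true;
      return v.tag == Tag::kObject && v.undetectable;  // e.g. document.all
    }
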
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 0632ce4..9d55594 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -221,7 +221,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ movq(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
+  __ RecordWrite(elements, scratch1, scratch0);
 }
 
 
@@ -462,58 +462,30 @@
   __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
   __ shr(rdi, Immediate(String::kHashShift));
   __ xor_(rcx, rdi);
-  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-  __ and_(rcx, Immediate(mask));
+  __ and_(rcx, Immediate(KeyedLookupCache::kCapacityMask));
 
   // Load the key (consisting of map and symbol) from the cache and
   // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
   ExternalReference cache_keys
       = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ movq(rdi, rcx);
-    __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
-    __ LoadAddress(kScratchRegister, cache_keys);
-    int off = kPointerSize * i * 2;
-    __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
-    __ j(not_equal, &try_next_entry);
-    __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
-    __ j(equal, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
-  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+  __ movq(rdi, rcx);
+  __ shl(rdi, Immediate(kPointerSizeLog2 + 1));
+  __ LoadAddress(kScratchRegister, cache_keys);
+  __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, 0));
   __ j(not_equal, &slow);
-  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+  __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, kPointerSize));
   __ j(not_equal, &slow);
 
   // Get field offset, which is a 32-bit integer.
   ExternalReference cache_field_offsets
       = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    if (i != 0) {
-      __ addl(rcx, Immediate(i));
-    }
-    __ LoadAddress(kScratchRegister, cache_field_offsets);
-    __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
-    __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
-    __ subq(rdi, rcx);
-    __ j(above_equal, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
+  __ LoadAddress(kScratchRegister, cache_field_offsets);
+  __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
+  __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+  __ subq(rdi, rcx);
+  __ j(above_equal, &property_array_property);
 
   // Load in-object property.
-  __ bind(&load_in_object_property);
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ addq(rcx, rdi);
   __ movq(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
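
NOTE: 3.7 gave the keyed lookup cache two entries per hash bucket and probed
both before falling back to the slow case; the restored 3.6 cache is
direct-mapped, one (map, symbol) compare pair per probe. A sketch of the
direct-mapped shape, sizes and hash invented:

    #include <cstddef>

    struct KeyedLookupCache {
      static const std::size_t kLength = 64;  // capacity invented
      struct Key { const void* map; const void* name; };
      Key keys[kLength] = {};
      int field_offsets[kLength] = {};

      static std::size_t Hash(const void* map, const void* name) {
        return (reinterpret_cast<std::size_t>(map) ^
                (reinterpret_cast<std::size_t>(name) >> 2)) % kLength;
      }

      // Returns -1 on a miss, mirroring the stub's jump to the slow case.
      int Lookup(const void* map, const void* name) const {
        std::size_t index = Hash(map, name);
        if (keys[index].map == map && keys[index].name == name) {
          return field_offsets[index];
        }
        return -1;
      }
    };
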
@@ -559,12 +531,14 @@
 
   Register receiver = rdx;
   Register index = rax;
-  Register scratch = rcx;
+  Register scratch1 = rbx;
+  Register scratch2 = rcx;
   Register result = rax;
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
-                                          scratch,
+                                          scratch1,
+                                          scratch2,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -632,42 +606,45 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
-  Label fast_object_with_map_check, fast_object_without_map_check;
-  Label fast_double_with_map_check, fast_double_without_map_check;
-  Label transition_smi_elements, finish_object_store, non_double_value;
-  Label transition_double_elements;
+  Label slow, slow_with_tagged_index, fast, array, extra;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow_with_tagged_index);
   // Get the map from the receiver.
-  __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
   __ SmiToInteger32(rcx, rcx);
 
-  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
+  __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
   __ j(below, &slow);
+  __ CmpInstanceType(rbx, JS_PROXY_TYPE);
+  __ j(equal, &slow);
+  __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
+  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check array bounds.
+  // Check that the object is in fast mode and writable.
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &slow);
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   // rax: value
   // rbx: FixedArray
   // rcx: index
-  __ j(above, &fast_object_with_map_check);
+  __ j(above, &fast);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -689,20 +666,9 @@
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   __ j(below_equal, &slow);
   // Increment index to get new length.
-  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &check_extra_double);
   __ leal(rdi, Operand(rcx, 1));
   __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  __ jmp(&fast_object_without_map_check);
-
-  __ bind(&check_extra_double);
-  // rdi: elements array's map
-  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ j(not_equal, &slow);
-  __ leal(rdi, Operand(rcx, 1));
-  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  __ jmp(&fast_double_without_map_check);
+  __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -712,6 +678,9 @@
   // rdx: receiver (a JSArray)
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
@@ -719,100 +688,30 @@
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
-  __ bind(&fast_object_with_map_check);
+  __ bind(&fast);
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
-  // rdx: receiver (a JSArray)
-  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &fast_double_with_map_check);
-  __ bind(&fast_object_without_map_check);
-  // Smi stores don't require further checks.
   Label non_smi_value;
-  __ JumpIfNotSmi(rax, &non_smi_value);
-  // It's irrelevant whether array is smi-only or not when writing a smi.
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
+  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
-
   __ bind(&non_smi_value);
-  // Writing a non-smi, check whether array allows non-smi elements.
-  // r9: receiver's map
-  __ CheckFastObjectElements(r9, &transition_smi_elements);
-  __ bind(&finish_object_store);
-  __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ movq(rdx, rax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  // Slow case that needs to retain rcx for use by RecordWrite.
+  // Update write barrier for the elements array address.
+  __ movq(rdx, rax);
+  __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
   __ ret(0);
-
-  __ bind(&fast_double_with_map_check);
-  // Check for fast double array case. If this fails, call through to the
-  // runtime.
-  // rdi: elements array's map
-  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ j(not_equal, &slow);
-  __ bind(&fast_double_without_map_check);
-  // If the value is a number, store it as a double in the FastDoubleElements
-  // array.
-  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
-                                 &transition_double_elements);
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ movq(r9, FieldOperand(rax, HeapObject::kMapOffset));
-  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         &slow);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
-  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
 }
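
The fast path above stores first and only branches away for non-smi values because smis are immediates, not heap pointers: a smi store can never create an old-to-new edge, so it needs no write barrier. A minimal sketch of the tagging idea behind that shortcut (classic low-bit tagging for brevity; x64 V8 of this era actually keeps the 32-bit payload in the upper half of the word):

#include <cassert>
#include <cstdint>

// Classic low-bit smi tagging: integers carry tag bit 0, heap pointers
// carry tag bit 1, so a smi store never introduces a heap pointer and
// can skip the write barrier entirely.
const intptr_t kSmiTagMask = 1;

intptr_t SmiFromInt(int32_t value) { return static_cast<intptr_t>(value) * 2; }
int32_t SmiToInt(intptr_t tagged) { return static_cast<int32_t>(tagged / 2); }
bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

int main() {
  intptr_t tagged = SmiFromInt(-42);
  assert(IsSmi(tagged));
  assert(SmiToInt(tagged) == -42);
  return 0;
}
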
 
 
 // The generated code does not accept smi keys.
 // The generated code falls through if both probes miss.
-void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
-                                               int argc,
-                                               Code::Kind kind,
-                                               Code::ExtraICState extra_state) {
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind,
+                                          Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rdx                      : receiver
@@ -822,7 +721,7 @@
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(kind,
                                          MONOMORPHIC,
-                                         extra_state,
+                                         extra_ic_state,
                                          NORMAL,
                                          argc);
   Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rdx, rcx, rbx,
@@ -895,7 +794,7 @@
 
 
 // The generated code falls through if the call should be handled by runtime.
-void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                    : function name
   // rsp[0]                 : return address
@@ -922,10 +821,10 @@
 }
 
 
-void CallICBase::GenerateMiss(MacroAssembler* masm,
-                              int argc,
-                              IC::UtilityId id,
-                              Code::ExtraICState extra_state) {
+static void GenerateCallMiss(MacroAssembler* masm,
+                             int argc,
+                             IC::UtilityId id,
+                             Code::ExtraICState extra_ic_state) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -947,22 +846,21 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Enter an internal frame.
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
+  __ EnterInternalFrame();
 
-    // Push the receiver and the name of the function.
-    __ push(rdx);
-    __ push(rcx);
+  // Push the receiver and the name of the function.
+  __ push(rdx);
+  __ push(rcx);
 
-    // Call the entry.
-    CEntryStub stub(1);
-    __ Set(rax, 2);
-    __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
-    __ CallStub(&stub);
+  // Call the entry.
+  CEntryStub stub(1);
+  __ Set(rax, 2);
+  __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+  __ CallStub(&stub);
 
-    // Move result to rdi and exit the internal frame.
-    __ movq(rdi, rax);
-  }
+  // Move result to rdi and exit the internal frame.
+  __ movq(rdi, rax);
+  __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -983,7 +881,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   ParameterCount actual(argc);
@@ -1015,6 +913,39 @@
 }
 
 
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc, Code::kNoExtraICState);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm,
+                          int argc,
+                          Code::ExtraICState extra_ic_state) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss, extra_ic_state);
+}
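
GenerateCallMiss above follows the usual inline-cache miss discipline: save what the probe clobbered, call into the runtime through CEntryStub inside an internal frame, then invoke whatever the runtime handed back. A toy model of that discipline, with plain function pointers standing in for patched machine code (all names here are hypothetical, not V8's):

#include <cstdio>

typedef int (*Handler)(int);

static int GenericHandler(int x) { return x * 2; }

struct CallSite {
  Handler cached;  // stands in for the patched target of a call site
};

// The "runtime": pick a handler, patch the site, finish the call.
static int MissHandler(CallSite* site, int x) {
  site->cached = GenericHandler;
  return site->cached(x);
}

static int Invoke(CallSite* site, int x) {
  return site->cached ? site->cached(x) : MissHandler(site, x);
}

int main() {
  CallSite site = {nullptr};
  printf("%d\n", Invoke(&site, 21));  // miss: patches the site, prints 42
  printf("%d\n", Invoke(&site, 10));  // hit: prints 20
  return 0;
}
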
+
+
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                      : function name
@@ -1071,14 +1002,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ push(rcx);  // save the key
-    __ push(rdx);  // pass the receiver
-    __ push(rcx);  // pass the key
-    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-    __ pop(rcx);  // restore the key
-  }
+  __ EnterInternalFrame();
+  __ push(rcx);  // save the key
+  __ push(rdx);  // pass the receiver
+  __ push(rcx);  // pass the key
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(rcx);  // restore the key
+  __ LeaveInternalFrame();
   __ movq(rdi, rax);
   __ jmp(&do_call);
 
@@ -1142,12 +1072,27 @@
   __ JumpIfSmi(rcx, &miss);
   Condition cond = masm->IsObjectStringType(rcx, rax, rax);
   __ j(NegateCondition(cond), &miss);
-  CallICBase::GenerateNormal(masm, argc);
+  GenerateCallNormal(masm, argc);
   __ bind(&miss);
   GenerateMiss(masm, argc);
 }
 
 
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss, Code::kNoExtraICState);
+}
+
+
 static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                              Register object,
                                              Register key,
@@ -1267,12 +1212,7 @@
   __ movq(mapped_location, rax);
   __ lea(r9, mapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx,
-                 r9,
-                 r8,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 INLINE_SMI_CHECK);
+  __ RecordWrite(rbx, r9, r8);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in rbx.
@@ -1281,12 +1221,7 @@
   __ movq(unmapped_location, rax);
   __ lea(r9, unmapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx,
-                 r9,
-                 r8,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 INLINE_SMI_CHECK);
+  __ RecordWrite(rbx, r9, r8);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1473,10 +1408,11 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   //
-  // This accepts as a receiver anything JSArray::SetElementsLength accepts
-  // (currently anything except for external arrays which means anything with
-  // elements of FixedArray type).  Value must be a number, but only smis are
-  // accepted as the most common case.
+  // This accepts as a receiver anything JSObject::SetElementsLength accepts
+  // (currently anything except for external and pixel arrays, which means
+  // anything with elements of FixedArray type), but is currently restricted
+  // to JSArray.
+  // Value must be a number, but only smis are accepted as the most common case.
 
   Label miss;
 
@@ -1498,13 +1434,6 @@
   __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
   __ j(not_equal, &miss);
 
-  // Check that the array has fast properties, otherwise the length
-  // property might have been redefined.
-  __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
-  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(equal, &miss);
-
   // Check that value is a smi.
   __ JumpIfNotSmi(value, &miss);
 
@@ -1633,51 +1562,6 @@
 }
 
 
-void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rbx     : target map
-  //  -- rdx     : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-  // Must return the modified receiver in eax.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &fail);
-    __ movq(rax, rdx);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ pop(rbx);
-  __ push(rdx);
-  __ push(rbx);  // return address
-  __ TailCallRuntime(Runtime::kTransitionElementsSmiToDouble, 1, 1);
-}
-
-
-void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rbx     : target map
-  //  -- rdx     : receiver
-  //  -- rsp[0]  : return address
-  // -----------------------------------
-  // Must return the modified receiver in eax.
-  if (!FLAG_trace_elements_transitions) {
-    Label fail;
-    ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
-    __ movq(rax, rdx);
-    __ Ret();
-    __ bind(&fail);
-  }
-
-  __ pop(rbx);
-  __ push(rdx);
-  __ push(rbx);  // return address
-  __ TailCallRuntime(Runtime::kTransitionElementsDoubleToObject, 1, 1);
-}
-
-
 #undef __
 
 
@@ -1689,9 +1573,11 @@
     case Token::LT:
       return less;
     case Token::GT:
-      return greater;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return less;
     case Token::LTE:
-      return less_equal;
+      // Reverse left and right operands to obtain ECMA-262 conversion order.
+      return greater_equal;
     case Token::GTE:
       return greater_equal;
     default:
@@ -1723,9 +1609,6 @@
     rewritten = stub.GetCode();
   } else {
     ICCompareStub stub(op_, state);
-    if (state == KNOWN_OBJECTS) {
-      stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
-    }
     rewritten = stub.GetCode();
   }
   set_target(*rewritten);
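
The TokenToCondition change above restores the scheme in which GT and LTE are evaluated with the operands swapped, so the left operand is still converted first (ECMA-262's left-to-right conversion order) while only a `less`-style compare is emitted; the condition must be swapped together with the operands. A small sketch of that pairing (hypothetical enum, not V8's):

#include <cassert>

enum Condition { kLess, kLessEqual, kGreater, kGreaterEqual };

// Condition to test after swapping operands: (a OP b) == (b OP' a).
Condition ReverseOperands(Condition cond) {
  switch (cond) {
    case kLess:         return kGreater;
    case kGreater:      return kLess;
    case kLessEqual:    return kGreaterEqual;
    case kGreaterEqual: return kLessEqual;
  }
  return cond;
}

int main() {
  // 3 > 2 becomes 2 < 3: GT maps to kLess once operands are swapped,
  // but the left operand is still converted first, as the spec requires.
  assert(ReverseOperands(kGreater) == kLess);
  assert(ReverseOperands(kLessEqual) == kGreaterEqual);
  return 0;
}
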
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 2ba2c57..b82dc54 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -67,15 +67,9 @@
 #define __ masm()->
 
 bool LCodeGen::GenerateCode() {
-  HPhase phase("Z_Code generation", chunk());
+  HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
-
-  // Open a frame scope to indicate that there is a frame on the stack.  The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done in GeneratePrologue).
-  FrameScope frame_scope(masm_, StackFrame::MANUAL);
-
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -139,7 +133,7 @@
   // when called as functions (without an explicit receiver
   // object). rcx is zero for method calls and non-zero for function
   // calls.
-  if (!info_->is_classic_mode() || info_->is_native()) {
+  if (info_->is_strict_mode() || info_->is_native()) {
     Label ok;
     __ testq(rcx, rcx);
     __ j(zero, &ok, Label::kNear);
@@ -211,8 +205,11 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers rax and rbx.
-        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+        // Update the write barrier. This clobbers all involved
+        // registers, so we have to use a third register to avoid
+        // clobbering rsi.
+        __ movq(rcx, rsi);
+        __ RecordWrite(rcx, context_offset, rax, rbx);
       }
     }
     Comment(";;; End allocate local context");
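
RecordWrite here is a generational write barrier: a pointer stored into an old object must be logged so a young-generation collection can find old-to-new edges without scanning all of old space. A card-marking sketch of the idea (one common implementation style; the sizes and layout are assumptions, not V8's actual barrier):

#include <cstdint>

const uintptr_t kCardSize = 512;
static uint8_t card_table[1 << 20];  // one byte per card (toy sizing)

// Dirty the card of the *host* slot, then do the store; a scavenge scans
// only dirty cards when looking for old-to-new pointers.
void WriteBarrier(void** host_slot, void* new_value) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(host_slot);
  card_table[(addr / kCardSize) % sizeof(card_table)] = 1;
  *host_slot = new_value;
}

int main() {
  static void* slot = nullptr;
  static int value = 0;
  WriteBarrier(&slot, &value);
  return slot == &value ? 0 : 1;
}
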
@@ -263,9 +260,6 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
-      Comment(";;; Deferred code @%d: %s.",
-              code->instruction_index(),
-              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -328,12 +322,6 @@
 }
 
 
-double LCodeGen::ToDouble(LConstantOperand* op) const {
-  Handle<Object> value = chunk_->LookupLiteral(op);
-  return value->Number();
-}
-
-
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   Handle<Object> literal = chunk_->LookupLiteral(op);
   ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
@@ -368,19 +356,7 @@
 
   WriteTranslation(environment->outer(), translation);
   int closure_id = DefineDeoptimizationLiteral(environment->closure());
-  switch (environment->frame_type()) {
-    case JS_FUNCTION:
-      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
-      break;
-    case JS_CONSTRUCT:
-      translation->BeginConstructStubFrame(closure_id, translation_size);
-      break;
-    case ARGUMENTS_ADAPTOR:
-      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
-      break;
-    default:
-      UNREACHABLE();
-  }
+  translation->BeginFrame(environment->ast_id(), closure_id, height);
   for (int i = 0; i < translation_size; ++i) {
     LOperand* value = environment->values()->at(i);
     // spilled_registers_ and spilled_double_registers_ are either
@@ -516,14 +492,10 @@
     // |>------------  translation_size ------------<|
 
     int frame_count = 0;
-    int jsframe_count = 0;
     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
       ++frame_count;
-      if (e->frame_type() == JS_FUNCTION) {
-        ++jsframe_count;
-      }
     }
-    Translation translation(&translations_, frame_count, jsframe_count);
+    Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
     int pc_offset = masm()->pc_offset();
@@ -540,6 +512,7 @@
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+  ASSERT(entry != NULL);
   if (entry == NULL) {
     Abort("bailout was not prepared");
     return;
@@ -562,6 +535,7 @@
 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   int length = deoptimizations_.length();
   if (length == 0) return;
+  ASSERT(FLAG_deopt);
   Handle<DeoptimizationInputData> data =
       factory()->NewDeoptimizationInputData(length, TENURED);
 
@@ -637,7 +611,7 @@
     Safepoint::DeoptMode deopt_mode) {
   ASSERT(kind == expected_safepoint_kind_);
 
-  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  const ZoneList<LOperand*>* operands = pointers->operands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deopt_mode);
@@ -999,11 +973,11 @@
         DeoptimizeIf(no_condition, instr->environment());
       }
     } else if (right->IsStackSlot()) {
-      __ orl(kScratchRegister, ToOperand(right));
+      __ or_(kScratchRegister, ToOperand(right));
       DeoptimizeIf(sign, instr->environment());
     } else {
       // Test the non-zero operand for negative sign.
-      __ orl(kScratchRegister, ToRegister(right));
+      __ or_(kScratchRegister, ToRegister(right));
       DeoptimizeIf(sign, instr->environment());
     }
     __ bind(&done);
@@ -1168,13 +1142,8 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  Handle<Object> value = instr->value();
-  if (value->IsSmi()) {
-    __ Move(ToRegister(instr->result()), value);
-  } else {
-    __ LoadHeapObject(ToRegister(instr->result()),
-                      Handle<HeapObject>::cast(value));
-  }
+  ASSERT(instr->result()->IsRegister());
+  __ Move(ToRegister(instr->result()), instr->value());
 }
 
 
@@ -1223,49 +1192,6 @@
 }
 
 
-void LCodeGen::DoDateField(LDateField* instr) {
-  Register object = ToRegister(instr->InputAt(0));
-  Register result = ToRegister(instr->result());
-  Smi* index = instr->index();
-  Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(rax));
-
-#ifdef DEBUG
-  __ AbortIfSmi(object);
-  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
-  __ Assert(equal, "Trying to get date field from non-date.");
-#endif
-
-  if (index->value() == 0) {
-    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
-  } else {
-    if (index->value() < JSDate::kFirstUncachedField) {
-      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
-      __ movq(kScratchRegister, stamp);
-      __ cmpq(kScratchRegister, FieldOperand(object,
-                                             JSDate::kCacheStampOffset));
-      __ j(not_equal, &runtime, Label::kNear);
-      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
-                                           kPointerSize * index->value()));
-      __ jmp(&done);
-    }
-    __ bind(&runtime);
-    __ PrepareCallCFunction(2);
-#ifdef _WIN64
-  __ movq(rcx, object);
-  __ movq(rdx, index, RelocInfo::NONE);
-#else
-  __ movq(rdi, object);
-  __ movq(rsi, index, RelocInfo::NONE);
-#endif
-    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
-    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    __ bind(&done);
-  }
-}
-
-
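
The deleted DoDateField fast path reads a cached date component only while the object's stamp matches a global date-cache stamp; bumping the global stamp (for example, on a timezone change) invalidates every cached field at once without touching the objects. A sketch of that stamp pattern (the field names and the year arithmetic are illustrative only):

#include <cstdio>

static int global_stamp = 1;

struct Date {
  double value;     // ms since epoch, always valid
  int cache_stamp;  // fields below are valid iff == global_stamp
  int cached_year;
};

int Year(Date* d) {
  if (d->cache_stamp != global_stamp) {  // miss: recompute and re-stamp
    d->cached_year = 1970 + static_cast<int>(d->value / 31557600000.0);
    d->cache_stamp = global_stamp;
  }
  return d->cached_year;  // hit: a single load
}

int main() {
  Date d = {1325376000000.0, 0, 0};
  printf("%d\n", Year(&d));  // recomputes
  global_stamp++;            // e.g. the local timezone changed
  printf("%d\n", Year(&d));  // stamp mismatch: recomputes again
  return 0;
}
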
 void LCodeGen::DoBitNotI(LBitNotI* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->Equals(instr->result()));
@@ -1531,51 +1457,39 @@
 }
 
 
+void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
+  if (right->IsConstantOperand()) {
+    int32_t value = ToInteger32(LConstantOperand::cast(right));
+    if (left->IsRegister()) {
+      __ cmpl(ToRegister(left), Immediate(value));
+    } else {
+      __ cmpl(ToOperand(left), Immediate(value));
+    }
+  } else if (right->IsRegister()) {
+    __ cmpl(ToRegister(left), ToRegister(right));
+  } else {
+    __ cmpl(ToRegister(left), ToOperand(right));
+  }
+}
+
+
 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
   LOperand* left = instr->InputAt(0);
   LOperand* right = instr->InputAt(1);
   int false_block = chunk_->LookupDestination(instr->false_block_id());
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Condition cc = TokenToCondition(instr->op(), instr->is_double());
 
-  if (left->IsConstantOperand() && right->IsConstantOperand()) {
-    // We can statically evaluate the comparison.
-    double left_val = ToDouble(LConstantOperand::cast(left));
-    double right_val = ToDouble(LConstantOperand::cast(right));
-    int next_block =
-      EvalComparison(instr->op(), left_val, right_val) ? true_block
-                                                       : false_block;
-    EmitGoto(next_block);
+  if (instr->is_double()) {
+    // Don't base result on EFLAGS when a NaN is involved. Instead
+    // jump to the false block.
+    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
   } else {
-    if (instr->is_double()) {
-      // Don't base result on EFLAGS when a NaN is involved. Instead
-      // jump to the false block.
-      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
-      __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
-    } else {
-      int32_t value;
-      if (right->IsConstantOperand()) {
-        value = ToInteger32(LConstantOperand::cast(right));
-        __ cmpl(ToRegister(left), Immediate(value));
-      } else if (left->IsConstantOperand()) {
-        value = ToInteger32(LConstantOperand::cast(left));
-        if (right->IsRegister()) {
-          __ cmpl(ToRegister(right), Immediate(value));
-        } else {
-          __ cmpl(ToOperand(right), Immediate(value));
-        }
-        // We transposed the operands. Reverse the condition.
-        cc = ReverseCondition(cc);
-      } else {
-        if (right->IsRegister()) {
-          __ cmpl(ToRegister(left), ToRegister(right));
-        } else {
-          __ cmpl(ToRegister(left), ToOperand(right));
-        }
-      }
-    }
-    EmitBranch(true_block, false_block, cc);
+    EmitCmpI(left, right);
   }
+
+  Condition cc = TokenToCondition(instr->op(), instr->is_double());
+  EmitBranch(true_block, false_block, cc);
 }
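
The parity_even jump above exists because ucomisd reports NaN operands as "unordered" through the parity flag, and ECMA-262 makes every relational comparison involving NaN false, so NaN cases must be routed to the false block before the ordinary condition is tested. The same rule in portable C++:

#include <cassert>
#include <cmath>

// All ordered comparisons against NaN are false; code branching on a
// hardware compare must check the "unordered" outcome (PF on x86) first.
bool LessThan(double a, double b) {
  if (std::isunordered(a, b)) return false;  // NaN involved: always false
  return a < b;
}

int main() {
  double nan = std::nan("");
  assert(!LessThan(nan, 1.0));
  assert(!LessThan(1.0, nan));
  assert(LessThan(1.0, 2.0));
  return 0;
}
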
 
 
@@ -1600,33 +1514,30 @@
 }
 
 
-void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
+void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
+
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  // If the expression is known to be untagged or a smi, then it's definitely
-  // not null, and it can't be a an undetectable object.
   if (instr->hydrogen()->representation().IsSpecialization() ||
       instr->hydrogen()->type().IsSmi()) {
+    // If the expression is known to be untagged or a smi, then it's
+    // definitely not null, and it can't be an undetectable object.
+    // Jump directly to the false block.
     EmitGoto(false_block);
     return;
   }
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
-      Heap::kNullValueRootIndex :
-      Heap::kUndefinedValueRootIndex;
-  __ CompareRoot(reg, nil_value);
-  if (instr->kind() == kStrictEquality) {
+
+  __ CompareRoot(reg, Heap::kNullValueRootIndex);
+  if (instr->is_strict()) {
     EmitBranch(true_block, false_block, equal);
   } else {
-    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
-        Heap::kUndefinedValueRootIndex :
-        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ CompareRoot(reg, other_nil_value);
+    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1679,30 +1590,6 @@
 }
 
 
-Condition LCodeGen::EmitIsString(Register input,
-                                 Register temp1,
-                                 Label* is_not_string) {
-  __ JumpIfSmi(input, is_not_string);
-  Condition cond =  masm_->IsObjectStringType(input, temp1, temp1);
-
-  return cond;
-}
-
-
-void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
-  Register reg = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-  Label* false_label = chunk_->GetAssemblyLabel(false_block);
-
-  Condition true_cond = EmitIsString(reg, temp, false_label);
-
-  EmitBranch(true_block, false_block, true_cond);
-}
-
-
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1734,21 +1621,6 @@
 }
 
 
-void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  Token::Value op = instr->op();
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
-  int false_block = chunk_->LookupDestination(instr->false_block_id());
-
-  Handle<Code> ic = CompareIC::GetUninitialized(op);
-  CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
-  Condition condition = TokenToCondition(op, false);
-  __ testq(rax, rax);
-
-  EmitBranch(true_block, false_block, condition);
-}
-
-
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1812,48 +1684,35 @@
 
 
 // Branches to a label or falls through with the answer in the z flag.
-// Trashes the temp register.
+// Trashes the temp register and possibly input (if it and temp are aliased).
 void LCodeGen::EmitClassOfTest(Label* is_true,
                                Label* is_false,
                                Handle<String> class_name,
                                Register input,
-                               Register temp,
-                               Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
-
+                               Register temp) {
   __ JumpIfSmi(input, is_false);
+  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+  __ j(below, is_false);
 
+  // Map is now in temp.
+  // Functions have class 'Function'.
+  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    // Assuming the following assertions, we can use the same compares to test
-    // for both being a function type and being in the object type range.
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  FIRST_SPEC_OBJECT_TYPE + 1);
-    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
-                  LAST_SPEC_OBJECT_TYPE - 1);
-    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-    __ j(below, is_false);
-    __ j(equal, is_true);
-    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
-    __ j(equal, is_true);
+    __ j(above_equal, is_true);
   } else {
-    // Faster code path to avoid two compares: subtract lower bound from the
-    // actual type and do a signed compare with the width of the type range.
-    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
-    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
-    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
-                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-    __ j(above, is_false);
+    __ j(above_equal, is_false);
   }
 
-  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
 
+  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
+  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1882,7 +1741,6 @@
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
-  Register temp2 = ToRegister(instr->TempAt(1));
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1891,7 +1749,7 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
+  EmitClassOfTest(true_label, false_label, class_name, input, temp);
 
   EmitBranch(true_block, false_block, equal);
 }
@@ -1932,8 +1790,9 @@
     virtual void Generate() {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() { return instr_; }
+
     Label* map_check() { return &map_check_; }
+
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1957,10 +1816,9 @@
   Register map = ToRegister(instr->TempAt(0));
   __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
   __ bind(deferred->map_check());  // Label for calculating code patching.
-  Handle<JSGlobalPropertyCell> cache_cell =
-      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
-  __ movq(kScratchRegister, cache_cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-  __ cmpq(map, Operand(kScratchRegister, 0));
+  __ movq(kScratchRegister, factory()->the_hole_value(),
+          RelocInfo::EMBEDDED_OBJECT);
+  __ cmpq(map, kScratchRegister);  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   // Patched to load either true or false.
   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -1998,7 +1856,7 @@
     InstanceofStub stub(flags);
 
     __ push(ToRegister(instr->InputAt(0)));
-    __ PushHeapObject(instr->function());
+    __ Push(instr->function());
 
     static const int kAdditionalDelta = 10;
     int delta =
@@ -2042,6 +1900,9 @@
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
+  if (op == Token::GT || op == Token::LTE) {
+    condition = ReverseCondition(condition);
+  }
   Label true_value, done;
   __ testq(rax, rax);
   __ j(condition, &true_value, Label::kNear);
@@ -2068,8 +1929,14 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadGlobalCell(result, instr->hydrogen()->cell());
-  if (instr->hydrogen()->RequiresHoleCheck()) {
+  if (result.is(rax)) {
+    __ load_rax(instr->hydrogen()->cell().location(),
+                RelocInfo::GLOBAL_PROPERTY_CELL);
+  } else {
+    __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ movq(result, Operand(result, 0));
+  }
+  if (instr->hydrogen()->check_hole_value()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
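
Globals go through a property cell: the generated code embeds the cell's address and loads the value through it, and the hole marks a cell whose property has been deleted, so the fast path must bail out and let the runtime update the property dictionary. A toy cell (the sentinel choice is an assumption):

#include <cassert>
#include <cstdint>

const intptr_t kTheHole = -1;  // sentinel; assumed never a real value

struct PropertyCell { intptr_t value; };

bool LoadGlobal(PropertyCell* cell, intptr_t* out) {
  if (cell->value == kTheHole) return false;  // deleted: deoptimize
  *out = cell->value;
  return true;
}

int main() {
  PropertyCell cell = {42};
  intptr_t v = 0;
  assert(LoadGlobal(&cell, &v) && v == 42);
  cell.value = kTheHole;           // the global was deleted
  assert(!LoadGlobal(&cell, &v));  // fast path refuses; runtime takes over
  return 0;
}
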
@@ -2089,28 +1956,25 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
-  Register value = ToRegister(instr->value());
-  Handle<JSGlobalPropertyCell> cell_handle = instr->hydrogen()->cell();
-
+  Register value = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+  ASSERT(!value.is(temp));
+  bool check_hole = instr->hydrogen()->check_hole_value();
+  if (!check_hole && value.is(rax)) {
+    __ store_rax(instr->hydrogen()->cell().location(),
+                 RelocInfo::GLOBAL_PROPERTY_CELL);
+    return;
+  }
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // We have a temp because CompareRoot might clobber kScratchRegister.
-    Register cell = ToRegister(instr->TempAt(0));
-    ASSERT(!value.is(cell));
-    __ movq(cell, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
+  __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
+  if (check_hole) {
+    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
-    // Store the value.
-    __ movq(Operand(cell, 0), value);
-  } else {
-    // Store the value.
-    __ movq(kScratchRegister, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ movq(Operand(kScratchRegister, 0), value);
   }
-  // Cells are always rescanned, so no write barrier here.
+  __ movq(Operand(temp, 0), value);
 }
 
 
@@ -2119,7 +1983,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->name());
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
@@ -2130,53 +1994,18 @@
   Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
   __ movq(result, ContextOperand(context, instr->slot_index()));
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
-    } else {
-      Label is_not_hole;
-      __ j(not_equal, &is_not_hole, Label::kNear);
-      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
-      __ bind(&is_not_hole);
-    }
-  }
 }
 
 
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-
-  Operand target = ContextOperand(context, instr->slot_index());
-
-  Label skip_assignment;
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
-    if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
-    } else {
-      __ j(not_equal, &skip_assignment);
-    }
-  }
-  __ movq(target, value);
-
-  if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  __ movq(ContextOperand(context, instr->slot_index()), value);
+  if (instr->needs_write_barrier()) {
     int offset = Context::SlotOffset(instr->slot_index());
     Register scratch = ToRegister(instr->TempAt(0));
-    __ RecordWriteContextSlot(context,
-                              offset,
-                              value,
-                              scratch,
-                              kSaveFPRegs,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
+    __ RecordWrite(context, offset, value, scratch);
   }
-
-  __ bind(&skip_assignment);
 }
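
The hole checks stripped from the context-slot load and store above implemented the temporal dead zone for harmony `let` bindings: a slot holds the hole until the declaration executes, and reading the hole either deoptimizes or yields undefined, depending on the binding. The idea in miniature (the sentinel and the error handling are illustrative):

#include <cassert>
#include <climits>
#include <stdexcept>

const int kTheHole = INT_MIN;  // sentinel; assumed never a real value

struct ContextSlot { int value; };

int Read(const ContextSlot& slot) {
  if (slot.value == kTheHole)  // still in the temporal dead zone
    throw std::runtime_error("use before initialization");
  return slot.value;
}

int main() {
  ContextSlot slot = {kTheHole};
  bool threw = false;
  try { Read(slot); } catch (const std::runtime_error&) { threw = true; }
  assert(threw);
  slot.value = 7;  // `let x = 7;` executes
  assert(Read(slot) == 7);
  return 0;
}
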
 
 
@@ -2196,9 +2025,9 @@
                                                Register object,
                                                Handle<Map> type,
                                                Handle<String> name) {
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   type->LookupInDescriptors(NULL, *name, &lookup);
-  ASSERT(lookup.IsFound() &&
+  ASSERT(lookup.IsProperty() &&
          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
   if (lookup.type() == FIELD) {
     int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2214,7 +2043,7 @@
     }
   } else {
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
+    LoadHeapObject(result, Handle<HeapObject>::cast(function));
   }
 }
 
@@ -2394,15 +2223,17 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
-  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-      sizeof(kHoleNanLower32);
-  Operand hole_check_operand = BuildFastArrayOperand(
-      instr->elements(),
-      instr->key(),
-      FAST_DOUBLE_ELEMENTS,
-      offset);
-  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-  DeoptimizeIf(equal, instr->environment());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+        sizeof(kHoleNanLower32);
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(),
+        instr->key(),
+        FAST_DOUBLE_ELEMENTS,
+        offset);
+    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr->environment());
+  }
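
The kHoleNanUpper32 check works because holes in a FastDoubleElements array are stored in-band as one reserved NaN bit pattern, distinguishable from every canonical value by its upper 32 bits alone. A sketch (the exact bit pattern used here is an assumption):

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumed hole pattern: exponent all ones plus a mantissa that no
// canonicalized NaN uses, so the upper 32 bits identify a hole.
const uint64_t kHoleNanBits = 0x7FF7FFFFFFFFFFFFULL;

double HoleValue() {
  double d;
  std::memcpy(&d, &kHoleNanBits, sizeof(d));
  return d;
}

bool IsHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return (bits >> 32) == (kHoleNanBits >> 32);  // upper 32 bits suffice
}

int main() {
  assert(IsHole(HoleValue()));
  assert(!IsHole(1.5));
  assert(!IsHole(0.0));
  return 0;
}
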
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2474,7 +2305,6 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2543,9 +2373,14 @@
 }
 
 
-void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   Register receiver = ToRegister(instr->receiver());
   Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  ASSERT(receiver.is(rax));  // Used for parameter count.
+  ASSERT(function.is(rdi));  // Required by InvokeFunction.
+  ASSERT(ToRegister(instr->result()).is(rax));
 
   // If the receiver is null or undefined, we have to pass the global
   // object as a receiver to normal functions. Values have to be
@@ -2588,17 +2423,6 @@
   __ movq(receiver,
           FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);
-}
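
This receiver fix-up implements sloppy-mode calling conventions: null and undefined receivers become the global object, primitives would be boxed, and strict-mode or native functions take the receiver untouched. Schematically (a toy value model, not V8's):

#include <cassert>

enum Kind { kNull, kUndefined, kNumber, kObject };
struct Value { Kind kind; };

static Value global_object = {kObject};

Value WrapReceiver(Value receiver, bool strict_or_native) {
  if (strict_or_native) return receiver;  // passed through untouched
  if (receiver.kind == kNull || receiver.kind == kUndefined)
    return global_object;                 // substitute the global object
  if (receiver.kind != kObject)
    return Value{kObject};                // a primitive would be boxed
  return receiver;
}

int main() {
  assert(WrapReceiver(Value{kNull}, false).kind == kObject);
  assert(WrapReceiver(Value{kNumber}, true).kind == kNumber);
  return 0;
}
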
-
-
-void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
-  Register receiver = ToRegister(instr->receiver());
-  Register function = ToRegister(instr->function());
-  Register length = ToRegister(instr->length());
-  Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(rax));  // Used for parameter count.
-  ASSERT(function.is(rdi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(rax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
@@ -2627,7 +2451,7 @@
   RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
-  ParameterCount actual(rax);
+  v8::internal::ParameterCount actual(rax);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2642,7 +2466,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
@@ -2660,14 +2484,6 @@
 }
 
 
-void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  __ push(rsi);  // The context is the first argument.
-  __ PushHeapObject(instr->hydrogen()->pairs());
-  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
-}
-
-
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   Register result = ToRegister(instr->result());
   __ movq(result, GlobalObjectOperand());
@@ -2685,48 +2501,35 @@
                                  int arity,
                                  LInstruction* instr,
                                  CallKind call_kind) {
-  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
-      function->shared()->formal_parameter_count() == arity;
+  // Change context if needed.
+  bool change_context =
+      (info()->closure()->context() != function->context()) ||
+      scope()->contains_with() ||
+      (scope()->num_heap_slots() > 0);
+  if (change_context) {
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  }
+
+  // Set rax to arguments count if adaption is not needed. Assumes that rax
+  // is available to write to at this point.
+  if (!function->NeedsArgumentsAdaption()) {
+    __ Set(rax, arity);
+  }
 
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
 
-  if (can_invoke_directly) {
-    __ LoadHeapObject(rdi, function);
-
-    // Change context if needed.
-    bool change_context =
-        (info()->closure()->context() != function->context()) ||
-        scope()->contains_with() ||
-        (scope()->num_heap_slots() > 0);
-    if (change_context) {
-      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-    }
-
-    // Set rax to arguments count if adaption is not needed. Assumes that rax
-    // is available to write to at this point.
-    if (!function->NeedsArgumentsAdaption()) {
-      __ Set(rax, arity);
-    }
-
-    // Invoke function.
-    __ SetCallKind(rcx, call_kind);
-    if (*function == *info()->closure()) {
-      __ CallSelf();
-    } else {
-      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-    }
-
-    // Set up deoptimization.
-    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  // Invoke function.
+  __ SetCallKind(rcx, call_kind);
+  if (*function == *info()->closure()) {
+    __ CallSelf();
   } else {
-    // We need to adapt arguments.
-    SafepointGenerator generator(
-        this, pointers, Safepoint::kLazyDeopt);
-    ParameterCount count(arity);
-    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+    __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   }
 
+  // Set up deoptimization.
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+
   // Restore context.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
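
The restored CallKnownFunction always sets rax and calls the code entry directly; the version being removed only did so when the arity matched the callee's formal parameter count, otherwise routing the call through the arguments adaptor. The decision it dropped, in miniature:

#include <cstdio>

struct Function {
  int formal_parameter_count;
  bool needs_arguments_adaption;  // mirrors NeedsArgumentsAdaption()
};

// A call may skip the adaptor frame only when the actual argument count
// matches the callee's formals (or the callee doesn't care).
bool CanInvokeDirectly(const Function& f, int arity) {
  return !f.needs_arguments_adaption || f.formal_parameter_count == arity;
}

int main() {
  Function f = {2, true};
  printf("%d\n", CanInvokeDirectly(f, 2));  // 1: call the code entry
  printf("%d\n", CanInvokeDirectly(f, 3));  // 0: go through the adaptor
  return 0;
}
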
@@ -2734,6 +2537,7 @@
 
 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
+  __ Move(rdi, instr->function());
   CallKnownFunction(instr->function(),
                     instr->arity(),
                     instr,
@@ -2814,7 +2618,6 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2948,158 +2751,65 @@
   XMMRegister xmm_scratch = xmm0;
   XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
-
-  // Note that according to ECMA-262 15.8.2.13:
-  // Math.pow(-Infinity, 0.5) == Infinity
-  // Math.sqrt(-Infinity) == NaN
-  Label done, sqrt;
-  // Check base for -Infinity.  According to IEEE-754, double-precision
-  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
-  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
-  __ movq(xmm_scratch, kScratchRegister);
-  __ ucomisd(xmm_scratch, input_reg);
-  // Comparing -Infinity with NaN results in "unordered", which sets the
-  // zero flag as if both were equal.  However, it also sets the carry flag.
-  __ j(not_equal, &sqrt, Label::kNear);
-  __ j(carry, &sqrt, Label::kNear);
-  // If input is -Infinity, return Infinity.
-  __ xorps(input_reg, input_reg);
-  __ subsd(input_reg, xmm_scratch);
-  __ jmp(&done, Label::kNear);
-
-  // Square root.
-  __ bind(&sqrt);
   __ xorps(xmm_scratch, xmm_scratch);
   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   __ sqrtsd(input_reg, input_reg);
-  __ bind(&done);
 }
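
The block deleted above special-cased -Infinity because ECMA-262 15.8.2.13 requires Math.pow(-Infinity, 0.5) to be +Infinity even though sqrt(-Infinity) is NaN, so x**0.5 cannot be lowered to a bare sqrtsd. C99 pow observes the same convention:

#include <cassert>
#include <cmath>

int main() {
  double neg_inf = -INFINITY;
  // pow follows the ECMA/IEEE convention: +Infinity for exponent 0.5 ...
  assert(std::pow(neg_inf, 0.5) == INFINITY);
  // ... while a raw square root of -Infinity is NaN,
  assert(std::isnan(std::sqrt(neg_inf)));
  // hence the -Infinity test before falling through to sqrtsd.
  return 0;
}
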
 
 
 void LCodeGen::DoPower(LPower* instr) {
+  LOperand* left = instr->InputAt(0);
+  XMMRegister left_reg = ToDoubleRegister(left);
+  ASSERT(!left_reg.is(xmm1));
+  LOperand* right = instr->InputAt(1);
+  XMMRegister result_reg = ToDoubleRegister(instr->result());
   Representation exponent_type = instr->hydrogen()->right()->representation();
-  // Having marked this as a call, we can use any registers.
-  // Just make sure that the input/output registers are the expected ones.
-
-  // Choose register conforming to calling convention (when bailing out).
-#ifdef _WIN64
-  Register exponent = rdx;
-#else
-  Register exponent = rdi;
-#endif
-  ASSERT(!instr->InputAt(1)->IsRegister() ||
-         ToRegister(instr->InputAt(1)).is(exponent));
-  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
-         ToDoubleRegister(instr->InputAt(1)).is(xmm1));
-  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
-
-  if (exponent_type.IsTagged()) {
-    Label no_deopt;
-    __ JumpIfSmi(exponent, &no_deopt);
-    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
-    DeoptimizeIf(not_equal, instr->environment());
-    __ bind(&no_deopt);
-    MathPowStub stub(MathPowStub::TAGGED);
-    __ CallStub(&stub);
+  if (exponent_type.IsDouble()) {
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers
+    __ movaps(xmm0, left_reg);
+    ASSERT(ToDoubleRegister(right).is(xmm1));
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 2);
   } else if (exponent_type.IsInteger32()) {
-    MathPowStub stub(MathPowStub::INTEGER);
-    __ CallStub(&stub);
-  } else {
-    ASSERT(exponent_type.IsDouble());
-    MathPowStub stub(MathPowStub::DOUBLE);
-    __ CallStub(&stub);
-  }
-}
-
-
-void LCodeGen::DoRandom(LRandom* instr) {
-  class DeferredDoRandom: public LDeferredCode {
-   public:
-    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LRandom* instr_;
-  };
-
-  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
-
-  // Having marked this instruction as a call we can use any
-  // registers.
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-
-  // Choose the right register for the first argument depending on
-  // calling convention.
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers: xmm0 and edi (not rdi).
+    // On Windows, the registers are xmm0 and edx.
+    __ movaps(xmm0, left_reg);
 #ifdef _WIN64
-  ASSERT(ToRegister(instr->InputAt(0)).is(rcx));
-  Register global_object = rcx;
+    ASSERT(ToRegister(right).is(rdx));
 #else
-  ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
-  Register global_object = rdi;
+    ASSERT(ToRegister(right).is(rdi));
 #endif
+    __ CallCFunction(
+        ExternalReference::power_double_int_function(isolate()), 2);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    Register right_reg = ToRegister(right);
 
-  static const int kSeedSize = sizeof(uint32_t);
-  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+    __ SmiToInteger32(right_reg, right_reg);
+    __ cvtlsi2sd(xmm1, right_reg);
+    __ jmp(&call);
 
-  __ movq(global_object,
-          FieldOperand(global_object, GlobalObject::kGlobalContextOffset));
-  static const int kRandomSeedOffset =
-      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(global_object, kRandomSeedOffset));
-  // rbx: FixedArray of the global context's random seeds
+    __ bind(&non_smi);
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
 
-  // Load state[0].
-  __ movl(rax, FieldOperand(rbx, ByteArray::kHeaderSize));
-  // If state[0] == 0, call runtime to initialize seeds.
-  __ testl(rax, rax);
-  __ j(zero, deferred->entry());
-  // Load state[1].
-  __ movl(rcx, FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize));
-
-  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
-  // Only operate on the lower 32 bit of rax.
-  __ movl(rdx, rax);
-  __ andl(rdx, Immediate(0xFFFF));
-  __ imull(rdx, rdx, Immediate(18273));
-  __ shrl(rax, Immediate(16));
-  __ addl(rax, rdx);
-  // Save state[0].
-  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
-
-  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
-  __ movl(rdx, rcx);
-  __ andl(rdx, Immediate(0xFFFF));
-  __ imull(rdx, rdx, Immediate(36969));
-  __ shrl(rcx, Immediate(16));
-  __ addl(rcx, rdx);
-  // Save state[1].
-  __ movl(FieldOperand(rbx, ByteArray::kHeaderSize + kSeedSize), rcx);
-
-  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
-  __ shll(rax, Immediate(14));
-  __ andl(rcx, Immediate(0x3FFFF));
-  __ addl(rax, rcx);
-
-  __ bind(deferred->exit());
-  // Convert 32 random bits in rax to 0.(32 random bits) in a double
-  // by computing:
-  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
-  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-  __ movd(xmm2, rcx);
-  __ movd(xmm1, rax);
-  __ cvtss2sd(xmm2, xmm2);
-  __ xorps(xmm1, xmm2);
-  __ subsd(xmm1, xmm2);
-}
-
-
-void LCodeGen::DoDeferredRandom(LRandom* instr) {
-  __ PrepareCallCFunction(1);
-  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+    __ bind(&call);
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers xmm0 and xmm1.
+    __ movaps(xmm0, left_reg);
+    // Right argument is already in xmm1.
+    __ CallCFunction(
+        ExternalReference::power_double_double_function(isolate()), 2);
+  }
+  // Return value is in xmm0.
+  __ movaps(result_reg, xmm0);
+  // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  // Return value is in rax.
 }
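
The DoRandom removed above pairs two 16-bit multiply-with-carry generators and then splices the resulting 32 bits straight into a double's mantissa, avoiding an integer-to-double conversion. A C++ rendering of both halves (the seeding is illustrative; the real code seeds lazily from the runtime):

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t state0 = 1, state1 = 2;  // illustrative non-zero seeds

// Two 16-bit multiply-with-carry steps, mixed as in the removed code:
//   random = (state0 << 14) + (state1 & 0x3FFFF)
uint32_t RandomBits() {
  state0 = 18273 * (state0 & 0xFFFF) + (state0 >> 16);
  state1 = 36969 * (state1 & 0xFFFF) + (state1 >> 16);
  return (state0 << 14) + (state1 & 0x3FFFF);
}

// The xorps/subsd trick: place the 32 random bits in the mantissa of
// 1.0 x 2^20 and subtract 1.0 x 2^20, leaving bits / 2^32 in [0, 1).
double RandomDouble() {
  uint64_t bits = (UINT64_C(0x41300000) << 32) | RandomBits();
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d - 1048576.0;  // 1048576.0 == 2^20
}

int main() {
  for (int i = 0; i < 5; ++i) {
    double r = RandomDouble();
    assert(r >= 0.0 && r < 1.0);
  }
  return 0;
}
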
 
 
@@ -3111,14 +2821,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   TranscendentalCacheStub stub(TranscendentalCache::COS,
@@ -3158,9 +2860,6 @@
     case kMathSin:
       DoMathSin(instr);
       break;
-    case kMathTan:
-      DoMathTan(instr);
-      break;
     case kMathLog:
       DoMathLog(instr);
       break;
@@ -3210,13 +2909,13 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
+  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ Drop(1);
 }
 
 
@@ -3234,6 +2933,7 @@
 
 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   ASSERT(ToRegister(instr->result()).is(rax));
+  __ Move(rdi, instr->target());
   CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
 }
 
@@ -3242,9 +2942,9 @@
   ASSERT(ToRegister(instr->InputAt(0)).is(rdi));
   ASSERT(ToRegister(instr->result()).is(rax));
 
-  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+  Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
   __ Set(rax, instr->arity());
-  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
 }
 
 
@@ -3263,36 +2963,21 @@
   }
 
   // Do the store.
-  HType type = instr->hydrogen()->value()->type();
-  SmiCheck check_needed =
-      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (instr->is_in_object()) {
     __ movq(FieldOperand(object, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWriteField(object,
-                          offset,
-                          value,
-                          temp,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(object, offset, value, temp);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
     __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
     __ movq(FieldOperand(temp, offset), value);
-    if (instr->hydrogen()->NeedsWriteBarrier()) {
+    if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWriteField(temp,
-                          offset,
-                          value,
-                          object,
-                          kSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          check_needed);
+      __ RecordWrite(temp, offset, value, object);
     }
   }
 }
@@ -3303,7 +2988,7 @@
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->StoreIC_Initialize_Strict()
       : isolate()->builtins()->StoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -3340,7 +3025,6 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
-      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3352,25 +3036,17 @@
 
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
-  if (instr->length()->IsRegister()) {
-    Register reg = ToRegister(instr->length());
-    if (FLAG_debug_code) {
-      __ AbortIfNotZeroExtended(reg);
-    }
-    if (instr->index()->IsConstantOperand()) {
-      __ cmpq(reg,
+  if (instr->index()->IsConstantOperand()) {
+    if (instr->length()->IsRegister()) {
+      __ cmpq(ToRegister(instr->length()),
               Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
     } else {
-      Register reg2 = ToRegister(instr->index());
-      if (FLAG_debug_code) {
-        __ AbortIfNotZeroExtended(reg2);
-      }
-      __ cmpq(reg, reg2);
-    }
-  } else {
-    if (instr->index()->IsConstantOperand()) {
       __ cmpq(ToOperand(instr->length()),
               Immediate(ToInteger32(LConstantOperand::cast(instr->index()))));
+    }
+  } else {
+    if (instr->length()->IsRegister()) {
+      __ cmpq(ToRegister(instr->length()), ToRegister(instr->index()));
     } else {
       __ cmpq(ToOperand(instr->length()), ToRegister(instr->index()));
     }
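
DoBoundsCheck leans on the usual unsigned-compare idiom: once the 32-bit index is known to be zero-extended (which is what the removed AbortIfNotZeroExtended asserted in debug builds), a single unsigned comparison rejects both negative and too-large indices. For example:

#include <cassert>
#include <cstdint>

bool InBounds(int32_t index, uint32_t length) {
  // A negative index, reinterpreted as unsigned, exceeds any plausible
  // length, so one unsigned compare covers both failure modes.
  return static_cast<uint32_t>(index) < length;
}

int main() {
  assert(InBounds(3, 10));
  assert(!InBounds(10, 10));  // off the end
  assert(!InBounds(-1, 10));  // negative: huge as unsigned
  return 0;
}
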
@@ -3400,20 +3076,12 @@
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    HType type = instr->hydrogen()->value()->type();
-    SmiCheck check_needed =
-        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, FieldOperand(elements,
                              key,
                              times_pointer_size,
                              FixedArray::kHeaderSize));
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   kSaveFPRegs,
-                   EMIT_REMEMBERED_SET,
-                   check_needed);
+    __ RecordWrite(elements, key, value);
   }
 }
 
@@ -3442,54 +3110,13 @@
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+  Handle<Code> ic = instr->strict_mode()
       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
       : isolate()->builtins()->KeyedStoreIC_Initialize();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
-void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
-  Register object_reg = ToRegister(instr->object());
-  Register new_map_reg = ToRegister(instr->new_map_reg());
-
-  Handle<Map> from_map = instr->original_map();
-  Handle<Map> to_map = instr->transitioned_map();
-  ElementsKind from_kind = from_map->elements_kind();
-  ElementsKind to_kind = to_map->elements_kind();
-
-  Label not_applicable;
-  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
-  __ j(not_equal, &not_applicable);
-  __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
-  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
-    // Write barrier.
-    ASSERT_NE(instr->temp_reg(), NULL);
-    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
-                        ToRegister(instr->temp_reg()), kDontSaveFPRegs);
-  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
-      to_kind == FAST_DOUBLE_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(rdx));
-    ASSERT(new_map_reg.is(rbx));
-    __ movq(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
-    Register fixed_object_reg = ToRegister(instr->temp_reg());
-    ASSERT(fixed_object_reg.is(rdx));
-    ASSERT(new_map_reg.is(rbx));
-    __ movq(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
-  } else {
-    UNREACHABLE();
-  }
-  __ bind(&not_applicable);
-}
-
-
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   EmitPushTaggedOperand(instr->left());
   EmitPushTaggedOperand(instr->right());
@@ -3504,19 +3131,85 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
 
+  Register string = ToRegister(instr->string());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  StringCharLoadGenerator::Generate(masm(),
-                                    ToRegister(instr->string()),
-                                    ToRegister(instr->index()),
-                                    ToRegister(instr->result()),
-                                    deferred->entry());
+  // Fetch the instance type of the receiver into result register.
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ testb(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential, Label::kNear);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ testb(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string, Label::kNear);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ addq(index, result);
+  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded, Label::kNear);
+
+  // Handle conses.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
+                 Heap::kEmptyStringRootIndex);
+  __ j(not_equal, deferred->entry());
+  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ testb(result, Immediate(kStringRepresentationMask));
+  __ j(not_zero, deferred->entry());
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ testb(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  Label done;
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzxwl(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ movzxbl(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
   __ bind(deferred->exit());
 }
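
The comment block in this added fast path encodes a general invariant of V8's string shapes: a sliced string is an (offset, parent) window onto another string, and a cons string whose second half is the empty string is already flat, with all data in its first child. The same unwrapping in plain C++ over simplified stand-in types (not V8's classes):

#include <cstddef>

// Simplified stand-ins for the string shapes dispatched on above.
struct String {
  bool is_sliced = false, is_cons = false, is_sequential = true;
  std::size_t length = 0;
  String* parent = nullptr;   // slice: window into parent
  std::size_t offset = 0;     // slice: start of the window
  String* first = nullptr;    // cons: left child
  String* second = nullptr;   // cons: right child
};

// Resolve `s` to the sequential string containing character `*i`, adjusting
// `*i`; returns nullptr where the generated code jumps to the deferred
// (runtime) path, i.e. a cons string that still needs flattening.
String* UnwrapForCharAt(String* s, std::size_t* i) {
  if (s->is_sliced) {
    *i += s->offset;                            // slices index their parent
    s = s->parent;
  } else if (s->is_cons) {
    if (s->second->length != 0) return nullptr;  // not flat: go to runtime
    s = s->first;                                // flat cons: data in first
  }
  return s->is_sequential ? s : nullptr;
}
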
 
@@ -3558,7 +3251,6 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3635,7 +3327,6 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3694,7 +3385,6 @@
 void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 XMMRegister result_reg,
                                 bool deoptimize_on_undefined,
-                                bool deoptimize_on_minus_zero,
                                 LEnvironment* env) {
   Label load_smi, done;
 
@@ -3722,15 +3412,6 @@
   }
   // Heap number to XMM conversion.
   __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  if (deoptimize_on_minus_zero) {
-    XMMRegister xmm_scratch = xmm0;
-    __ xorps(xmm_scratch, xmm_scratch);
-    __ ucomisd(xmm_scratch, result_reg);
-    __ j(not_equal, &done, Label::kNear);
-    __ movmskpd(kScratchRegister, result_reg);
-    __ testq(kScratchRegister, Immediate(1));
-    DeoptimizeIf(not_zero, env);
-  }
   __ jmp(&done, Label::kNear);
 
   // Smi to XMM conversion
@@ -3741,6 +3422,16 @@
 }
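
The deleted deoptimize_on_minus_zero block is the standard x64 idiom for the job: ucomisd cannot distinguish -0.0 from +0.0, since IEEE-754 compares them equal, so once the value compares equal to zero, movmskpd extracts the sign bit to tell them apart. The same test in portable C++:

#include <cstdint>
#include <cstring>

// -0.0 == 0.0 under IEEE-754, so a compare alone (ucomisd) cannot detect
// negative zero; the sign bit has to be read out, as movmskpd does.
bool IsMinusZero(double d) {
  if (d != 0.0) return false;   // the ucomisd + j(not_equal, &done) step
  std::uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return (bits >> 63) != 0;     // the movmskpd + testq step: sign bit set
}
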
 
 
+class DeferredTaggedToI: public LDeferredCode {
+ public:
+  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+      : LDeferredCode(codegen), instr_(instr) { }
+  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+ private:
+  LTaggedToI* instr_;
+};
+
+
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3789,16 +3480,6 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI: public LDeferredCode {
-   public:
-    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LTaggedToI* instr_;
-  };
-
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3822,7 +3503,6 @@
 
   EmitNumberUntagD(input_reg, result_reg,
                    instr->hydrogen()->deoptimize_on_undefined(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
                    instr->environment());
 }
 
@@ -3928,37 +3608,20 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  Register reg = ToRegister(instr->value());
-  Handle<JSFunction> target = instr->hydrogen()->target();
-  if (isolate()->heap()->InNewSpace(*target)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(target);
-    __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    __ cmpq(reg, Operand(kScratchRegister, 0));
-  } else {
-    __ Cmp(reg, target);
-  }
+  ASSERT(instr->InputAt(0)->IsRegister());
+  Register reg = ToRegister(instr->InputAt(0));
+  __ Cmp(reg, instr->hydrogen()->target());
   DeoptimizeIf(not_equal, instr->environment());
 }
 
 
-void LCodeGen::DoCheckMapCommon(Register reg,
-                                Handle<Map> map,
-                                CompareMapMode mode,
-                                LEnvironment* env) {
-  Label success;
-  __ CompareMap(reg, map, &success, mode);
-  DeoptimizeIf(not_equal, env);
-  __ bind(&success);
-}
-
-
 void LCodeGen::DoCheckMap(LCheckMap* instr) {
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  Handle<Map> map = instr->hydrogen()->map();
-  DoCheckMapCommon(reg, map, instr->hydrogen()->mode(), instr->environment());
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         instr->hydrogen()->map());
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
@@ -4013,6 +3676,18 @@
 }
 
 
+void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
+  if (heap()->InNewSpace(*object)) {
+    Handle<JSGlobalPropertyCell> cell =
+        factory()->NewJSGlobalPropertyCell(object);
+    __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+    __ movq(result, Operand(result, 0));
+  } else {
+    __ Move(result, object);
+  }
+}
+
+
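
This restored LoadHeapObject helper, like the InNewSpace branch removed from DoCheckFunction above, reflects one rule: new-space objects move on every scavenge, so generated code may embed them only through a JSGlobalPropertyCell whose slot the GC rewrites, while old-space objects can be baked in directly. A minimal model of that decision (hypothetical types, not V8's API):

// Hypothetical model of embedding a heap object reference in generated code.
struct HeapObject {};
struct Cell { HeapObject* value; };  // the GC updates this slot when the
                                     // referenced object moves

struct EmbeddedRef {
  HeapObject* direct;  // safe only for objects that never move (old space)
  Cell* indirect;      // required for movable (new-space) objects
  HeapObject* Get() const { return indirect ? indirect->value : direct; }
};

EmbeddedRef Embed(HeapObject* obj, bool in_new_space) {
  if (in_new_space) {
    // Baking the raw pointer into code would dangle after the next scavenge;
    // allocate a cell and load through it instead.
    return EmbeddedRef{nullptr, new Cell{obj}};
  }
  return EmbeddedRef{obj, nullptr};
}
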
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   Register reg = ToRegister(instr->TempAt(0));
 
@@ -4020,139 +3695,32 @@
   Handle<JSObject> current_prototype = instr->prototype();
 
   // Load prototype object.
-  __ LoadHeapObject(reg, current_prototype);
+  LoadHeapObject(reg, current_prototype);
 
   // Check prototype maps up to the holder.
   while (!current_prototype.is_identical_to(holder)) {
-    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+    __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+           Handle<Map>(current_prototype->map()));
+    DeoptimizeIf(not_equal, instr->environment());
     current_prototype =
         Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
     // Load next prototype object.
-    __ LoadHeapObject(reg, current_prototype);
+    LoadHeapObject(reg, current_prototype);
   }
 
   // Check the holder map.
-    DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
-                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-}
-
-
-void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
-  class DeferredAllocateObject: public LDeferredCode {
-   public:
-    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LAllocateObject* instr_;
-  };
-
-  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
-
-  Register result = ToRegister(instr->result());
-  Register scratch = ToRegister(instr->TempAt(0));
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-  Handle<Map> initial_map(constructor->initial_map());
-  int instance_size = initial_map->instance_size();
-  ASSERT(initial_map->pre_allocated_property_fields() +
-         initial_map->unused_property_fields() -
-         initial_map->inobject_properties() == 0);
-
-  // Allocate memory for the object.  The initial map might change when
-  // the constructor's prototype changes, but instance size and property
-  // counts remain unchanged (if slack tracking finished).
-  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
-  __ AllocateInNewSpace(instance_size,
-                        result,
-                        no_reg,
-                        scratch,
-                        deferred->entry(),
-                        TAG_OBJECT);
-
-  // Load the initial map.
-  Register map = scratch;
-  __ LoadHeapObject(scratch, constructor);
-  __ movq(map, FieldOperand(scratch, JSFunction::kPrototypeOrInitialMapOffset));
-
-  if (FLAG_debug_code) {
-    __ AbortIfSmi(map);
-    __ cmpb(FieldOperand(map, Map::kInstanceSizeOffset),
-            Immediate(instance_size >> kPointerSizeLog2));
-    __ Assert(equal, "Unexpected instance size");
-    __ cmpb(FieldOperand(map, Map::kPreAllocatedPropertyFieldsOffset),
-            Immediate(initial_map->pre_allocated_property_fields()));
-    __ Assert(equal, "Unexpected pre-allocated property fields count");
-    __ cmpb(FieldOperand(map, Map::kUnusedPropertyFieldsOffset),
-            Immediate(initial_map->unused_property_fields()));
-    __ Assert(equal, "Unexpected unused property fields count");
-    __ cmpb(FieldOperand(map, Map::kInObjectPropertiesOffset),
-            Immediate(initial_map->inobject_properties()));
-    __ Assert(equal, "Unexpected in-object property fields count");
-  }
-
-  // Initialize map and fields of the newly allocated object.
-  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
-  __ movq(FieldOperand(result, JSObject::kMapOffset), map);
-  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
-  __ movq(FieldOperand(result, JSObject::kElementsOffset), scratch);
-  __ movq(FieldOperand(result, JSObject::kPropertiesOffset), scratch);
-  if (initial_map->inobject_properties() != 0) {
-    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
-    for (int i = 0; i < initial_map->inobject_properties(); i++) {
-      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
-      __ movq(FieldOperand(result, property_offset), scratch);
-    }
-  }
-
-  __ bind(deferred->exit());
-}
-
-
-void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
-  Register result = ToRegister(instr->result());
-  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
-
-  // TODO(3095996): Get rid of this. For now, we need to make the
-  // result register contain a valid pointer because it is already
-  // contained in the register pointer map.
-  __ Set(result, 0);
-
-  PushSafepointRegistersScope scope(this);
-  __ PushHeapObject(constructor);
-  CallRuntimeFromDeferred(Runtime::kNewObject, 1, instr);
-  __ StoreToSafepointRegisterSlot(result, rax);
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Handle<Map>(current_prototype->map()));
+  DeoptimizeIf(not_equal, instr->environment());
 }
 
 
 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
-  Heap* heap = isolate()->heap();
-  ElementsKind boilerplate_elements_kind =
-      instr->hydrogen()->boilerplate_elements_kind();
-
-  // Deopt if the array literal boilerplate ElementsKind is of a type different
-  // than the expected one. The check isn't necessary if the boilerplate has
-  // already been converted to FAST_ELEMENTS.
-  if (boilerplate_elements_kind != FAST_ELEMENTS) {
-    __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
-    __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-    // Load the map's "bit field 2".
-    __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
-    // Retrieve elements_kind from bit field 2.
-    __ and_(rbx, Immediate(Map::kElementsKindMask));
-    __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
-                           Map::kElementsKindShift));
-    DeoptimizeIf(not_equal, instr->environment());
-  }
-
-  // Set up the parameters to the stub/runtime call.
+  // Setup the parameters to the stub/runtime call.
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  // Boilerplate already exists, constant elements are never accessed.
-  // Pass an empty fixed array.
-  __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
+  __ Push(instr->hydrogen()->constant_elements());
 
   // Pick the right runtime function or stub to call.
   int length = instr->hydrogen()->length();
@@ -4168,160 +3736,26 @@
     CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
   } else {
     FastCloneShallowArrayStub::Mode mode =
-        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
-            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+        FastCloneShallowArrayStub::CLONE_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   }
 }
 
 
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
-                            Register result,
-                            Register source,
-                            int* offset) {
-  ASSERT(!source.is(rcx));
-  ASSERT(!result.is(rcx));
-
-  // Only elements backing stores for non-COW arrays need to be copied.
-  Handle<FixedArrayBase> elements(object->elements());
-  bool has_elements = elements->length() > 0 &&
-      elements->map() != isolate()->heap()->fixed_cow_array_map();
-
-  // Increase the offset so that subsequent objects end up right after
-  // this object and its backing store.
-  int object_offset = *offset;
-  int object_size = object->map()->instance_size();
-  int elements_offset = *offset + object_size;
-  int elements_size = has_elements ? elements->Size() : 0;
-  *offset += object_size + elements_size;
-
-  // Copy object header.
-  ASSERT(object->properties()->length() == 0);
-  int inobject_properties = object->map()->inobject_properties();
-  int header_size = object_size - inobject_properties * kPointerSize;
-  for (int i = 0; i < header_size; i += kPointerSize) {
-    if (has_elements && i == JSObject::kElementsOffset) {
-      __ lea(rcx, Operand(result, elements_offset));
-    } else {
-      __ movq(rcx, FieldOperand(source, i));
-    }
-    __ movq(FieldOperand(result, object_offset + i), rcx);
-  }
-
-  // Copy in-object properties.
-  for (int i = 0; i < inobject_properties; i++) {
-    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
-    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
-    if (value->IsJSObject()) {
-      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-      __ lea(rcx, Operand(result, *offset));
-      __ movq(FieldOperand(result, total_offset), rcx);
-      __ LoadHeapObject(source, value_object);
-      EmitDeepCopy(value_object, result, source, offset);
-    } else if (value->IsHeapObject()) {
-      __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
-      __ movq(FieldOperand(result, total_offset), rcx);
-    } else {
-      __ movq(rcx, value, RelocInfo::NONE);
-      __ movq(FieldOperand(result, total_offset), rcx);
-    }
-  }
-
-  if (has_elements) {
-    // Copy elements backing store header.
-    __ LoadHeapObject(source, elements);
-    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
-      __ movq(rcx, FieldOperand(source, i));
-      __ movq(FieldOperand(result, elements_offset + i), rcx);
-    }
-
-    // Copy elements backing store content.
-    int elements_length = elements->length();
-    if (elements->IsFixedDoubleArray()) {
-      Handle<FixedDoubleArray> double_array =
-          Handle<FixedDoubleArray>::cast(elements);
-      for (int i = 0; i < elements_length; i++) {
-        int64_t value = double_array->get_representation(i);
-        int total_offset =
-            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
-        __ movq(rcx, value, RelocInfo::NONE);
-        __ movq(FieldOperand(result, total_offset), rcx);
-      }
-    } else if (elements->IsFixedArray()) {
-      for (int i = 0; i < elements_length; i++) {
-        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
-        Handle<Object> value = JSObject::GetElement(object, i);
-        if (value->IsJSObject()) {
-          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
-          __ lea(rcx, Operand(result, *offset));
-          __ movq(FieldOperand(result, total_offset), rcx);
-          __ LoadHeapObject(source, value_object);
-          EmitDeepCopy(value_object, result, source, offset);
-        } else if (value->IsHeapObject()) {
-          __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
-          __ movq(FieldOperand(result, total_offset), rcx);
-        } else {
-          __ movq(rcx, value, RelocInfo::NONE);
-          __ movq(FieldOperand(result, total_offset), rcx);
-        }
-      }
-    } else {
-      UNREACHABLE();
-    }
-  }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
-  int size = instr->hydrogen()->total_size();
-
-  // Allocate all objects that are part of the literal in one big
-  // allocation. This avoids multiple limit checks.
-  Label allocated, runtime_allocate;
-  __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
-
-  __ bind(&runtime_allocate);
-  __ Push(Smi::FromInt(size));
-  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
-  __ bind(&allocated);
-  int offset = 0;
-  __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
-  EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
-  ASSERT_EQ(size, offset);
-}
-
-
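
The removed EmitDeepCopy and DoFastLiteral pair packs an entire literal boilerplate graph into one allocation: each object is written at the running *offset, its non-COW elements store goes directly behind it, and *offset advances past both before any nested object is copied, which is why DoFastLiteral can assert that the final offset equals the precomputed total size. The offset bookkeeping in isolation (hypothetical structure, no machine code):

#include <vector>

// Offset bookkeeping of the removed deep copy: object, then its elements
// backing store, then (recursively) any in-object children, all contiguous.
struct Boilerplate {
  int object_size;
  int elements_size;                  // 0 for COW or empty backing stores
  std::vector<Boilerplate> children;  // in-object properties that are
                                      // themselves copied objects
};

void Layout(const Boilerplate& b, int* offset) {
  int object_offset = *offset;
  int elements_offset = *offset + b.object_size;
  *offset += b.object_size + b.elements_size;  // reserve before recursing
  (void)object_offset;
  (void)elements_offset;                       // where the stores would go
  for (const Boilerplate& child : b.children) {
    Layout(child, offset);                     // children land right after
  }
}
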
 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
-  Handle<FixedArray> literals(instr->environment()->closure()->literals());
-  Handle<FixedArray> constant_properties =
-      instr->hydrogen()->constant_properties();
-
-  // Set up the parameters to the stub/runtime call.
-  __ PushHeapObject(literals);
+  // Setup the parameters to the stub/runtime call.
+  __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
-  __ Push(constant_properties);
-  int flags = instr->hydrogen()->fast_elements()
-      ? ObjectLiteral::kFastElements
-      : ObjectLiteral::kNoFlags;
-  flags |= instr->hydrogen()->has_function()
-      ? ObjectLiteral::kHasFunction
-      : ObjectLiteral::kNoFlags;
-  __ Push(Smi::FromInt(flags));
+  __ Push(instr->hydrogen()->constant_properties());
+  __ Push(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0));
 
-  // Pick the right runtime function or stub to call.
-  int properties_count = constant_properties->length() / 2;
+  // Pick the right runtime function to call.
   if (instr->hydrogen()->depth() > 1) {
     CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
-  } else if (flags != ObjectLiteral::kFastElements ||
-      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
-    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   } else {
-    FastCloneShallowObjectStub stub(properties_count);
-    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
   }
 }
 
@@ -4391,7 +3825,8 @@
   Handle<SharedFunctionInfo> shared_info = instr->shared_info();
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && shared_info->num_literals() == 0) {
-    FastNewClosureStub stub(shared_info->language_mode());
+    FastNewClosureStub stub(
+        shared_info->strict_mode() ? kStrictMode : kNonStrictMode);
     __ Push(shared_info);
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -4415,12 +3850,7 @@
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   ASSERT(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
-    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
-    if (object->IsSmi()) {
-      __ Push(Handle<Smi>::cast(object));
-    } else {
-      __ PushHeapObject(Handle<HeapObject>::cast(object));
-    }
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
   } else if (operand->IsRegister()) {
     __ push(ToRegister(operand));
   } else {
@@ -4436,11 +3866,12 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  Condition final_branch_condition =
-      EmitTypeofIs(true_label, false_label, input, instr->type_literal());
-  if (final_branch_condition != no_condition) {
-    EmitBranch(true_block, false_block, final_branch_condition);
-  }
+  Condition final_branch_condition = EmitTypeofIs(true_label,
+                                                  false_label,
+                                                  input,
+                                                  instr->type_literal());
+
+  EmitBranch(true_block, false_block, final_branch_condition);
 }
 
 
@@ -4485,12 +3916,9 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label);
-    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
-    final_branch_condition = equal;
+    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
+    final_branch_condition = above_equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4508,6 +3936,7 @@
     final_branch_condition = zero;
 
   } else {
+    final_branch_condition = never;
     __ jmp(false_label);
   }
 
@@ -4549,7 +3978,11 @@
   int current_pc = masm()->pc_offset();
   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-    __ Nop(padding_size);
+    while (padding_size > 0) {
+      int nop_size = padding_size > 9 ? 9 : padding_size;
+      __ nop(nop_size);
+      padding_size -= nop_size;
+    }
   }
 }
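
This reverted padding loop replaces the single __ Nop(padding_size) call; it is needed because one x64 multi-byte NOP instruction encodes at most 9 bytes, so larger gaps become a run of maximal NOPs plus a remainder. The chunking on its own:

#include <vector>

// Split a padding request into x64 NOP lengths of at most 9 bytes each,
// mirroring the loop in EnsureSpaceForLazyDeopt.
std::vector<int> NopSizes(int padding_size) {
  std::vector<int> sizes;
  while (padding_size > 0) {
    int nop_size = padding_size > 9 ? 9 : padding_size;
    sizes.push_back(nop_size);
    padding_size -= nop_size;
  }
  return sizes;
}
// NopSizes(23) yields {9, 9, 5}: two maximal NOPs and one 5-byte NOP.
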
 
@@ -4618,7 +4051,6 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
-    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
@@ -4674,88 +4106,6 @@
   osr_pc_offset_ = masm()->pc_offset();
 }
 
-
-void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  DeoptimizeIf(equal, instr->environment());
-
-  Register null_value = rdi;
-  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  __ cmpq(rax, null_value);
-  DeoptimizeIf(equal, instr->environment());
-
-  Condition cc = masm()->CheckSmi(rax);
-  DeoptimizeIf(cc, instr->environment());
-
-  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
-  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
-  DeoptimizeIf(below_equal, instr->environment());
-
-  Label use_cache, call_runtime;
-  __ CheckEnumCache(null_value, &call_runtime);
-
-  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
-  __ jmp(&use_cache, Label::kNear);
-
-  // Get the set of properties to enumerate.
-  __ bind(&call_runtime);
-  __ push(rax);
-  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
-
-  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                 Heap::kMetaMapRootIndex);
-  DeoptimizeIf(not_equal, instr->environment());
-  __ bind(&use_cache);
-}
-
-
-void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
-  Register map = ToRegister(instr->map());
-  Register result = ToRegister(instr->result());
-  __ LoadInstanceDescriptors(map, result);
-  __ movq(result,
-          FieldOperand(result, DescriptorArray::kEnumerationIndexOffset));
-  __ movq(result,
-          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
-  Condition cc = masm()->CheckSmi(result);
-  DeoptimizeIf(cc, instr->environment());
-}
-
-
-void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
-  Register object = ToRegister(instr->value());
-  __ cmpq(ToRegister(instr->map()),
-          FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr->environment());
-}
-
-
-void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  Register object = ToRegister(instr->object());
-  Register index = ToRegister(instr->index());
-
-  Label out_of_object, done;
-  __ SmiToInteger32(index, index);
-  __ cmpl(index, Immediate(0));
-  __ j(less, &out_of_object);
-  __ movq(object, FieldOperand(object,
-                               index,
-                               times_pointer_size,
-                               JSObject::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  __ bind(&out_of_object);
-  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
-  __ negl(index);
-  // Index is now equal to out of object property index plus 1.
-  __ movq(object, FieldOperand(object,
-                               index,
-                               times_pointer_size,
-                               FixedArray::kHeaderSize - kPointerSize));
-  __ bind(&done);
-}
-
-
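
The removed DoLoadFieldByIndex decodes a signed property index: a non-negative index selects an in-object field starting at the JSObject header, while a negative index (after negl) yields the out-of-object slot number plus one, which the FixedArray::kHeaderSize - kPointerSize displacement corrects for. The same decode in C++, with made-up layout constants:

#include <cstddef>

// Made-up layout constants standing in for JSObject::kHeaderSize and
// FixedArray::kHeaderSize; only the arithmetic is the point here.
constexpr std::ptrdiff_t kPointerSize = 8;
constexpr std::ptrdiff_t kJSObjectHeaderSize = 24;
constexpr std::ptrdiff_t kFixedArrayHeaderSize = 16;

// Decode the signed field index used by the removed LLoadFieldByIndex:
// index >= 0 selects an in-object field; index < 0 selects a slot in the
// out-of-object properties array, encoded as -(slot + 1).
std::ptrdiff_t FieldOffset(int index, bool* out_of_object) {
  if (index >= 0) {
    *out_of_object = false;
    return kJSObjectHeaderSize + index * kPointerSize;
  }
  *out_of_object = true;
  int slot_plus_one = -index;  // the negl step
  return kFixedArrayHeaderSize + slot_plus_one * kPointerSize - kPointerSize;
}
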
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index f5045b6..43c045f 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,7 +78,6 @@
   XMMRegister ToDoubleRegister(LOperand* op) const;
   bool IsInteger32Constant(LConstantOperand* op) const;
   int ToInteger32(LConstantOperand* op) const;
-  double ToDouble(LConstantOperand* op) const;
   bool IsTaggedConstant(LConstantOperand* op) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
   Operand ToOperand(LOperand* op) const;
@@ -97,17 +96,12 @@
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
   void DoDeferredStackCheck(LStackCheck* instr);
-  void DoDeferredRandom(LRandom* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredAllocateObject(LAllocateObject* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                        Label* map_check);
 
-  void DoCheckMapCommon(Register reg, Handle<Map> map,
-                        CompareMapMode mode, LEnvironment* env);
-
-// Parallel move support.
+  // Parallel move support.
   void DoParallelMove(LParallelMove* move);
   void DoGap(LGap* instr);
 
@@ -132,8 +126,8 @@
   bool is_done() const { return status_ == DONE; }
   bool is_aborted() const { return status_ == ABORTED; }
 
-  StrictModeFlag strict_mode_flag() const {
-    return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
+  int strict_mode_flag() const {
+    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
 
   LChunk* chunk() const { return chunk_; }
@@ -146,8 +140,7 @@
                        Label* if_false,
                        Handle<String> class_name,
                        Register input,
-                       Register temporary,
-                       Register scratch);
+                       Register temporary);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
   int GetParameterCount() const { return scope()->num_parameters(); }
@@ -196,13 +189,15 @@
                                int argc,
                                LInstruction* instr);
 
+
   // Generate a direct call to a known function.  Expects the function
-  // to be in rdi.
+  // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
                          LInstruction* instr,
                          CallKind call_kind);
 
+  void LoadHeapObject(Register result, Handle<HeapObject> object);
 
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode,
@@ -235,7 +230,6 @@
   void DoMathSqrt(LUnaryMathOperation* instr);
   void DoMathPowHalf(LUnaryMathOperation* instr);
   void DoMathLog(LUnaryMathOperation* instr);
-  void DoMathTan(LUnaryMathOperation* instr);
   void DoMathCos(LUnaryMathOperation* instr);
   void DoMathSin(LUnaryMathOperation* instr);
 
@@ -254,19 +248,17 @@
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void EmitBranch(int left_block, int right_block, Condition cc);
+  void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         XMMRegister result,
                         bool deoptimize_on_undefined,
-                        bool deoptimize_on_minus_zero,
                         LEnvironment* env);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name);
+  Condition EmitTypeofIs(Label* true_label, Label* false_label,
+                         Register input, Handle<String> type_name);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
@@ -275,13 +267,6 @@
                          Label* is_not_object,
                          Label* is_object);
 
-  // Emits optimized code for %_IsString(x).  Preserves input register.
-  // Returns the condition on which a final split to
-  // true and false label should be made, to optimize fallthrough.
-  Condition EmitIsString(Register input,
-                         Register temp1,
-                         Label* is_not_string);
-
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
@@ -295,13 +280,6 @@
   // register, or a stack slot operand.
   void EmitPushTaggedOperand(LOperand* operand);
 
-  // Emits optimized code to deep-copy the contents of statically known
-  // object graphs (e.g. object literal boilerplate).
-  void EmitDeepCopy(Handle<JSObject> object,
-                    Register result,
-                    Register source,
-                    int* offset);
-
   struct JumpTableEntry {
     explicit inline JumpTableEntry(Address entry)
         : label(),
@@ -368,20 +346,16 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen),
-        external_exit_(NULL),
-        instruction_index_(codegen->current_instruction_) {
+      : codegen_(codegen), external_exit_(NULL) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
-  virtual LInstruction* instr() = 0;
 
-  void SetExit(Label* exit) { external_exit_ = exit; }
+  void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
-  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -392,7 +366,6 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
-  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index 877ea8c..c3c617c 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -198,18 +198,16 @@
       if (cgen_->IsInteger32Constant(constant_source)) {
         __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+        __ Move(dst, cgen_->ToHandle(constant_source));
       }
     } else {
       ASSERT(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       if (cgen_->IsInteger32Constant(constant_source)) {
-        // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
-        // value.
-        __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
+        // Allow top 32 bits of an untagged Integer32 to be arbitrary.
+        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
       } else {
-        __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source));
-        __ movq(dst, kScratchRegister);
+        __ Move(dst, cgen_->ToHandle(constant_source));
       }
     }
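
The movq-to-movl change in this gap-resolver hunk restores the old invariant stated in the new comment: the upper 32 bits of a spill slot holding an untagged Integer32 may be arbitrary, so a 32-bit store suffices. What the two stores do to a 64-bit slot, sketched in C++ (little-endian x64 assumed):

#include <cstdint>
#include <cstring>

// movl writes only the low 32 bits of the 64-bit stack slot, leaving the
// upper half as whatever was there before.
void StoreLikeMovl(std::uint64_t* slot, std::int32_t imm) {
  std::memcpy(slot, &imm, sizeof imm);
}

// movq with a 32-bit immediate sign-extends it and rewrites the whole slot,
// so the upper 32 bits become fully defined.
void StoreLikeMovq(std::uint64_t* slot, std::int32_t imm) {
  *slot = static_cast<std::uint64_t>(static_cast<std::int64_t>(imm));
}
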
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index d3e4cdd..5fc5646 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -214,11 +214,10 @@
 }
 
 
-void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(kind() == kStrictEquality ? " === " : " == ");
-  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  stream->Add(is_strict() ? " === null" : " == null");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -230,13 +229,6 @@
 }
 
 
-void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if is_string(");
-  InputAt(0)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -251,14 +243,6 @@
 }
 
 
-void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("if string_compare(");
-  InputAt(0)->PrintTo(stream);
-  InputAt(1)->PrintTo(stream);
-  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -382,7 +366,7 @@
 
 
 void LChunk::MarkEmptyBlocks() {
-  HPhase phase("L_Mark empty blocks", this);
+  HPhase phase("Mark empty blocks", this);
   for (int i = 0; i < graph()->blocks()->length(); ++i) {
     HBasicBlock* block = graph()->blocks()->at(i);
     int first = block->first_instruction_index();
@@ -462,14 +446,8 @@
 }
 
 
-void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintTo(stream);
-  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
-}
-
-
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  LInstructionGap* gap = new LInstructionGap(block);
   int index = -1;
   if (instr->IsControl()) {
     instructions_.Add(gap);
@@ -544,8 +522,8 @@
 
 LChunk* LChunkBuilder::Build() {
   ASSERT(is_unused());
-  chunk_ = new(zone()) LChunk(info(), graph());
-  HPhase phase("L_Building chunk", chunk_);
+  chunk_ = new LChunk(info(), graph());
+  HPhase phase("Building chunk", chunk_);
   status_ = BUILDING;
   const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
   for (int i = 0; i < blocks->length(); i++) {
@@ -574,15 +552,20 @@
 }
 
 
+LRegister* LChunkBuilder::ToOperand(Register reg) {
+  return LRegister::Create(Register::ToAllocationIndex(reg));
+}
+
+
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
-                                  Register::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_REGISTER,
+                          Register::ToAllocationIndex(reg));
 }
 
 
 LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
-  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
-                                  XMMRegister::ToAllocationIndex(reg));
+  return new LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                          XMMRegister::ToAllocationIndex(reg));
 }
 
 
@@ -597,29 +580,29 @@
 
 
 LOperand* LChunkBuilder::UseRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
   return Use(value,
-             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+             new LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
                               LUnallocated::USED_AT_START));
 }
 
 
 LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+  return Use(value, new LUnallocated(LUnallocated::WRITABLE_REGISTER));
 }
 
 
 LOperand* LChunkBuilder::Use(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+  return Use(value, new LUnallocated(LUnallocated::NONE));
 }
 
 
 LOperand* LChunkBuilder::UseAtStart(HValue* value) {
-  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+  return Use(value, new LUnallocated(LUnallocated::NONE,
                                      LUnallocated::USED_AT_START));
 }
 
@@ -655,7 +638,7 @@
 LOperand* LChunkBuilder::UseAny(HValue* value) {
   return value->IsConstant()
       ? chunk_->DefineConstantOperand(HConstant::cast(value))
-      :  Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+      :  Use(value, new LUnallocated(LUnallocated::ANY));
 }
 
 
@@ -664,7 +647,7 @@
     HInstruction* instr = HInstruction::cast(value);
     VisitInstruction(instr);
   }
-  operand->set_virtual_register(value->id());
+  allocator_->RecordUse(value, operand);
   return operand;
 }
 
@@ -672,17 +655,22 @@
 template<int I, int T>
 LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
                                     LUnallocated* result) {
-  result->set_virtual_register(current_instruction_->id());
+  allocator_->RecordDefinition(current_instruction_, result);
   instr->set_result(result);
   return instr;
 }
 
 
 template<int I, int T>
+LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
+  return Define(instr, new LUnallocated(LUnallocated::NONE));
+}
+
+
+template<int I, int T>
 LInstruction* LChunkBuilder::DefineAsRegister(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+  return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
 }
 
 
@@ -690,16 +678,14 @@
 LInstruction* LChunkBuilder::DefineAsSpilled(
     LTemplateInstruction<1, I, T>* instr,
     int index) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+  return Define(instr, new LUnallocated(LUnallocated::FIXED_SLOT, index));
 }
 
 
 template<int I, int T>
 LInstruction* LChunkBuilder::DefineSameAsFirst(
     LTemplateInstruction<1, I, T>* instr) {
-  return Define(instr,
-                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+  return Define(instr, new LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
 }
 
 
@@ -720,9 +706,7 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  int argument_index_accumulator = 0;
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator));
+  instr->set_environment(CreateEnvironment(hydrogen_env));
   return instr;
 }
 
@@ -752,7 +736,7 @@
   instr->MarkAsCall();
   instr = AssignPointerMap(instr);
 
-  if (hinstr->HasObservableSideEffects()) {
+  if (hinstr->HasSideEffects()) {
     ASSERT(hinstr->next()->IsSimulate());
     HSimulate* sim = HSimulate::cast(hinstr->next());
     instr = SetInstructionPendingDeoptimizationEnvironment(
@@ -764,8 +748,7 @@
   // Thus we still need to attach environment to this call even if
   // call sequence can not deoptimize eagerly.
   bool needs_environment =
-      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
-      !hinstr->HasObservableSideEffects();
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) || !hinstr->HasSideEffects();
   if (needs_environment && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
@@ -782,46 +765,66 @@
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_));
+  instr->set_pointer_map(new LPointerMap(position_));
   return instr;
 }
 
 
 LUnallocated* LChunkBuilder::TempRegister() {
-  LUnallocated* operand =
-      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
-  operand->set_virtual_register(allocator_->GetVirtualRegister());
-  if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
+  LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  allocator_->RecordTemporary(operand);
   return operand;
 }
 
 
 LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
-  return new(zone()) LLabel(instr->block());
+  return new LLabel(instr->block());
 }
 
 
 LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
 }
 
 
 LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
-  return AssignEnvironment(new(zone()) LDeoptimize);
+  return AssignEnvironment(new LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoBit(Token::Value op,
+                                   HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsInteger32()) {
+    ASSERT(instr->left()->representation().IsInteger32());
+    ASSERT(instr->right()->representation().IsInteger32());
+
+    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+    return DefineSameAsFirst(new LBitI(op, left, right));
+  } else {
+    ASSERT(instr->representation().IsTagged());
+    ASSERT(instr->left()->representation().IsTagged());
+    ASSERT(instr->right()->representation().IsTagged());
+
+    LOperand* left = UseFixed(instr->left(), rdx);
+    LOperand* right = UseFixed(instr->right(), rax);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
+    return MarkAsCall(DefineFixed(result, rax), instr);
+  }
 }
 
 
@@ -833,7 +836,7 @@
 
     LOperand* left = UseFixed(instr->left(), rdx);
     LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
+    LArithmeticT* result = new LArithmeticT(op, left, right);
     return MarkAsCall(DefineFixed(result, rax), instr);
   }
 
@@ -867,7 +870,7 @@
   }
 
   LInstruction* result =
-      DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+      DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
   return does_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -880,7 +883,7 @@
   ASSERT(op != Token::MOD);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+  LArithmeticD* result = new LArithmeticD(op, left, right);
   return DefineSameAsFirst(result);
 }
 
@@ -898,8 +901,7 @@
   ASSERT(right->representation().IsTagged());
   LOperand* left_operand = UseFixed(left, rdx);
   LOperand* right_operand = UseFixed(right, rax);
-  LArithmeticT* result =
-      new(zone()) LArithmeticT(op, left_operand, right_operand);
+  LArithmeticT* result = new LArithmeticT(op, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -987,26 +989,20 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(
-    HEnvironment* hydrogen_env,
-    int* argument_index_accumulator) {
+LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer =
-      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
+  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
   int ast_id = hydrogen_env->ast_id();
-  ASSERT(ast_id != AstNode::kNoNumber ||
-         hydrogen_env->frame_type() != JS_FUNCTION);
+  ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
-  LEnvironment* result = new(zone()) LEnvironment(
-      hydrogen_env->closure(),
-      hydrogen_env->frame_type(),
-      ast_id,
-      hydrogen_env->parameter_count(),
-      argument_count_,
-      value_count,
-      outer);
-  int argument_index = *argument_index_accumulator;
+  LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+                                          ast_id,
+                                          hydrogen_env->parameter_count(),
+                                          argument_count_,
+                                          value_count,
+                                          outer);
+  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1015,70 +1011,57 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new(zone()) LArgument(argument_index++);
+      op = new LArgument(argument_index++);
     } else {
       op = UseAny(value);
     }
     result->AddValue(op, value->representation());
   }
 
-  if (hydrogen_env->frame_type() == JS_FUNCTION) {
-    *argument_index_accumulator = argument_index;
-  }
-
   return result;
 }
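
The accumulator removed from CreateEnvironment existed to number pushed arguments consecutively across a chain of environments, while the restored version simply restarts argument_index at zero for each one. A toy model of the threaded numbering, using a stand-in Frame type in place of HEnvironment:

#include <vector>

// Stand-in for a chain of HEnvironments: each frame has some number of
// pushed-argument values that need LArgument indices.
struct Frame {
  int pushed_arguments;
  Frame* outer;
};

// Thread one counter through the recursion so indices stay unique across
// the whole chain, as the removed argument_index_accumulator did.
void NumberArguments(Frame* f, int* counter, std::vector<int>* out) {
  if (f == nullptr) return;
  NumberArguments(f->outer, counter, out);  // outer frames are built first
  for (int i = 0; i < f->pushed_arguments; ++i) {
    out->push_back((*counter)++);           // LArgument(argument_index++)
  }
}
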
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+  return new LGoto(instr->FirstSuccessor()->block_id());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  if (value->EmitAtUses()) {
-    ASSERT(value->IsConstant());
-    ASSERT(!value->representation().IsDouble());
-    HBasicBlock* successor = HConstant::cast(value)->ToBoolean()
+  HValue* v = instr->value();
+  if (v->EmitAtUses()) {
+    ASSERT(v->IsConstant());
+    ASSERT(!v->representation().IsDouble());
+    HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
         ? instr->FirstSuccessor()
         : instr->SecondSuccessor();
-    return new(zone()) LGoto(successor->block_id());
+    return new LGoto(successor->block_id());
   }
-
-  LBranch* result = new(zone()) LBranch(UseRegister(value));
-  // Tagged values that are not known smis or booleans require a
-  // deoptimization environment.
-  Representation rep = value->representation();
-  HType type = value->type();
-  if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean()) {
-    return AssignEnvironment(result);
-  }
-  return result;
+  return AssignEnvironment(new LBranch(UseRegister(v)));
 }
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpMapAndBranch(value);
+  return new LCmpMapAndBranch(value);
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
-  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+  return DefineAsRegister(new LArgumentsLength(Use(length->value())));
 }
 
 
 LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
-  return DefineAsRegister(new(zone()) LArgumentsElements);
+  return DefineAsRegister(new LArgumentsElements);
 }
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
   LOperand* left = UseFixed(instr->left(), rax);
   LOperand* right = UseFixed(instr->right(), rdx);
-  LInstanceOf* result = new(zone()) LInstanceOf(left, right);
+  LInstanceOf* result = new LInstanceOf(left, right);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1086,26 +1069,18 @@
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
-                                         FixedTemp(rdi));
+      new LInstanceOfKnownGlobal(UseFixed(instr->left(), rax),
+                                 FixedTemp(rdi));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
-  LOperand* receiver = UseRegister(instr->receiver());
-  LOperand* function = UseRegisterAtStart(instr->function());
-  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
-  return AssignEnvironment(DefineSameAsFirst(result));
-}
-
-
 LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
   LOperand* function = UseFixed(instr->function(), rdi);
   LOperand* receiver = UseFixed(instr->receiver(), rax);
   LOperand* length = UseFixed(instr->length(), rbx);
   LOperand* elements = UseFixed(instr->elements(), rcx);
-  LApplyArguments* result = new(zone()) LApplyArguments(function,
+  LApplyArguments* result = new LApplyArguments(function,
                                                 receiver,
                                                 length,
                                                 elements);
@@ -1116,68 +1091,61 @@
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
   ++argument_count_;
   LOperand* argument = UseOrConstant(instr->argument());
-  return new(zone()) LPushArgument(argument);
+  return new LPushArgument(argument);
 }
 
 
 LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
-  return instr->HasNoUses()
-      ? NULL
-      : DefineAsRegister(new(zone()) LThisFunction);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
 }
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+  return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
 }
 
 
 LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LOuterContext(context));
-}
-
-
-LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+  return DefineAsRegister(new LOuterContext(context));
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
-  return DefineAsRegister(new(zone()) LGlobalObject);
+  return DefineAsRegister(new LGlobalObject);
 }
 
 
 LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
   LOperand* global_object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LGlobalReceiver(global_object));
+  return DefineAsRegister(new LGlobalReceiver(global_object));
 }
 
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallConstantFunction, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* function = UseFixed(instr->function(), rdi);
   argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+  LInvokeFunction* result = new LInvokeFunction(function);
   return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   BuiltinFunctionId op = instr->op();
-  if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
+  if (op == kMathLog || op == kMathSin || op == kMathCos) {
     LOperand* input = UseFixedDouble(instr->value(), xmm1);
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   } else {
     LOperand* input = UseRegisterAtStart(instr->value());
-    LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
+    LUnaryMathOperation* result = new LUnaryMathOperation(input);
     switch (op) {
       case kMathAbs:
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
@@ -1201,48 +1169,47 @@
   ASSERT(instr->key()->representation().IsTagged());
   LOperand* key = UseFixed(instr->key(), rcx);
   argument_count_ -= instr->argument_count();
-  LCallKeyed* result = new(zone()) LCallKeyed(key);
+  LCallKeyed* result = new LCallKeyed(key);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallNamed, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallGlobal, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallKnownGlobal, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
   LOperand* constructor = UseFixed(instr->constructor(), rdi);
   argument_count_ -= instr->argument_count();
-  LCallNew* result = new(zone()) LCallNew(constructor);
+  LCallNew* result = new LCallNew(constructor);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
-  LOperand* function = UseFixed(instr->function(), rdi);
   argument_count_ -= instr->argument_count();
-  LCallFunction* result = new(zone()) LCallFunction(function);
+  LCallFunction* result = new LCallFunction();
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallRuntime, rax), instr);
 }
 
 
@@ -1261,24 +1228,8 @@
 }
 
 
-LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
-  if (instr->representation().IsInteger32()) {
-    ASSERT(instr->left()->representation().IsInteger32());
-    ASSERT(instr->right()->representation().IsInteger32());
-
-    LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
-    LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    return DefineSameAsFirst(new(zone()) LBitI(left, right));
-  } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), rdx);
-    LOperand* right = UseFixed(instr->right(), rax);
-    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
-    return MarkAsCall(DefineFixed(result, rax), instr);
-  }
+LInstruction* LChunkBuilder::DoBitAnd(HBitAnd* instr) {
+  return DoBit(Token::BIT_AND, instr);
 }
 
 
@@ -1286,11 +1237,21 @@
   ASSERT(instr->value()->representation().IsInteger32());
   ASSERT(instr->representation().IsInteger32());
   LOperand* input = UseRegisterAtStart(instr->value());
-  LBitNotI* result = new(zone()) LBitNotI(input);
+  LBitNotI* result = new LBitNotI(input);
   return DefineSameAsFirst(result);
 }
 
 
+LInstruction* LChunkBuilder::DoBitOr(HBitOr* instr) {
+  return DoBit(Token::BIT_OR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitXor(HBitXor* instr) {
+  return DoBit(Token::BIT_XOR, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
@@ -1300,7 +1261,7 @@
     LOperand* temp = FixedTemp(rdx);
     LOperand* dividend = UseFixed(instr->left(), rax);
     LOperand* divisor = UseRegister(instr->right());
-    LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
+    LDivI* result = new LDivI(dividend, divisor, temp);
     return AssignEnvironment(DefineFixed(result, rax));
   } else {
     ASSERT(instr->representation().IsTagged());
@@ -1318,8 +1279,7 @@
     if (instr->HasPowerOf2Divisor()) {
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
-      LModI* mod =
-          new(zone()) LModI(value, UseOrConstant(instr->right()), NULL);
+      LModI* mod = new LModI(value, UseOrConstant(instr->right()), NULL);
       result = DefineSameAsFirst(mod);
     } else {
       // The temporary operand is necessary to ensure that right is not
@@ -1327,7 +1287,7 @@
       LOperand* temp = FixedTemp(rdx);
       LOperand* value = UseFixed(instr->left(), rax);
       LOperand* divisor = UseRegister(instr->right());
-      LModI* mod = new(zone()) LModI(value, divisor, temp);
+      LModI* mod = new LModI(value, divisor, temp);
       result = DefineFixed(mod, rdx);
     }
 
@@ -1344,7 +1304,7 @@
     // TODO(fschneider): Allow any register as input registers.
     LOperand* left = UseFixedDouble(instr->left(), xmm2);
     LOperand* right = UseFixedDouble(instr->right(), xmm1);
-    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
     return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
   }
 }
@@ -1356,12 +1316,8 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstant(instr->MostConstantOperand());
-    LMulI* mul = new(zone()) LMulI(left, right);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      AssignEnvironment(mul);
-    }
-    return DefineSameAsFirst(mul);
+    LMulI* mul = new LMulI(left, right);
+    return AssignEnvironment(DefineSameAsFirst(mul));
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MUL, instr);
   } else {
@@ -1377,7 +1333,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    LSubI* sub = new(zone()) LSubI(left, right);
+    LSubI* sub = new LSubI(left, right);
     LInstruction* result = DefineSameAsFirst(sub);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1398,7 +1354,7 @@
     ASSERT(instr->right()->representation().IsInteger32());
     LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
     LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
-    LAddI* add = new(zone()) LAddI(left, right);
+    LAddI* add = new LAddI(left, right);
     LInstruction* result = DefineSameAsFirst(add);
     if (instr->CheckFlag(HValue::kCanOverflow)) {
       result = AssignEnvironment(result);
@@ -1428,31 +1384,20 @@
 #else
       UseFixed(instr->right(), rdi);
 #endif
-  LPower* result = new(zone()) LPower(left, right);
-  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
-LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->global_object()->representation().IsTagged());
-#ifdef _WIN64
-  LOperand* global_object = UseFixed(instr->global_object(), rcx);
-#else
-  LOperand* global_object = UseFixed(instr->global_object(), rdi);
-#endif
-  LRandom* result = new(zone()) LRandom(global_object);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  Token::Value op = instr->token();
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), rdx);
-  LOperand* right = UseFixed(instr->right(), rax);
-  LCmpT* result = new(zone()) LCmpT(left, right);
+  bool reversed = (op == Token::GT || op == Token::LTE);
+  LOperand* left = UseFixed(instr->left(), reversed ? rax : rdx);
+  LOperand* right = UseFixed(instr->right(), reversed ? rdx : rax);
+  LCmpT* result = new LCmpT(left, right);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1463,23 +1408,16 @@
   if (r.IsInteger32()) {
     ASSERT(instr->left()->representation().IsInteger32());
     ASSERT(instr->right()->representation().IsInteger32());
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
-    return new(zone()) LCmpIDAndBranch(left, right);
+    return new LCmpIDAndBranch(left, right);
   } else {
     ASSERT(r.IsDouble());
     ASSERT(instr->left()->representation().IsDouble());
     ASSERT(instr->right()->representation().IsDouble());
-    LOperand* left;
-    LOperand* right;
-    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
-      left = UseRegisterOrConstantAtStart(instr->left());
-      right = UseRegisterOrConstantAtStart(instr->right());
-    } else {
-      left = UseRegisterAtStart(instr->left());
-      right = UseRegisterAtStart(instr->right());
-    }
-    return new(zone()) LCmpIDAndBranch(left, right);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new LCmpIDAndBranch(left, right);
   }
 }
 
@@ -1488,72 +1426,47 @@
     HCompareObjectEqAndBranch* instr) {
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return new(zone()) LCmpObjectEqAndBranch(left, right);
+  return new LCmpObjectEqAndBranch(left, right);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
     HCompareConstantEqAndBranch* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LCmpConstantEqAndBranch(value);
+  return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
-LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
-  return new(zone()) LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
+  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
-LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsStringAndBranch(value, temp);
+  return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+  return new LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  return new(zone()) LIsUndetectableAndBranch(value, temp);
-}
-
-
-LInstruction* LChunkBuilder::DoStringCompareAndBranch(
-    HStringCompareAndBranch* instr) {
-
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
-  LOperand* left = UseFixed(instr->left(), rdx);
-  LOperand* right = UseFixed(instr->right(), rax);
-  LStringCompareAndBranch* result =
-      new(zone()) LStringCompareAndBranch(left, right);
-
-  return MarkAsCall(result, instr);
+  return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+                                      TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LHasInstanceTypeAndBranch(value);
+  return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1562,64 +1475,54 @@
   ASSERT(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
-  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+  return DefineAsRegister(new LGetCachedArrayIndex(value));
 }
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* value = UseRegisterAtStart(instr->value());
-  return new(zone()) LHasCachedArrayIndexAndBranch(value);
+  return new LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  return new(zone()) LClassOfTestAndBranch(value,
-                                           TempRegister(),
-                                           TempRegister());
+  return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister());
 }
 
 
 LInstruction* LChunkBuilder::DoJSArrayLength(HJSArrayLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LJSArrayLength(array));
+  return DefineAsRegister(new LJSArrayLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoFixedArrayBaseLength(
     HFixedArrayBaseLength* instr) {
   LOperand* array = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LFixedArrayBaseLength(array));
+  return DefineAsRegister(new LFixedArrayBaseLength(array));
 }
 
 
 LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
   LOperand* object = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LElementsKind(object));
+  return DefineAsRegister(new LElementsKind(object));
 }
 
 
 LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
   LOperand* object = UseRegister(instr->value());
-  LValueOf* result = new(zone()) LValueOf(object);
-  return DefineSameAsFirst(result);
-}
-
-
-LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
-  LOperand* object = UseFixed(instr->value(), rax);
-  LDateField* result = new LDateField(object, instr->index());
-  return MarkAsCall(DefineFixed(result, rax), instr);
+  LValueOf* result = new LValueOf(object);
+  return AssignEnvironment(DefineSameAsFirst(result));
 }
 
 
 LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
-  LOperand* value = UseRegisterOrConstantAtStart(instr->index());
-  LOperand* length = Use(instr->length());
-  return AssignEnvironment(new(zone()) LBoundsCheck(value, length));
+  return AssignEnvironment(new LBoundsCheck(
+      UseRegisterOrConstantAtStart(instr->index()),
+      Use(instr->length())));
 }
 
 
@@ -1632,7 +1535,7 @@
 
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
   LOperand* value = UseFixed(instr->value(), rax);
-  return MarkAsCall(new(zone()) LThrow(value), instr);
+  return MarkAsCall(new LThrow(value), instr);
 }
 
 
@@ -1655,7 +1558,7 @@
   if (from.IsTagged()) {
     if (to.IsDouble()) {
       LOperand* value = UseRegister(instr->value());
-      LNumberUntagD* res = new(zone()) LNumberUntagD(value);
+      LNumberUntagD* res = new LNumberUntagD(value);
       return AssignEnvironment(DefineAsRegister(res));
     } else {
       ASSERT(to.IsInteger32());
@@ -1664,10 +1567,10 @@
       if (needs_check) {
         bool truncating = instr->CanTruncateToInt32();
         LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
-        LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
+        LTaggedToI* res = new LTaggedToI(value, xmm_temp);
         return AssignEnvironment(DefineSameAsFirst(res));
       } else {
-        return DefineSameAsFirst(new(zone()) LSmiUntag(value, needs_check));
+        return DefineSameAsFirst(new LSmiUntag(value, needs_check));
       }
     }
   } else if (from.IsDouble()) {
@@ -1677,27 +1580,26 @@
 
       // Make sure that temp and result_temp are different registers.
       LUnallocated* result_temp = TempRegister();
-      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+      LNumberTagD* result = new LNumberTagD(value, temp);
       return AssignPointerMap(Define(result, result_temp));
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
+      return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
     }
   } else if (from.IsInteger32()) {
     if (to.IsTagged()) {
       HValue* val = instr->value();
       LOperand* value = UseRegister(val);
       if (val->HasRange() && val->range()->IsInSmiRange()) {
-        return DefineSameAsFirst(new(zone()) LSmiTag(value));
+        return DefineSameAsFirst(new LSmiTag(value));
       } else {
-        LNumberTagI* result = new(zone()) LNumberTagI(value);
+        LNumberTagI* result = new LNumberTagI(value);
         return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
       }
     } else {
       ASSERT(to.IsDouble());
-      LOperand* value = Use(instr->value());
-      return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+      return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
     }
   }
   UNREACHABLE();
@@ -1707,39 +1609,39 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckNonSmi(value));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
+  LCheckInstanceType* result = new LCheckInstanceType(value);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
   LOperand* temp = TempRegister();
-  LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
+  LCheckPrototypeMaps* result = new LCheckPrototypeMaps(temp);
   return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckSmi(value));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new(zone()) LCheckFunction(value));
+  return AssignEnvironment(new LCheckFunction(value));
 }
 
 
 LInstruction* LChunkBuilder::DoCheckMap(HCheckMap* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  LCheckMap* result = new(zone()) LCheckMap(value);
+  LCheckMap* result = new LCheckMap(value);
   return AssignEnvironment(result);
 }
 
@@ -1749,36 +1651,62 @@
   Representation input_rep = value->representation();
   LOperand* reg = UseRegister(value);
   if (input_rep.IsDouble()) {
-    return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+    return DefineAsRegister(new LClampDToUint8(reg,
                                                TempRegister()));
   } else if (input_rep.IsInteger32()) {
-    return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
+    return DefineSameAsFirst(new LClampIToUint8(reg));
   } else {
     ASSERT(input_rep.IsTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
-    LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
-                                                        TempRegister(),
-                                                        FixedTemp(xmm1));
+    LClampTToUint8* result = new LClampTToUint8(reg,
+                                                TempRegister(),
+                                                FixedTemp(xmm1));
     return AssignEnvironment(DefineSameAsFirst(result));
   }
 }
 
 
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return AssignEnvironment(DefineAsRegister(new LDoubleToI(reg)));
+  } else if (input_rep.IsInteger32()) {
+    // Canonicalization should already have removed the hydrogen instruction in
+    // this case, since it is a noop.
+    UNREACHABLE();
+    return NULL;
+  } else {
+    ASSERT(input_rep.IsTagged());
+    LOperand* reg = UseRegister(value);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* xmm_temp =
+        CpuFeatures::IsSupported(SSE3)
+        ? NULL
+        : FixedTemp(xmm1);
+    return AssignEnvironment(
+        DefineSameAsFirst(new LTaggedToI(reg, xmm_temp)));
+  }
+}
+
+
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
-  return new(zone()) LReturn(UseFixed(instr->value(), rax));
+  return new LReturn(UseFixed(instr->value(), rax));
 }
 
 
 LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
   Representation r = instr->representation();
   if (r.IsInteger32()) {
-    return DefineAsRegister(new(zone()) LConstantI);
+    return DefineAsRegister(new LConstantI);
   } else if (r.IsDouble()) {
     LOperand* temp = TempRegister();
-    return DefineAsRegister(new(zone()) LConstantD(temp));
+    return DefineAsRegister(new LConstantD(temp));
   } else if (r.IsTagged()) {
-    return DefineAsRegister(new(zone()) LConstantT);
+    return DefineAsRegister(new LConstantT);
   } else {
     UNREACHABLE();
     return NULL;
@@ -1787,8 +1715,8 @@
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
-  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
-  return instr->RequiresHoleCheck()
+  LLoadGlobalCell* result = new LLoadGlobalCell;
+  return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1796,35 +1724,29 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), rax);
-  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  LOperand* value = UseRegister(instr->value());
-  // Use a temp to avoid reloading the cell value address in the case where
-  // we perform a hole check.
-  return instr->RequiresHoleCheck()
-      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
-      : new(zone()) LStoreGlobalCell(value, NULL);
+  LStoreGlobalCell* result =
+      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
+  return instr->check_hole_value() ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
   LOperand* global_object = UseFixed(instr->global_object(), rdx);
   LOperand* value = UseFixed(instr->value(), rax);
-  LStoreGlobalGeneric* result =  new(zone()) LStoreGlobalGeneric(global_object,
-                                                                 value);
+  LStoreGlobalGeneric* result =  new LStoreGlobalGeneric(global_object, value);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
   LOperand* context = UseRegisterAtStart(instr->value());
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LLoadContextSlot(context));
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return DefineAsRegister(new LLoadContextSlot(context));
 }
 
 
@@ -1841,15 +1763,14 @@
     value = UseRegister(instr->value());
     temp = NULL;
   }
-  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
-  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+  return new LStoreContextSlot(context, value, temp);
 }
 
 
 LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
   ASSERT(instr->representation().IsTagged());
   LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+  return DefineAsRegister(new LLoadNamedField(obj));
 }
 
 
@@ -1858,13 +1779,11 @@
   ASSERT(instr->representation().IsTagged());
   if (instr->need_generic()) {
     LOperand* obj = UseFixed(instr->object(), rax);
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
     return MarkAsCall(DefineFixed(result, rax), instr);
   } else {
     LOperand* obj = UseRegisterAtStart(instr->object());
-    LLoadNamedFieldPolymorphic* result =
-        new(zone()) LLoadNamedFieldPolymorphic(obj);
+    LLoadNamedFieldPolymorphic* result = new LLoadNamedFieldPolymorphic(obj);
     return AssignEnvironment(DefineAsRegister(result));
   }
 }
@@ -1872,7 +1791,7 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object = UseFixed(instr->object(), rax);
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(object);
+  LLoadNamedGeneric* result = new LLoadNamedGeneric(object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1880,20 +1799,20 @@
 LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
     HLoadFunctionPrototype* instr) {
   return AssignEnvironment(DefineAsRegister(
-      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+      new LLoadFunctionPrototype(UseRegister(instr->function()))));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadElements(HLoadElements* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadElements(input));
+  return DefineAsRegister(new LLoadElements(input));
 }
 
 
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LLoadExternalArrayPointer(input));
+  return DefineAsRegister(new LLoadExternalArrayPointer(input));
 }
 
 
@@ -1903,9 +1822,8 @@
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* obj = UseRegisterAtStart(instr->object());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-  LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
-  if (instr->RequiresHoleCheck()) AssignEnvironment(result);
-  return DefineAsRegister(result);
+  LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
+  return AssignEnvironment(DefineAsRegister(result));
 }
 
 
@@ -1916,7 +1834,7 @@
   LOperand* elements = UseRegisterAtStart(instr->elements());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LLoadKeyedFastDoubleElement* result =
-      new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+      new LLoadKeyedFastDoubleElement(elements, key);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
@@ -1924,18 +1842,19 @@
 LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
     HLoadKeyedSpecializedArrayElement* instr) {
   ElementsKind elements_kind = instr->elements_kind();
+  Representation representation(instr->representation());
   ASSERT(
-      (instr->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->key()->representation().IsInteger32());
   LOperand* external_pointer = UseRegister(instr->external_pointer());
   LOperand* key = UseRegisterOrConstant(instr->key());
   LLoadKeyedSpecializedArrayElement* result =
-      new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+      new LLoadKeyedSpecializedArrayElement(external_pointer, key);
   LInstruction* load_instr = DefineAsRegister(result);
   // An unsigned int array load might overflow and cause a deopt, make sure it
   // has an environment.
@@ -1948,7 +1867,7 @@
   LOperand* object = UseFixed(instr->object(), rdx);
   LOperand* key = UseFixed(instr->key(), rax);
 
-  LLoadKeyedGeneric* result = new(zone()) LLoadKeyedGeneric(object, key);
+  LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -1967,7 +1886,8 @@
   LOperand* key = needs_write_barrier
       ? UseTempRegister(instr->key())
       : UseRegisterOrConstantAtStart(instr->key());
-  return new(zone()) LStoreKeyedFastElement(obj, key, val);
+
+  return AssignEnvironment(new LStoreKeyedFastElement(obj, key, val));
 }
 
 
@@ -1981,18 +1901,19 @@
   LOperand* val = UseTempRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
 
-  return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+  return new LStoreKeyedFastDoubleElement(elements, key, val);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
     HStoreKeyedSpecializedArrayElement* instr) {
+  Representation representation(instr->value()->representation());
   ElementsKind elements_kind = instr->elements_kind();
   ASSERT(
-      (instr->value()->representation().IsInteger32() &&
+      (representation.IsInteger32() &&
        (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
        (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
-      (instr->value()->representation().IsDouble() &&
+      (representation.IsDouble() &&
        ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->external_pointer()->representation().IsExternal());
@@ -2007,9 +1928,9 @@
       : UseRegister(instr->value());
   LOperand* key = UseRegisterOrConstant(instr->key());
 
-  return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
-                                                        key,
-                                                        val);
+  return new LStoreKeyedSpecializedArrayElement(external_pointer,
+                                                key,
+                                                val);
 }
 
 
@@ -2022,35 +1943,11 @@
   ASSERT(instr->key()->representation().IsTagged());
   ASSERT(instr->value()->representation().IsTagged());
 
-  LStoreKeyedGeneric* result =
-      new(zone()) LStoreKeyedGeneric(object, key, value);
+  LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
   return MarkAsCall(result, instr);
 }
 
 
-LInstruction* LChunkBuilder::DoTransitionElementsKind(
-    HTransitionElementsKind* instr) {
-  if (instr->original_map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS &&
-      instr->transitioned_map()->elements_kind() == FAST_ELEMENTS) {
-    LOperand* object = UseRegister(instr->object());
-    LOperand* new_map_reg = TempRegister();
-    LOperand* temp_reg = TempRegister();
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
-    return DefineSameAsFirst(result);
-  } else {
-    LOperand* object = UseFixed(instr->object(), rax);
-    LOperand* fixed_object_reg = FixedTemp(rdx);
-    LOperand* new_map_reg = FixedTemp(rbx);
-    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object,
-                                            new_map_reg,
-                                            fixed_object_reg);
-    return MarkAsCall(DefineFixed(result, rax), instr);
-  }
-}
-
-
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
@@ -2067,7 +1964,7 @@
   LOperand* temp = (!instr->is_in_object() || needs_write_barrier)
       ? TempRegister() : NULL;
 
-  return new(zone()) LStoreNamedField(obj, val, temp);
+  return new LStoreNamedField(obj, val, temp);
 }
 
 
@@ -2075,7 +1972,7 @@
   LOperand* object = UseFixed(instr->object(), rdx);
   LOperand* value = UseFixed(instr->value(), rax);
 
-  LStoreNamedGeneric* result = new(zone()) LStoreNamedGeneric(object, value);
+  LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2083,67 +1980,55 @@
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
   LOperand* left = UseOrConstantAtStart(instr->left());
   LOperand* right = UseOrConstantAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), rax),
-                    instr);
+  return MarkAsCall(DefineFixed(new LStringAdd(left, right), rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+  LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+  LStringCharFromCode* result = new LStringCharFromCode(char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
   LOperand* string = UseRegisterAtStart(instr->value());
-  return DefineAsRegister(new(zone()) LStringLength(string));
-}
-
-
-LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
-  LAllocateObject* result = new LAllocateObject(TempRegister());
-  return AssignPointerMap(DefineAsRegister(result));
-}
-
-
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
+  return DefineAsRegister(new LStringLength(string));
 }
 
 
 LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new LArrayLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LObjectLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new LObjectLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, rax), instr);
+  return MarkAsCall(DefineFixed(new LFunctionLiteral, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
-  LOperand* object = UseAtStart(instr->object());
-  LOperand* key = UseOrConstantAtStart(instr->key());
-  LDeleteProperty* result = new(zone()) LDeleteProperty(object, key);
+  LDeleteProperty* result =
+      new LDeleteProperty(UseAtStart(instr->object()),
+                          UseOrConstantAtStart(instr->key()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2151,13 +2036,13 @@
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
-  return AssignEnvironment(new(zone()) LOsrEntry);
+  return AssignEnvironment(new LOsrEntry);
 }
 
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   int spill_index = chunk()->GetParameterStackSlot(instr->index());
-  return DefineAsSpilled(new(zone()) LParameter, spill_index);
+  return DefineAsSpilled(new LParameter, spill_index);
 }
 
 
@@ -2167,13 +2052,13 @@
     Abort("Too many spill slots needed for OSR");
     spill_index = 0;
   }
-  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+  return DefineAsSpilled(new LUnknownOSRValue, spill_index);
 }
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
   argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr);
+  return MarkAsCall(DefineFixed(new LCallStub, rax), instr);
 }
 
 
@@ -2190,33 +2075,32 @@
   LOperand* arguments = UseRegister(instr->arguments());
   LOperand* length = UseTempRegister(instr->length());
   LOperand* index = Use(instr->index());
-  LAccessArgumentsAt* result =
-      new(zone()) LAccessArgumentsAt(arguments, length, index);
+  LAccessArgumentsAt* result = new LAccessArgumentsAt(arguments, length, index);
   return AssignEnvironment(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
   LOperand* object = UseFixed(instr->value(), rax);
-  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  LToFastProperties* result = new LToFastProperties(object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new(zone()) LTypeof(UseAtStart(instr->value()));
+  LTypeof* result = new LTypeof(UseAtStart(instr->value()));
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
-  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+  return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
     HIsConstructCallAndBranch* instr) {
-  return new(zone()) LIsConstructCallAndBranch(TempRegister());
+  return new LIsConstructCallAndBranch(TempRegister());
 }
 
 
@@ -2239,7 +2123,7 @@
   // If there is an instruction pending deoptimization environment create a
   // lazy bailout instruction to capture the environment.
   if (pending_deoptimization_ast_id_ == instr->ast_id()) {
-    LLazyBailout* lazy_bailout = new(zone()) LLazyBailout;
+    LLazyBailout* lazy_bailout = new LLazyBailout;
     LInstruction* result = AssignEnvironment(lazy_bailout);
     instruction_pending_deoptimization_environment_->
         set_deoptimization_environment(result->environment());
@@ -2253,10 +2137,10 @@
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
-    return MarkAsCall(new(zone()) LStackCheck, instr);
+    return MarkAsCall(new LStackCheck, instr);
   } else {
     ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+    return AssignEnvironment(AssignPointerMap(new LStackCheck));
   }
 }
 
@@ -2265,11 +2149,9 @@
   HEnvironment* outer = current_block_->last_environment();
   HConstant* undefined = graph()->GetConstantUndefined();
   HEnvironment* inner = outer->CopyForInlining(instr->closure(),
-                                               instr->arguments_count(),
                                                instr->function(),
                                                undefined,
-                                               instr->call_kind(),
-                                               instr->is_construct());
+                                               instr->call_kind());
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
   return NULL;
@@ -2277,8 +2159,7 @@
 
 
 LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
-  HEnvironment* outer = current_block_->last_environment()->
-      DiscardInlined(false);
+  HEnvironment* outer = current_block_->last_environment()->outer();
   current_block_->UpdateEnvironment(outer);
   return NULL;
 }
@@ -2287,39 +2168,11 @@
 LInstruction* LChunkBuilder::DoIn(HIn* instr) {
   LOperand* key = UseOrConstantAtStart(instr->key());
   LOperand* object = UseOrConstantAtStart(instr->object());
-  LIn* result = new(zone()) LIn(key, object);
+  LIn* result = new LIn(key, object);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
-  LOperand* object = UseFixed(instr->enumerable(), rax);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
-  return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
-}
-
-
-LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
-  LOperand* map = UseRegister(instr->map());
-  return AssignEnvironment(DefineAsRegister(
-      new(zone()) LForInCacheArray(map)));
-}
-
-
-LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
-  LOperand* map = UseRegisterAtStart(instr->map());
-  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
-}
-
-
-LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
-  LOperand* object = UseRegister(instr->object());
-  LOperand* index = UseTempRegister(instr->index());
-  return DefineSameAsFirst(new(zone()) LLoadFieldByIndex(object, index));
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 2d8fd2e..d169bf6 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,7 +49,6 @@
 #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
   V(AccessArgumentsAt)                          \
   V(AddI)                                       \
-  V(AllocateObject)                             \
   V(ApplyArguments)                             \
   V(ArgumentsElements)                          \
   V(ArgumentsLength)                            \
@@ -88,13 +87,11 @@
   V(ConstantI)                                  \
   V(ConstantT)                                  \
   V(Context)                                    \
-  V(DeclareGlobals)                             \
   V(DeleteProperty)                             \
   V(Deoptimize)                                 \
   V(DivI)                                       \
   V(DoubleToI)                                  \
   V(ElementsKind)                               \
-  V(FastLiteral)                                \
   V(FixedArrayBaseLength)                       \
   V(FunctionLiteral)                            \
   V(GetCachedArrayIndex)                        \
@@ -110,12 +107,10 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNilAndBranch)                             \
+  V(IsNullAndBranch)                            \
   V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
-  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -143,7 +138,6 @@
   V(Parameter)                                  \
   V(Power)                                      \
   V(PushArgument)                               \
-  V(Random)                                     \
   V(RegExpLiteral)                              \
   V(Return)                                     \
   V(ShiftI)                                     \
@@ -168,18 +162,11 @@
   V(ThisFunction)                               \
   V(Throw)                                      \
   V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
   V(Typeof)                                     \
   V(TypeofIsAndBranch)                          \
   V(UnaryMathOperation)                         \
   V(UnknownOSRValue)                            \
-  V(ValueOf)                                    \
-  V(ForInPrepareMap)                            \
-  V(ForInCacheArray)                            \
-  V(CheckMapValue)                              \
-  V(LoadFieldByIndex)                           \
-  V(DateField)                                  \
-  V(WrapReceiver)
+  V(ValueOf)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)              \
@@ -470,20 +457,6 @@
 };
 
 
-class LWrapReceiver: public LTemplateInstruction<1, 2, 0> {
- public:
-  LWrapReceiver(LOperand* receiver, LOperand* function) {
-    inputs_[0] = receiver;
-    inputs_[1] = function;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
-
-  LOperand* receiver() { return inputs_[0]; }
-  LOperand* function() { return inputs_[1]; }
-};
-
-
 class LApplyArguments: public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
@@ -636,18 +609,17 @@
 };
 
 
-class LIsNilAndBranch: public LControlInstruction<1, 1> {
+class LIsNullAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNilAndBranch(LOperand* value, LOperand* temp) {
+  LIsNullAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
 
-  EqualityKind kind() const { return hydrogen()->kind(); }
-  NilValue nil() const { return hydrogen()->nil(); }
+  bool is_strict() const { return hydrogen()->is_strict(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -666,20 +638,6 @@
 };
 
 
-class LIsStringAndBranch: public LControlInstruction<1, 1> {
- public:
-  explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-};
-
-
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -708,23 +666,6 @@
 };
 
 
-class LStringCompareAndBranch: public LControlInstruction<2, 0> {
- public:
-  explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
-                               "string-compare-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  Token::Value op() const { return hydrogen()->token(); }
-};
-
-
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -764,12 +705,11 @@
 };
 
 
-class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
  public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
-    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -850,15 +790,18 @@
 
 class LBitI: public LTemplateInstruction<1, 2, 0> {
  public:
-  LBitI(LOperand* left, LOperand* right) {
+  LBitI(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
     inputs_[0] = left;
     inputs_[1] = right;
   }
 
-  Token::Value op() const { return hydrogen()->op(); }
+  Token::Value op() const { return op_; }
 
   DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
-  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ private:
+  Token::Value op_;
 };
 
 
@@ -1003,22 +946,6 @@
 };
 
 
-class LDateField: public LTemplateInstruction<1, 1, 0> {
- public:
-  LDateField(LOperand* date, Smi* index) : index_(index) {
-    inputs_[0] = date;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field")
-  DECLARE_HYDROGEN_ACCESSOR(ValueOf)
-
-  Smi* index() const { return index_; }
-
- private:
-  Smi* index_;
-};
-
-
 class LThrow: public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LThrow(LOperand* value) {
@@ -1063,17 +990,6 @@
 };
 
 
-class LRandom: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LRandom(LOperand* global_object) {
-    inputs_[0] = global_object;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(Random, "random")
-  DECLARE_HYDROGEN_ACCESSOR(Random)
-};
-
-
 class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@@ -1290,8 +1206,6 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-
-  LOperand* value() { return inputs_[0]; }
 };
 
 
@@ -1309,7 +1223,7 @@
   LOperand* global_object() { return InputAt(0); }
   Handle<Object> name() const { return hydrogen()->name(); }
   LOperand* value() { return InputAt(1); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1343,6 +1257,7 @@
   LOperand* context() { return InputAt(0); }
   LOperand* value() { return InputAt(1); }
   int slot_index() { return hydrogen()->slot_index(); }
+  int needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1359,9 +1274,7 @@
 
 
 class LThisFunction: public LTemplateInstruction<1, 0, 0> {
- public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
-  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
@@ -1383,13 +1296,6 @@
 };
 
 
-class LDeclareGlobals: public LTemplateInstruction<0, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
-  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
-};
-
-
 class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@@ -1466,17 +1372,14 @@
 };
 
 
-class LCallFunction: public LTemplateInstruction<1, 1, 0> {
+class LCallFunction: public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LCallFunction(LOperand* function) {
-    inputs_[0] = function;
-  }
+  LCallFunction() {}
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
   DECLARE_HYDROGEN_ACCESSOR(CallFunction)
 
-  LOperand* function() { return inputs_[0]; }
-  int arity() const { return hydrogen()->argument_count() - 1; }
+  int arity() const { return hydrogen()->argument_count() - 2; }
 };
 
 
@@ -1645,6 +1548,7 @@
   Handle<Object> name() const { return hydrogen()->name(); }
   bool is_in_object() { return hydrogen()->is_in_object(); }
   int offset() { return hydrogen()->offset(); }
+  bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
   Handle<Map> transition() const { return hydrogen()->transition(); }
 };
 
@@ -1664,7 +1568,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* value() { return inputs_[1]; }
   Handle<Object> name() const { return hydrogen()->name(); }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1749,31 +1653,7 @@
   LOperand* object() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
   LOperand* value() { return inputs_[2]; }
-  StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
-};
-
-
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
- public:
-  LTransitionElementsKind(LOperand* object,
-                          LOperand* new_map_temp,
-                          LOperand* temp_reg) {
-    inputs_[0] = object;
-    temps_[0] = new_map_temp;
-    temps_[1] = temp_reg;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
-                               "transition-elements-kind")
-  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
-
-  virtual void PrintDataTo(StringStream* stream);
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* new_map_reg() { return temps_[0]; }
-  LOperand* temp_reg() { return temps_[1]; }
-  Handle<Map> original_map() { return hydrogen()->original_map(); }
-  Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+  bool strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
@@ -1839,8 +1719,6 @@
     inputs_[0] = value;
   }
 
-  LOperand* value() { return InputAt(0); }
-
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -1943,24 +1821,6 @@
 };
 
 
-class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
- public:
-  explicit LAllocateObject(LOperand* temp) {
-    temps_[0] = temp;
-  }
-
-  DECLARE_CONCRETE_INSTRUCTION(AllocateObject, "allocate-object")
-  DECLARE_HYDROGEN_ACCESSOR(AllocateObject)
-};
-
-
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
-  DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
-  DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
 class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -2088,62 +1948,6 @@
 };
 
 
-class LForInPrepareMap: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInPrepareMap(LOperand* object) {
-    inputs_[0] = object;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
-};
-
-
-class LForInCacheArray: public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LForInCacheArray(LOperand* map) {
-    inputs_[0] = map;
-  }
-
-  LOperand* map() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
-
-  int idx() {
-    return HForInCacheArray::cast(this->hydrogen_value())->idx();
-  }
-};
-
-
-class LCheckMapValue: public LTemplateInstruction<0, 2, 0> {
- public:
-  LCheckMapValue(LOperand* value, LOperand* map) {
-    inputs_[0] = value;
-    inputs_[1] = map;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-  LOperand* map() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
-};
-
-
-class LLoadFieldByIndex: public LTemplateInstruction<1, 2, 0> {
- public:
-  LLoadFieldByIndex(LOperand* object, LOperand* index) {
-    inputs_[0] = object;
-    inputs_[1] = index;
-  }
-
-  LOperand* object() { return inputs_[0]; }
-  LOperand* index() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
-};
-
-
 class LChunkBuilder;
 class LChunk: public ZoneObject {
  public:
@@ -2217,7 +2021,6 @@
       : chunk_(NULL),
         info_(info),
         graph_(graph),
-        zone_(graph->isolate()->zone()),
         status_(UNUSED),
         current_instruction_(NULL),
         current_block_(NULL),
@@ -2247,7 +2050,6 @@
   LChunk* chunk() const { return chunk_; }
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
-  Zone* zone() const { return zone_; }
 
   bool is_unused() const { return status_ == UNUSED; }
   bool is_building() const { return status_ == BUILDING; }
@@ -2257,6 +2059,7 @@
   void Abort(const char* format, ...);
 
   // Methods for getting operands for Use / Define / Temp.
+  LRegister* ToOperand(Register reg);
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
 
@@ -2307,6 +2110,8 @@
       LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                            LUnallocated* result);
   template<int I, int T>
+      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
+  template<int I, int T>
       LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
   template<int I, int T>
       LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
@@ -2341,12 +2146,12 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
-                                  int* argument_index_accumulator);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
 
   void VisitInstruction(HInstruction* current);
 
   void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
   LInstruction* DoArithmeticD(Token::Value op,
                               HArithmeticBinaryOperation* instr);
@@ -2356,7 +2161,6 @@
   LChunk* chunk_;
   CompilationInfo* info_;
   HGraph* const graph_;
-  Zone* zone_;
   Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
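
The builder also loses its cached zone_ pointer; in 3.6 it reaches the compilation zone through the isolate instead. Either way, the zone is a bump allocator whose contents are freed all at once. A rough standalone sketch of that allocation scheme (illustrative names, not V8's actual Zone API):

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Rough sketch of zone (arena) allocation, the scheme behind the zone_
// field removed above: objects are bump-allocated out of large segments
// and reclaimed all at once when the zone is torn down. Names are
// illustrative, not V8's actual Zone API.
class ZoneSketch {
 public:
  ~ZoneSketch() { for (char* s : segments_) delete[] s; }

  void* New(size_t size) {
    size = (size + 7) & ~size_t{7};  // keep allocations 8-byte aligned
    if (static_cast<size_t>(limit_ - position_) < size) Expand(size);
    void* result = position_;
    position_ += size;
    return result;
  }

 private:
  void Expand(size_t min_size) {
    size_t segment_size = min_size > 8192 ? min_size : 8192;
    char* segment = new char[segment_size];
    segments_.push_back(segment);
    position_ = segment;
    limit_ = segment + segment_size;
  }

  char* position_ = nullptr;
  char* limit_ = nullptr;
  std::vector<char*> segments_;
};

int main() {
  ZoneSketch zone;
  int* n = static_cast<int*>(zone.New(sizeof(int)));
  *n = 42;
  std::printf("%d\n", *n);  // all segments are freed when the zone dies
}
```
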
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f7db250..8fcad23 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -44,7 +44,6 @@
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
-      has_frame_(false),
       root_array_available_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -55,7 +54,7 @@
 
 static intptr_t RootRegisterDelta(ExternalReference other, Isolate* isolate) {
   Address roots_register_value = kRootRegisterBias +
-      reinterpret_cast<Address>(isolate->heap()->roots_array_start());
+      reinterpret_cast<Address>(isolate->heap()->roots_address());
   intptr_t delta = other.address() - roots_register_value;
   return delta;
 }
@@ -197,47 +196,28 @@
 }
 
 
-void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
-                                         Register addr,
-                                         Register scratch,
-                                         SaveFPRegsMode save_fp,
-                                         RememberedSetFinalAction and_then) {
-  if (FLAG_debug_code) {
-    Label ok;
-    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
-    int3();
-    bind(&ok);
+void MacroAssembler::RecordWriteHelper(Register object,
+                                       Register addr,
+                                       Register scratch) {
+  if (emit_debug_code()) {
+    // Check that the object is not in new space.
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
+    Abort("new-space object passed to RecordWriteHelper");
+    bind(&not_in_new_space);
   }
-  // Load store buffer top.
-  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
-  // Store pointer to buffer.
-  movq(Operand(scratch, 0), addr);
-  // Increment buffer top.
-  addq(scratch, Immediate(kPointerSize));
-  // Write back new top of buffer.
-  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
-  // Call stub on end of buffer.
-  Label done;
-  // Check for end of buffer.
-  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
-  if (and_then == kReturnAtEnd) {
-    Label buffer_overflowed;
-    j(not_equal, &buffer_overflowed, Label::kNear);
-    ret(0);
-    bind(&buffer_overflowed);
-  } else {
-    ASSERT(and_then == kFallThroughAtEnd);
-    j(equal, &done, Label::kNear);
-  }
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(save_fp);
-  CallStub(&store_buffer_overflow);
-  if (and_then == kReturnAtEnd) {
-    ret(0);
-  } else {
-    ASSERT(and_then == kFallThroughAtEnd);
-    bind(&done);
-  }
+
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  and_(object, Immediate(~Page::kPageAlignmentMask));
+
+  // Compute the number of the region covering addr. See the
+  // Page::GetRegionNumberForAddress method for more details.
+  shrl(addr, Immediate(Page::kRegionSizeLog2));
+  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
+
+  // Set dirty mark for region.
+  bts(Operand(object, Page::kDirtyFlagOffset), addr);
 }
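
The restored RecordWriteHelper is the 3.6-era card-marking write barrier: instead of queuing the slot address in a store buffer, it sets one dirty bit for the fixed-size page region containing the slot. The and_/shrl/andl/bts sequence computes, in effect (page and region sizes below are assumptions for illustration):

```cpp
#include <cstdint>
#include <cstdio>

// The region-marking arithmetic performed above by and_/shrl/andl/bts,
// as plain C++. Page and region sizes are assumptions for illustration.
constexpr uintptr_t kPageSize = uintptr_t{1} << 13;   // assume 8KB pages
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr int kRegionSizeLog2 = 8;                    // assume 256-byte regions

int main() {
  uintptr_t slot = 0x4a3f6e8;  // address of the updated field
  // and_(object, ~kPageAlignmentMask): the page containing the object.
  uintptr_t page_start = slot & ~kPageAlignmentMask;
  // shrl/andl: index of the region covering the slot within its page.
  uint32_t region =
      static_cast<uint32_t>((slot & kPageAlignmentMask) >> kRegionSizeLog2);
  // bts(Operand(object, kDirtyFlagOffset), addr): set that region's bit in
  // the page's dirty-mark word; the collector rescans only dirty regions.
  uint32_t dirty_marks = 0;
  dirty_marks |= uint32_t{1} << region;
  std::printf("page %#lx, region %u, marks %#x\n",
              static_cast<unsigned long>(page_start), region, dirty_marks);
}
```
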
 
 
@@ -245,7 +225,7 @@
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance distance) {
+                                Label::Distance near_jump) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address.  We load it as an external reference in
@@ -260,7 +240,7 @@
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, distance);
+    j(cc, branch, near_jump);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
@@ -272,88 +252,35 @@
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, distance);
+    j(cc, branch, near_jump);
   }
 }
 
 
-void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    SaveFPRegsMode save_fp,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(Register object,
+                                 int offset,
+                                 Register value,
+                                 Register index) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!value.is(rsi) && !dst.is(rsi));
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
+  // catch stores of smis and stores into the young generation.
   Label done;
+  JumpIfSmi(value, &done);
 
-  // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    JumpIfSmi(value, &done);
-  }
-
-  // Although the object register is tagged, the offset is relative to the start
-  // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
-
-  lea(dst, FieldOperand(object, offset));
-  if (emit_debug_code()) {
-    Label ok;
-    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
-    j(zero, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-  }
-
-  RecordWrite(
-      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
+  RecordWriteNonSmi(object, offset, value, index);
   bind(&done);
 
-  // Clobber clobbered input registers when running with the debug-code flag
-  // turned on to provoke errors.
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors. This clobbering repeats the
+  // clobbering done inside RecordWriteNonSmi but it's necessary to
+  // avoid having the fast case for smis leave the registers
+  // unchanged.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
-
-void MacroAssembler::RecordWriteArray(Register object,
-                                      Register value,
-                                      Register index,
-                                      SaveFPRegsMode save_fp,
-                                      RememberedSetAction remembered_set_action,
-                                      SmiCheck smi_check) {
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis.
-  Label done;
-
-  // Skip barrier if writing a smi.
-  if (smi_check == INLINE_SMI_CHECK) {
-    JumpIfSmi(value, &done);
-  }
-
-  // Array access: calculate the destination address. Index is not a smi.
-  Register dst = index;
-  lea(dst, Operand(object, index, times_pointer_size,
-                   FixedArray::kHeaderSize - kHeapObjectTag));
-
-  RecordWrite(
-      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
-
-  bind(&done);
-
-  // Clobber clobbered input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
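
InNewSpace can be a single mask-and-compare only because the young generation is one contiguous, size-aligned block. A sketch of the test, assuming a 64-bit address space and illustrative sizes:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of the InNewSpace mask test used above: the young generation
// is one size-aligned block, so membership is a single AND plus compare.
// The size and base below are assumptions.
constexpr uintptr_t kNewSpaceSize = uintptr_t{1} << 23;    // assume 8MB
constexpr uintptr_t kNewSpaceMask = ~(kNewSpaceSize - 1);
constexpr uintptr_t kNewSpaceStart = uintptr_t{1} << 32;   // aligned base

bool InNewSpaceSketch(uintptr_t addr) {
  // Matches: and_(scratch, NewSpaceMask); cmpq against new_space_start.
  return (addr & kNewSpaceMask) == kNewSpaceStart;
}

int main() {
  std::printf("%d %d\n",
              InNewSpaceSketch(kNewSpaceStart + 0x100),           // 1
              InNewSpaceSketch(kNewSpaceStart - kNewSpaceSize));  // 0
}
```
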
@@ -362,72 +289,90 @@
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value,
-                                 SaveFPRegsMode fp_mode,
-                                 RememberedSetAction remembered_set_action,
-                                 SmiCheck smi_check) {
+                                 Register value) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!value.is(rsi) && !address.is(rsi));
-
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
-  if (emit_debug_code()) {
-    AbortIfSmi(object);
-  }
-
-  if (remembered_set_action == OMIT_REMEMBERED_SET &&
-      !FLAG_incremental_marking) {
-    return;
-  }
-
-  if (FLAG_debug_code) {
-    Label ok;
-    cmpq(value, Operand(address, 0));
-    j(equal, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-  }
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
+  JumpIfSmi(value, &done);
 
-  if (smi_check == INLINE_SMI_CHECK) {
-    // Skip barrier if writing a smi.
-    JumpIfSmi(value, &done);
-  }
+  InNewSpace(object, value, equal, &done);
 
-  CheckPageFlag(value,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersToHereAreInterestingMask,
-                zero,
-                &done,
-                Label::kNear);
-
-  CheckPageFlag(object,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersFromHereAreInterestingMask,
-                zero,
-                &done,
-                Label::kNear);
-
-  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
-  CallStub(&stub);
+  RecordWriteHelper(object, address, value);
 
   bind(&done);
 
-  // Clobber clobbered registers when running with the debug-code flag
+  // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
 
+void MacroAssembler::RecordWriteNonSmi(Register object,
+                                       int offset,
+                                       Register scratch,
+                                       Register index) {
+  Label done;
+
+  if (emit_debug_code()) {
+    Label okay;
+    JumpIfNotSmi(object, &okay, Label::kNear);
+    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
+    bind(&okay);
+
+    if (offset == 0) {
+      // index must be int32.
+      Register tmp = index.is(rax) ? rbx : rax;
+      push(tmp);
+      movl(tmp, index);
+      cmpq(tmp, index);
+      Check(equal, "Index register for RecordWrite must be untagged int32.");
+      pop(tmp);
+    }
+  }
+
+  // Test that the object address is not in the new space. We cannot
+  // update page dirty marks for new space pages.
+  InNewSpace(object, scratch, equal, &done);
+
+  // The offset is relative to a tagged or untagged HeapObject pointer,
+  // so either offset or offset + kHeapObjectTag must be a
+  // multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize) ||
+         IsAligned(offset + kHeapObjectTag, kPointerSize));
+
+  Register dst = index;
+  if (offset != 0) {
+    lea(dst, Operand(object, offset));
+  } else {
+    // Array access: calculate the destination address in the same manner as
+    // KeyedStoreIC::GenerateGeneric.
+    lea(dst, FieldOperand(object,
+                          index,
+                          times_pointer_size,
+                          FixedArray::kHeaderSize));
+  }
+  RecordWriteHelper(object, dst, scratch);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+  }
+}
+
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (emit_debug_code()) Check(cc, msg);
 }
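
Both barrier versions end the same way: under the debug-code flag, every register the helper is allowed to clobber is overwritten with kZapValue, so callers that wrongly keep using those registers fail loudly. The idiom, sketched (the poison constant here is illustrative):

```cpp
#include <cstdint>
#include <cstdio>

// The kZapValue idiom used above: in debug builds, deliberately destroy
// every register (modeled here as variables) a helper is documented to
// clobber, so callers that keep using them fail on a recognizable
// pattern instead of misbehaving subtly. The constant is illustrative.
constexpr uint64_t kZapValueSketch = 0xdeadbeedbeadbeefULL;

#ifndef NDEBUG
#define ZAP(x) ((x) = kZapValueSketch)
#else
#define ZAP(x) ((void)0)
#endif

void BarrierSketch(uint64_t object, uint64_t& address, uint64_t& value) {
  (void)object;  // ...the real barrier work would happen here...
  ZAP(address);  // documented as clobbered by the helper
  ZAP(value);
}

int main() {
  uint64_t addr = 0x1000, val = 7;
  BarrierSketch(0x2000, addr, val);
  std::printf("%#llx %#llx\n",  // poison values in debug builds
              static_cast<unsigned long long>(addr),
              static_cast<unsigned long long>(val));
}
```
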
@@ -455,7 +400,7 @@
   Label L;
   j(cc, &L, Label::kNear);
   Abort(msg);
-  // Control will not return here.
+  // will not return here
   bind(&L);
 }
 
@@ -495,7 +440,7 @@
   // from the real pointer as a smi.
   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
-  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
+  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
 #ifdef DEBUG
   if (msg != NULL) {
@@ -503,6 +448,9 @@
     RecordComment(msg);
   }
 #endif
+  // Disable stub call restrictions to always allow calls to abort.
+  AllowStubCallsScope allow_scope(this, true);
+
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
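
The p0/p1 arithmetic above lets Abort pass a raw C-string pointer through a GC-aware runtime call: p0 is the pointer with its low bits forced to a valid smi tag, and the remainder travels separately as a genuine smi. The round trip, assuming V8's one-bit smi tagging:

```cpp
#include <cstdint>
#include <cstdio>

// The p0/p1 trick from Abort above: split a raw pointer into a word
// whose low bits carry a valid smi tag plus a smi-encoded remainder,
// pass both through a GC-aware call, then reassemble. One-bit smi
// tagging is assumed, matching kSmiTag/kSmiTagMask above.
constexpr intptr_t kSmiTag = 0;
constexpr intptr_t kSmiTagMask = 1;

int main() {
  const char* msg = "unreachable code reached";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // tag bits now read "smi"
  intptr_t delta = p1 - p0;                     // 0 or 1, itself a tiny smi
  // Both words survive the runtime call without confusing the GC; the
  // receiver reconstructs the original pointer:
  const char* recovered = reinterpret_cast<const char*>(p0 + delta);
  std::printf("%s\n", recovered);
}
```
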
@@ -510,44 +458,52 @@
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
-
-  if (!has_frame_) {
-    // We don't actually want to generate a pile of code for this, so just
-    // claim there is a stack frame, without generating one.
-    FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 2);
-  } else {
-    CallRuntime(Runtime::kAbort, 2);
-  }
-  // Control will not return here.
+  CallRuntime(Runtime::kAbort, 2);
+  // will not return here
   int3();
 }
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
+MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  MaybeObject* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
+         RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
+
+
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  MaybeObject* result = stub->TryGetCode();
+  if (!result->IsFailure()) {
+    jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
+        RelocInfo::CODE_TARGET);
+  }
+  return result;
+}
+
+
 void MacroAssembler::StubReturn(int argc) {
   ASSERT(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
 
-bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
-  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
-  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
-}
-
-
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addq(rsp, Immediate(num_arguments * kPointerSize));
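
The restored Try* entry points exist because 3.6 code generation could itself fail to allocate: TryGetCode returns a MaybeObject*, and every layer checks IsFailure() and propagates instead of assuming stub compilation is GC-safe (the AllowThisStubCall/CompilingCallsToThisStubIsGCSafe machinery removed above). The propagation pattern, with stand-in types:

```cpp
#include <cstdio>

// The MaybeObject-style propagation restored above, with stand-in
// types: every fallible step returns value-or-failure, and each layer
// checks IsFailure() and bails out instead of assuming success.
struct MaybeResult {
  bool failed;
  int value;  // stands in for the Code*/Object* success payload
  bool IsFailure() const { return failed; }
};

MaybeResult TryGetCodeSketch(bool out_of_memory) {
  if (out_of_memory) return {true, 0};  // allocation failed; caller retries
  return {false, 42};
}

MaybeResult TryCallStubSketch(bool out_of_memory) {
  MaybeResult code = TryGetCodeSketch(out_of_memory);
  if (code.IsFailure()) return code;  // propagate, as TryCallStub does
  // ...the call to the materialized code object would be emitted here...
  return code;
}

int main() {
  std::printf("ok: %d\n", TryCallStubSketch(false).value);
  std::printf("failed: %d\n", TryCallStubSketch(true).IsFailure());
}
```
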
@@ -584,11 +540,18 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(rax, function->nargs);
   LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1, kSaveFPRegs);
+  CEntryStub ces(1);
+  ces.SaveDoubles();
   CallStub(&ces);
 }
 
 
+MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
+                                            int num_arguments) {
+  return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
                                  int num_arguments) {
   // If the expected number of arguments of the runtime function is
@@ -610,6 +573,26 @@
 }
 
 
+MaybeObject* MacroAssembler::TryCallRuntime(const Runtime::Function* f,
+                                            int num_arguments) {
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    // Since we did not call the stub, there was no allocation failure.
+    // Return some non-failure object.
+    return HEAP->undefined_value();
+  }
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(rax, num_arguments);
+  LoadAddress(rbx, ExternalReference(f, isolate()));
+  CEntryStub ces(f->result_size);
+  return TryCallStub(&ces);
+}
+
+
 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                            int num_arguments) {
   Set(rax, num_arguments);
@@ -639,6 +622,24 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallExternalReference(
+    const ExternalReference& ext, int num_arguments, int result_size) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : argument num_arguments - 1
+  //  ...
+  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
+  // -----------------------------------
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  Set(rax, num_arguments);
+  return TryJumpToExternalReference(ext, result_size);
+}
+
+
 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
                                      int num_arguments,
                                      int result_size) {
@@ -648,6 +649,15 @@
 }
 
 
+MaybeObject* MacroAssembler::TryTailCallRuntime(Runtime::FunctionId fid,
+                                                int num_arguments,
+                                                int result_size) {
+  return TryTailCallExternalReference(ExternalReference(fid, isolate()),
+                                      num_arguments,
+                                      result_size);
+}
+
+
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that fits into int.
@@ -670,8 +680,8 @@
 }
 
 
-void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
-                                              int stack_space) {
+MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
+    ApiFunction* function, int stack_space) {
   Label empty_result;
   Label prologue;
   Label promote_scheduled_exception;
@@ -701,7 +711,8 @@
   movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
   addl(Operand(base_reg, kLevelOffset), Immediate(1));
   // Call the api function!
-  movq(rax, reinterpret_cast<int64_t>(function_address),
+  movq(rax,
+       reinterpret_cast<int64_t>(function->address()),
        RelocInfo::RUNTIME_ENTRY);
   call(rax);
 
@@ -733,7 +744,11 @@
   ret(stack_space * kPointerSize);
 
   bind(&promote_scheduled_exception);
-  TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
+  MaybeObject* result = TryTailCallRuntime(Runtime::kPromoteScheduledException,
+                                           0, 1);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   bind(&empty_result);
   // It was zero; the result is undefined.
@@ -754,6 +769,8 @@
   call(rax);
   movq(rax, prev_limit_reg);
   jmp(&leave_exit_frame);
+
+  return result;
 }
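
The prologue and epilogue around the API call manage three isolate-global words (next, limit, and level, addressed above through kNextOffset, kLimitOffset, and kLevelOffset) that implement handle-scope bookkeeping. A simplified C++ model of that behavior, with the slow path reduced to a message:

```cpp
#include <cstdio>

// Simplified model of the handle-scope data the code above updates
// through kNextOffset/kLimitOffset/kLevelOffset: save next/limit, bump
// level, and on exit pop back, releasing extra blocks if limit moved.
struct HandleScopeData {
  void** next = nullptr;
  void** limit = nullptr;
  int level = 0;
};

class ScopeSketch {
 public:
  explicit ScopeSketch(HandleScopeData* data) : data_(data) {
    prev_next_ = data->next;
    prev_limit_ = data->limit;
    data->level++;             // addl(Operand(base_reg, kLevelOffset), 1)
  }
  ~ScopeSketch() {
    data_->level--;            // subl(Operand(base_reg, kLevelOffset), 1)
    data_->next = prev_next_;  // restore the saved next pointer
    if (data_->limit != prev_limit_) {
      data_->limit = prev_limit_;
      std::puts("delete extensions");  // V8's DeleteExtensions slow path
    }
  }
 private:
  HandleScopeData* data_;
  void** prev_next_;
  void** prev_limit_;
};

int main() {
  HandleScopeData data;
  { ScopeSketch scope(&data); }
  std::printf("level back to %d\n", data.level);
}
```
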
 
 
@@ -766,11 +783,20 @@
 }
 
 
+MaybeObject* MacroAssembler::TryJumpToExternalReference(
+    const ExternalReference& ext, int result_size) {
+  // Set the entry point and jump to the C entry runtime stub.
+  LoadAddress(rbx, ext);
+  CEntryStub ces(result_size);
+  return TryTailCallStub(&ces);
+}
+
+
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  // Calls are not allowed in some stubs.
+  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -799,64 +825,6 @@
 }
 
 
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const Register saved_regs[] = {
-  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
-  REG(r9), REG(r10), REG(r11)
-};
-
-#undef REG
-
-static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
-
-
-void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
-                                     Register exclusion1,
-                                     Register exclusion2,
-                                     Register exclusion3) {
-  // We don't allow a GC during a store buffer overflow so there is no need to
-  // store the registers in any particular way, but we do have to store and
-  // restore them.
-  for (int i = 0; i < kNumberOfSavedRegs; i++) {
-    Register reg = saved_regs[i];
-    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
-      push(reg);
-    }
-  }
-  // R12 to r15 are callee save on all platforms.
-  if (fp_mode == kSaveFPRegs) {
-    CpuFeatures::Scope scope(SSE2);
-    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
-    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-      XMMRegister reg = XMMRegister::from_code(i);
-      movsd(Operand(rsp, i * kDoubleSize), reg);
-    }
-  }
-}
-
-
-void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
-                                    Register exclusion1,
-                                    Register exclusion2,
-                                    Register exclusion3) {
-  if (fp_mode == kSaveFPRegs) {
-    CpuFeatures::Scope scope(SSE2);
-    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
-      XMMRegister reg = XMMRegister::from_code(i);
-      movsd(reg, Operand(rsp, i * kDoubleSize));
-    }
-    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
-  }
-  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
-    Register reg = saved_regs[i];
-    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
-      pop(reg);
-    }
-  }
-}
-
-
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2121,7 +2089,7 @@
   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ASCII strings.
+  // Check that both are flat ascii strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2167,7 +2135,7 @@
   movq(scratch1, first_object_instance_type);
   movq(scratch2, second_object_instance_type);
 
-  // Check that both are flat ASCII strings.
+  // Check that both are flat ascii strings.
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
@@ -2245,43 +2213,6 @@
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    movq(result, Operand(result, 0));
-  } else {
-    Move(result, object);
-  }
-}
-
-
-void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<JSGlobalPropertyCell> cell =
-        isolate()->factory()->NewJSGlobalPropertyCell(object);
-    movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    movq(kScratchRegister, Operand(kScratchRegister, 0));
-    push(kScratchRegister);
-  } else {
-    Push(object);
-  }
-}
-
-
-void MacroAssembler::LoadGlobalCell(Register dst,
-                                    Handle<JSGlobalPropertyCell> cell) {
-  if (dst.is(rax)) {
-    load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
-  } else {
-    movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
-    movq(dst, Operand(dst, 0));
-  }
-}
-
-
 void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
@@ -2305,13 +2236,6 @@
 }
 
 
-void MacroAssembler::TestBit(const Operand& src, int bits) {
-  int byte_offset = bits / kBitsPerByte;
-  int bit_in_byte = bits & (kBitsPerByte - 1);
-  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
-}
-
-
 void MacroAssembler::Jump(ExternalReference ext) {
   LoadAddress(kScratchRegister, ext);
   jmp(kScratchRegister);
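
The deleted TestBit is the standard flat-bit-index split: a byte offset plus a bit within that byte, which is what let TryGetFunctionPrototype (further down) probe SharedFunctionInfo::kBoundFunction with a single testb. The same arithmetic in plain C++:

```cpp
#include <cstdint>
#include <cstdio>

// The arithmetic behind the removed TestBit: turn a flat bit index into
// a byte offset plus a bit-within-byte so one byte-wide test suffices.
constexpr int kBitsPerByte = 8;

bool TestBitSketch(const uint8_t* base, int bits) {
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  return (base[byte_offset] & (1u << bit_in_byte)) != 0;
}

int main() {
  uint8_t flags[4] = {0, 0x04, 0, 0};
  std::printf("%d\n", TestBitSketch(flags, 10));  // byte 1, bit 2: set
}
```
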
@@ -2425,8 +2349,7 @@
 
 // Order general registers are pushed by Pushad:
 // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-const int
-MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
     0,
     1,
     2,
@@ -2461,146 +2384,146 @@
 }
 
 
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
-                                    int handler_index) {
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+                                    HandlerType type) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
 
-  // We will build up the handler from the bottom by pushing on the stack.
-  // First push the frame pointer and context.
-  if (kind == StackHandler::JS_ENTRY) {
-    // The frame pointer does not point to a JS frame so we save NULL for
-    // rbp. We expect the code throwing an exception to check rbp before
-    // dereferencing it to restore the context.
-    push(Immediate(0));  // NULL frame pointer.
-    Push(Smi::FromInt(0));  // No context.
-  } else {
+  // The pc (return address) is already on TOS.  This code pushes state,
+  // frame pointer, context, and current handler.
+  if (try_location == IN_JAVASCRIPT) {
+    if (type == TRY_CATCH_HANDLER) {
+      push(Immediate(StackHandler::TRY_CATCH));
+    } else {
+      push(Immediate(StackHandler::TRY_FINALLY));
+    }
     push(rbp);
     push(rsi);
+  } else {
+    ASSERT(try_location == IN_JS_ENTRY);
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for rbp. We expect the code throwing an exception to check rbp
+    // before dereferencing it to restore the context.
+    push(Immediate(StackHandler::ENTRY));
+    push(Immediate(0));  // NULL frame pointer.
+    Push(Smi::FromInt(0));  // No context.
   }
-
-  // Push the state and the code object.
-  unsigned state =
-      StackHandler::IndexField::encode(handler_index) |
-      StackHandler::KindField::encode(kind);
-  push(Immediate(state));
-  Push(CodeObject());
-
-  // Link the current handler as the next handler.
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  push(ExternalOperand(handler_address));
-  // Set this new handler as the current one.
-  movq(ExternalOperand(handler_address), rsp);
+  // Save the current handler.
+  Operand handler_operand =
+      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
+  push(handler_operand);
+  // Link this handler.
+  movq(handler_operand, rsp);
 }
 
 
 void MacroAssembler::PopTryHandler() {
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  pop(ExternalOperand(handler_address));
+  ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+  // Unlink this handler.
+  Operand handler_operand =
+      ExternalOperand(ExternalReference(Isolate::kHandlerAddress, isolate()));
+  pop(handler_operand);
+  // Remove the remaining fields.
   addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
 
 
-void MacroAssembler::JumpToHandlerEntry() {
-  // Compute the handler entry address and jump to it.  The handler table is
-  // a fixed array of (smi-tagged) code offsets.
-  // rax = exception, rdi = code object, rdx = state.
-  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
-  shr(rdx, Immediate(StackHandler::kKindWidth));
-  movq(rdx, FieldOperand(rbx, rdx, times_8, FixedArray::kHeaderSize));
-  SmiToInteger64(rdx, rdx);
-  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
-  jmp(rdi);
-}
-
-
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in rax.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // Keep thrown value in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
-  // Drop the stack pointer to the top of the top handler.
+
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
-  movq(rsp, ExternalOperand(handler_address));
-  // Restore the next handler.
-  pop(ExternalOperand(handler_address));
-
-  // Remove the code object and state, compute the handler address in rdi.
-  pop(rdi);  // Code object.
-  pop(rdx);  // Offset and state.
-
-  // Restore the context and frame pointer.
+  Operand handler_operand = ExternalOperand(handler_address);
+  movq(rsp, handler_operand);
+  // Get the next handler in the chain.
+  pop(handler_operand);
   pop(rsi);  // Context.
   pop(rbp);  // Frame pointer.
+  pop(rdx);  // State.
 
   // If the handler is a JS frame, restore the context to the frame.
-  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
-  // rbp or rsi.
+  // (rdx == ENTRY) == (rbp == 0) == (rsi == 0), so we could test any
+  // of them.
   Label skip;
-  testq(rsi, rsi);
-  j(zero, &skip, Label::kNear);
+  cmpq(rdx, Immediate(StackHandler::ENTRY));
+  j(equal, &skip, Label::kNear);
   movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
   bind(&skip);
 
-  JumpToHandlerEntry();
+  ret(0);
 }
 
 
-void MacroAssembler::ThrowUncatchable(Register value) {
+void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
+                                      Register value) {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
-  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
-  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
-  // The exception is expected in rax.
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 4 * kPointerSize);
+  // Keep thrown value in rax.
   if (!value.is(rax)) {
     movq(rax, value);
   }
-  // Drop the stack pointer to the top of the top stack handler.
+  // Fetch top stack handler.
   ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
   Load(rsp, handler_address);
 
-  // Unwind the handlers until the top ENTRY handler is found.
-  Label fetch_next, check_kind;
-  jmp(&check_kind, Label::kNear);
-  bind(&fetch_next);
-  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  bind(&loop);
+  // Load the type of the current stack handler.
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
+  j(equal, &done, Label::kNear);
+  // Fetch the next handler in the list.
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  movq(rsp, Operand(rsp, kNextOffset));
+  jmp(&loop);
+  bind(&done);
 
-  bind(&check_kind);
-  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
-  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
-        Immediate(StackHandler::KindField::kMask));
-  j(not_zero, &fetch_next);
+  // Set the top handler address to the next handler past the current ENTRY handler.
+  Operand handler_operand = ExternalOperand(handler_address);
+  pop(handler_operand);
 
-  // Set the top handler address to next handler past the top ENTRY handler.
-  pop(ExternalOperand(handler_address));
+  if (type == OUT_OF_MEMORY) {
+    // Set external caught exception to false.
+    ExternalReference external_caught(
+        Isolate::kExternalCaughtExceptionAddress, isolate());
+    Set(rax, static_cast<int64_t>(false));
+    Store(external_caught, rax);
 
-  // Remove the code object and state, compute the handler address in rdi.
-  pop(rdi);  // Code object.
-  pop(rdx);  // Offset and state.
+    // Set pending exception and rax to out of memory exception.
+    ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
+                                        isolate());
+    movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+    Store(pending_exception, rax);
+  }
 
-  // Clear the context pointer and frame pointer (0 was saved in the handler).
-  pop(rsi);
-  pop(rbp);
+  // Discard the context saved in the handler and clear the context pointer.
+  pop(rdx);
+  Set(rsi, 0);
 
-  JumpToHandlerEntry();
+  pop(rbp);  // Restore frame pointer.
+  pop(rdx);  // Discard state.
+
+  ret(0);
 }
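
ThrowUncatchable's restored loop walks that chain without running intermediate handlers: it follows next until it reaches a record whose state word says ENTRY, then returns into the JS entry frame. The same walk over a modeled handler record:

```cpp
#include <cstdio>

// The unwind loop restored in ThrowUncatchable, modeled in C++: skip
// every TRY_CATCH/TRY_FINALLY record and stop at the first ENTRY one.
// The enum ordering is illustrative.
enum HandlerStateSketch { ENTRY = 0, TRY_CATCH = 1, TRY_FINALLY = 2 };

struct Handler {
  Handler* next;
  HandlerStateSketch state;
};

Handler* UnwindToEntry(Handler* top) {
  // cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
  while (top->state != ENTRY) {
    top = top->next;  // movq(rsp, Operand(rsp, kNextOffset))
  }
  return top;
}

int main() {
  Handler entry{nullptr, ENTRY};
  Handler finally_h{&entry, TRY_FINALLY};
  Handler catch_h{&finally_h, TRY_CATCH};
  std::printf("stopped at state %d\n", UnwindToEntry(&catch_h)->state);
}
```
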
 
 
@@ -2644,133 +2567,22 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 0);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Label* fail,
-                                             Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_ELEMENTS == 1);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
-  j(below_equal, fail, distance);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiOnlyElements(Register map,
-                                              Label* fail,
-                                              Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register maybe_number,
-    Register elements,
-    Register index,
-    XMMRegister xmm_scratch,
-    Label* fail) {
-  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
-
-  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
-  CheckMap(maybe_number,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  cmpl(FieldOperand(maybe_number, offset),
-       Immediate(kNaNOrInfinityLowerBoundUpper32));
-  j(greater_equal, &maybe_nan, Label::kNear);
-
-  bind(&not_nan);
-  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
-  bind(&have_double_value);
-  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
-        xmm_scratch);
-  jmp(&done);
-
-  bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  j(greater, &is_nan, Label::kNear);
-  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
-  j(zero, &not_nan);
-  bind(&is_nan);
-  // Convert all NaNs to the same canonical NaN value when they are stored in
-  // the double array.
-  Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  movq(xmm_scratch, kScratchRegister);
-  jmp(&have_double_value, Label::kNear);
-
-  bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  SmiToInteger32(kScratchRegister, maybe_number);
-  cvtlsi2sd(xmm_scratch, kScratchRegister);
-  movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
-        xmm_scratch);
-  bind(&done);
-}
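
The deleted StoreNumberToDoubleElements canonicalizes NaNs because FixedDoubleArray reserves one specific NaN payload as its hole sentinel; storing arbitrary NaN bit patterns could collide with it. A sketch of the classification it performs on the number's upper bits (the constants here are assumptions):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Sketch of the NaN classification removed above: doubles whose upper
// half reaches the NaN/Infinity range are inspected, and every actual
// NaN is replaced with one canonical quiet NaN so the double array's
// hole sentinel (another NaN payload) stays unambiguous. Constants
// here are assumptions.
constexpr uint32_t kNaNOrInfinityLowerBoundUpper32Sketch = 0x7ff00000u;
constexpr uint64_t kCanonicalNaNSketch = 0x7ff8000000000000ULL;

uint64_t CanonicalizeForDoubleArray(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32) & 0x7fffffffu;
  if (upper >= kNaNOrInfinityLowerBoundUpper32Sketch) {
    // NaN iff any fraction bit is set; +/-Infinity passes through.
    if ((bits & 0x000fffffffffffffULL) != 0) return kCanonicalNaNSketch;
  }
  return bits;  // ordinary doubles are stored unchanged
}

int main() {
  std::printf("%#llx\n", static_cast<unsigned long long>(
                             CanonicalizeForDoubleArray(std::nan("payload"))));
}
```
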
-
-
-void MacroAssembler::CompareMap(Register obj,
-                                Handle<Map> map,
-                                Label* early_success,
-                                CompareMapMode mode) {
-  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
-  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
-    Map* transitioned_fast_element_map(
-        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
-    ASSERT(transitioned_fast_element_map == NULL ||
-           map->elements_kind() != FAST_ELEMENTS);
-    if (transitioned_fast_element_map != NULL) {
-      j(equal, early_success, Label::kNear);
-      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
-          Handle<Map>(transitioned_fast_element_map));
-    }
-
-    Map* transitioned_double_map(
-        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
-    ASSERT(transitioned_double_map == NULL ||
-           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
-    if (transitioned_double_map != NULL) {
-      j(equal, early_success, Label::kNear);
-      Cmp(FieldOperand(obj, HeapObject::kMapOffset),
-          Handle<Map>(transitioned_double_map));
-    }
-  }
-}
-
-
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              SmiCheckType smi_check_type,
-                              CompareMapMode mode) {
+                              SmiCheckType smi_check_type) {
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
-
-  Label success;
-  CompareMap(obj, map, &success, mode);
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
   j(not_equal, fail);
-  bind(&success);
 }
 
 
@@ -2860,14 +2672,6 @@
 }
 
 
-void MacroAssembler::AbortIfNotZeroExtended(Register int32_register) {
-  ASSERT(!int32_register.is(kScratchRegister));
-  movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
-  cmpq(kScratchRegister, int32_register);
-  Assert(above_equal, "32 bit value in register is not zero-extended");
-}
-
-
 void MacroAssembler::AbortIfNotString(Register object) {
   testb(object, Immediate(kSmiTagMask));
   Assert(not_equal, "Operand is not a string");
@@ -2903,8 +2707,7 @@
 
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
-                                             Label* miss,
-                                             bool miss_on_bound_function) {
+                                             Label* miss) {
   // Check that the receiver isn't a smi.
   testl(function, Immediate(kSmiTagMask));
   j(zero, miss);
@@ -2913,17 +2716,6 @@
   CmpObjectType(function, JS_FUNCTION_TYPE, result);
   j(not_equal, miss);
 
-  if (miss_on_bound_function) {
-    movq(kScratchRegister,
-         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
-    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
-    // field).
-    TestBit(FieldOperand(kScratchRegister,
-                         SharedFunctionInfo::kCompilerHintsOffset),
-            SharedFunctionInfo::kBoundFunction);
-    j(not_zero, miss);
-  }
-
   // Make sure that the function has an instance prototype.
   Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
@@ -2995,10 +2787,10 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
+  ASSERT(allow_stub_calls());
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
-  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif  // ENABLE_DEBUGGER_SUPPORT
@@ -3024,34 +2816,27 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  bool definitely_mismatches = false;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
-                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code));
-      SetCallKind(rcx, call_kind);
-      call(code);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(rcx, call_kind);
-      jmp(code);
-    }
-    bind(&done);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
+    call(code);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
+    jmp(code);
   }
+  bind(&done);
 }
 
 
@@ -3062,35 +2847,28 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   Label done;
-  bool definitely_mismatches = false;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
                  code,
                  dummy,
                  &done,
-                 &definitely_mismatches,
                  flag,
                  Label::kNear,
                  call_wrapper,
                  call_kind);
-  if (!definitely_mismatches) {
-    if (flag == CALL_FUNCTION) {
-      call_wrapper.BeforeCall(CallSize(code));
-      SetCallKind(rcx, call_kind);
-      Call(code, rmode);
-      call_wrapper.AfterCall();
-    } else {
-      ASSERT(flag == JUMP_FUNCTION);
-      SetCallKind(rcx, call_kind);
-      Jump(code, rmode);
-    }
-    bind(&done);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
+    Call(code, rmode);
+    call_wrapper.AfterCall();
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
+    Jump(code, rmode);
   }
+  bind(&done);
 }
 
 
@@ -3099,9 +2877,6 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -3116,24 +2891,34 @@
 }
 
 
-void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
-  // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
-
+  ASSERT(function->is_compiled());
   // Get the function and setup the context.
-  LoadHeapObject(rdi, function);
+  Move(rdi, Handle<JSFunction>(function));
   movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-  ParameterCount expected(function->shared()->formal_parameter_count());
-  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+  if (V8::UseCrankshaft()) {
+    // Since Crankshaft can recompile a function, we need to load
+    // the Code object every time we call the function.
+    movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
+  } else {
+    // Invoke the cached code.
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    InvokeCode(code,
+               expected,
+               actual,
+               RelocInfo::CODE_TARGET,
+               flag,
+               call_wrapper,
+               call_kind);
+  }
 }
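
The two branches reflect whether compiled code can move out from under a call site: with Crankshaft a function may be recompiled at any time, so each call reloads the current entry from the JSFunction's code-entry slot, while the non-Crankshaft path can invoke the cached code handle directly. The indirection, modeled:

```cpp
#include <cstdio>

// The per-call code indirection from the Crankshaft branch above: calls
// go through a mutable entry slot in the function object, so swapping in
// recompiled code retargets every future call without patching callers.
struct JSFunctionSketch {
  void (*code_entry)();  // stands in for JSFunction::kCodeEntryOffset
};

void Unoptimized() { std::puts("unoptimized"); }
void Optimized() { std::puts("optimized"); }

void InvokeSketch(JSFunctionSketch* f) {
  // movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); call rdx
  f->code_entry();
}

int main() {
  JSFunctionSketch f{Unoptimized};
  InvokeSketch(&f);
  f.code_entry = Optimized;  // recompilation replaces the entry in place
  InvokeSketch(&f);          // existing call sites pick it up for free
}
```
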
 
 
@@ -3142,13 +2927,11 @@
                                     Handle<Code> code_constant,
                                     Register code_register,
                                     Label* done,
-                                    bool* definitely_mismatches,
                                     InvokeFlag flag,
                                     Label::Distance near_jump,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
   bool definitely_matches = false;
-  *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
     ASSERT(actual.is_immediate());
@@ -3164,7 +2947,6 @@
         // arguments.
         definitely_matches = true;
       } else {
-        *definitely_mismatches = true;
         Set(rbx, expected.immediate());
       }
     }
@@ -3201,9 +2983,7 @@
       SetCallKind(rcx, call_kind);
       Call(adaptor, RelocInfo::CODE_TARGET);
       call_wrapper.AfterCall();
-      if (!*definitely_mismatches) {
-        jmp(done, near_jump);
-      }
+      jmp(done, near_jump);
     } else {
       SetCallKind(rcx, call_kind);
       Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -3242,7 +3022,7 @@
 
 
 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
-  // Set up the frame structure on the stack.
+  // Setup the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -3302,7 +3082,7 @@
 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
   EnterExitFramePrologue(true);
 
-  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
   lea(r15, Operand(rbp, r14, times_pointer_size, offset));
@@ -3836,7 +3616,7 @@
     subq(scratch1, Immediate(kHeaderAlignment));
   }
 
-  // Allocate ASCII string in new space.
+  // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
                      times_1,
                      scratch1,
@@ -3992,20 +3772,6 @@
 }
 
 
-void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
-                                                Register end_offset,
-                                                Register filler) {
-  Label loop, entry;
-  jmp(&entry);
-  bind(&loop);
-  movq(Operand(start_offset, 0), filler);
-  addq(start_offset, Immediate(kPointerSize));
-  bind(&entry);
-  cmpq(start_offset, end_offset);
-  j(less, &loop);
-}
-
-
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -4031,46 +3797,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  // Load the global or builtins object from the current context.
-  movq(scratch, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
-  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
-
-  // Check that the function's map is the same as the expected cached map.
-  int expected_index =
-      Context::GetContextMapIndexFromElementsKind(expected_kind);
-  cmpq(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
-  j(not_equal, no_map_match);
-
-  // Use the transitioned cached map.
-  int trans_index =
-      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
-  movq(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
-}
-
-
-void MacroAssembler::LoadInitialArrayMap(
-    Register function_in, Register scratch, Register map_out) {
-  ASSERT(!function_in.is(map_out));
-  Label done;
-  movq(map_out, FieldOperand(function_in,
-                             JSFunction::kPrototypeOrInitialMapOffset));
-  if (!FLAG_smi_only_arrays) {
-    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                        FAST_ELEMENTS,
-                                        map_out,
-                                        scratch,
-                                        &done);
-  }
-  bind(&done);
-}
-
 #ifdef _WIN64
 static const int kRegisterPassedArguments = 4;
 #else
@@ -4145,7 +3871,6 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
-  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -4160,17 +3885,6 @@
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
-}
-
-
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -4191,241 +3905,6 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
-
-void MacroAssembler::CheckPageFlag(
-    Register object,
-    Register scratch,
-    int mask,
-    Condition cc,
-    Label* condition_met,
-    Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
-  if (scratch.is(object)) {
-    and_(scratch, Immediate(~Page::kPageAlignmentMask));
-  } else {
-    movq(scratch, Immediate(~Page::kPageAlignmentMask));
-    and_(scratch, object);
-  }
-  if (mask < (1 << kBitsPerByte)) {
-    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
-          Immediate(static_cast<uint8_t>(mask)));
-  } else {
-    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
-  }
-  j(cc, condition_met, condition_met_distance);
-}
-
-
-void MacroAssembler::JumpIfBlack(Register object,
-                                 Register bitmap_scratch,
-                                 Register mask_scratch,
-                                 Label* on_black,
-                                 Label::Distance on_black_distance) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
-  GetMarkBits(object, bitmap_scratch, mask_scratch);
-
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  // The mask_scratch register contains a 1 at the position of the first bit
-  // and a 0 at all other positions, including the position of the second bit.
-  movq(rcx, mask_scratch);
-  // Make rcx into a mask that covers both marking bits using the operation
-  // rcx = mask | (mask << 1).
-  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
-  // Note that we are using a 4-byte aligned 8-byte load.
-  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
-  cmpq(mask_scratch, rcx);
-  j(equal, on_black, on_black_distance);
-}
-
-
-// Detect some, but not all, common pointer-free objects.  This is used by the
-// incremental write barrier which doesn't care about oddballs (they are always
-// marked black immediately so this code is not hit).
-void MacroAssembler::JumpIfDataObject(
-    Register value,
-    Register scratch,
-    Label* not_data_object,
-    Label::Distance not_data_object_distance) {
-  Label is_data_object;
-  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
-  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  j(equal, &is_data_object, Label::kNear);
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
-        Immediate(kIsIndirectStringMask | kIsNotStringMask));
-  j(not_zero, not_data_object, not_data_object_distance);
-  bind(&is_data_object);
-}
-
-
-void MacroAssembler::GetMarkBits(Register addr_reg,
-                                 Register bitmap_reg,
-                                 Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
-  movq(bitmap_reg, addr_reg);
-  // Sign extended 32 bit immediate.
-  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
-  movq(rcx, addr_reg);
-  int shift =
-      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
-  shrl(rcx, Immediate(shift));
-  and_(rcx,
-       Immediate((Page::kPageAlignmentMask >> shift) &
-                 ~(Bitmap::kBytesPerCell - 1)));
-
-  addq(bitmap_reg, rcx);
-  movq(rcx, addr_reg);
-  shrl(rcx, Immediate(kPointerSizeLog2));
-  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
-  movl(mask_reg, Immediate(1));
-  shl_cl(mask_reg);
-}
-
-
-void MacroAssembler::EnsureNotWhite(
-    Register value,
-    Register bitmap_scratch,
-    Register mask_scratch,
-    Label* value_is_white_and_not_data,
-    Label::Distance distance) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
-  GetMarkBits(value, bitmap_scratch, mask_scratch);
-
-  // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  Label done;
-
-  // Since both black and grey have a 1 in the first position and white does
-  // not have a 1 there we only need to check one bit.
-  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-  j(not_zero, &done, Label::kNear);
-
-  if (FLAG_debug_code) {
-    // Check for impossible bit pattern.
-    Label ok;
-    push(mask_scratch);
-    // shl.  May overflow making the check conservative.
-    addq(mask_scratch, mask_scratch);
-    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-    j(zero, &ok, Label::kNear);
-    int3();
-    bind(&ok);
-    pop(mask_scratch);
-  }
-
-  // Value is white.  We check whether it is data that doesn't need scanning.
-  // Currently only checks for HeapNumber and non-cons strings.
-  Register map = rcx;  // Holds map while checking type.
-  Register length = rcx;  // Holds length of object after checking type.
-  Label not_heap_number;
-  Label is_data_object;
-
-  // Check for heap-number
-  movq(map, FieldOperand(value, HeapObject::kMapOffset));
-  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
-  j(not_equal, &not_heap_number, Label::kNear);
-  movq(length, Immediate(HeapNumber::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_heap_number);
-  // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
-  // If it's a string and it's not a cons string then it's an object containing
-  // no GC pointers.
-  Register instance_type = rcx;
-  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
-  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
-  j(not_zero, value_is_white_and_not_data);
-  // It's a non-indirect (non-cons and non-slice) string.
-  // If it's external, the length is just ExternalString::kSize.
-  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
-  Label not_external;
-  // External strings are the only ones with the kExternalStringTag bit
-  // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
-  testb(instance_type, Immediate(kExternalStringTag));
-  j(zero, &not_external, Label::kNear);
-  movq(length, Immediate(ExternalString::kSize));
-  jmp(&is_data_object, Label::kNear);
-
-  bind(&not_external);
-  // Sequential string, either ASCII or UC16.
-  ASSERT(kAsciiStringTag == 0x04);
-  and_(length, Immediate(kStringEncodingMask));
-  xor_(length, Immediate(kStringEncodingMask));
-  addq(length, Immediate(0x04));
-  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
-  imul(length, FieldOperand(value, String::kLengthOffset));
-  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
-  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
-  and_(length, Immediate(~kObjectAlignmentMask));
-
-  bind(&is_data_object);
-  // Value is a data object, and it is white.  Mark it black.  Since we know
-  // that the object is white we can make it black by flipping one bit.
-  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
-
-  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
-  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
-
-  bind(&done);
-}
-
-
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
-  Label next;
-  Register empty_fixed_array_value = r8;
-  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
-  Register empty_descriptor_array_value = r9;
-  LoadRoot(empty_descriptor_array_value,
-              Heap::kEmptyDescriptorArrayRootIndex);
-  movq(rcx, rax);
-  bind(&next);
-
-  // Check that there are no elements.  Register rcx contains the
-  // current JS object we've reached through the prototype chain.
-  cmpq(empty_fixed_array_value,
-       FieldOperand(rcx, JSObject::kElementsOffset));
-  j(not_equal, call_runtime);
-
-  // Check that instance descriptors are not empty so that we can
-  // check for an enum cache.  Leave the map in rbx for the subsequent
-  // prototype load.
-  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
-  movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOrBitField3Offset));
-  JumpIfSmi(rdx, call_runtime);
-
-  // Check that there is an enum cache in the non-empty instance
-  // descriptors (rdx).  This is the case if the next enumeration
-  // index field does not contain a smi.
-  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
-  JumpIfSmi(rdx, call_runtime);
-
-  // For all objects but the receiver, check that the cache is empty.
-  Label check_prototype;
-  cmpq(rcx, rax);
-  j(equal, &check_prototype, Label::kNear);
-  movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
-  cmpq(rdx, empty_fixed_array_value);
-  j(not_equal, call_runtime);
-
-  // Load the prototype from the map and loop if non-null.
-  bind(&check_prototype);
-  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  cmpq(rcx, null_value);
-  j(not_equal, &next);
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
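
For reference, the GetMarkBits sequence removed above locates an object's mark bits in the page-local marking bitmap: mask the address down to the page start, derive the byte offset of the 32-bit bitmap cell from the middle address bits, and build a one-bit mask from the low bits. A minimal C++ sketch of the same arithmetic, with illustrative constants assumed (the real values come from Page and Bitmap in this tree):

    // Sketch only: 1MB pages, 8-byte pointers, and 32-bit bitmap cells
    // are assumptions for illustration.
    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;
    const int kPointerSizeLog2 = 3;   // one mark bit per 8-byte word
    const int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell
    const int kBytesPerCellLog2 = 2;  // each cell is 4 bytes wide

    void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell, uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;  // page start
      // Byte offset of the bitmap cell, computed as one shift-and-mask,
      // exactly like the emitted shrl/and_ pair.
      int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
      uintptr_t cell_offset =
          (addr >> shift) & (kPageAlignmentMask >> shift) &
          ~((uintptr_t{1} << kBytesPerCellLog2) - 1);
      *cell = page + cell_offset;  // V8 adds MemoryChunk::kHeaderSize here
      int bit = (addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1);
      *mask = uint32_t{1} << bit;   // the movl/shl_cl pair above
    }

EnsureNotWhite builds on this: black and grey both have the first mark bit set, so a single testq of the cell against the mask separates white objects from live ones.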
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 6bb5cfe..ff6edc5 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
-#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -50,23 +49,18 @@
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-const Register kScratchRegister = { 10 };      // r10.
-const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
-const Register kRootRegister = { 13 };         // r13 (callee save).
+static const Register kScratchRegister = { 10 };      // r10.
+static const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
+static const Register kRootRegister = { 13 };         // r13 (callee save).
 // Value of smi in kSmiConstantRegister.
-const int kSmiConstantRegisterValue = 1;
+static const int kSmiConstantRegisterValue = 1;
 // Actual value of root register is offset from the root array's start
 // to take advantage of negative 8-bit displacement values.
-const int kRootRegisterBias = 128;
+static const int kRootRegisterBias = 128;
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
 
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
-
 // Forward declaration.
 class JumpTarget;
 
@@ -78,7 +72,6 @@
   ScaleFactor scale;
 };
 
-
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -141,145 +134,56 @@
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
 
-  // These functions do not arrange the registers in any particular order so
-  // they are not useful for calls that can cause a GC.  The caller can
-  // exclude up to 3 registers that do not need to be saved and restored.
-  void PushCallerSaved(SaveFPRegsMode fp_mode,
-                       Register exclusion1 = no_reg,
-                       Register exclusion2 = no_reg,
-                       Register exclusion3 = no_reg);
-  void PopCallerSaved(SaveFPRegsMode fp_mode,
-                      Register exclusion1 = no_reg,
-                      Register exclusion2 = no_reg,
-                      Register exclusion3 = no_reg);
+  // ---------------------------------------------------------------------------
+  // GC Support
 
-// ---------------------------------------------------------------------------
-// GC Support
+  // For page containing |object| mark region covering |addr| dirty.
+  // RecordWriteHelper only works if the object is not in new
+  // space.
+  void RecordWriteHelper(Register object,
+                         Register addr,
+                         Register scratch);
 
+  // Check if object is in new space. The condition cc can be equal or
+  // not_equal. If it is equal, the jump is taken if the object is in new
+  // space. The register scratch can be object itself, but it will be clobbered.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* branch,
+                  Label::Distance near_jump = Label::kFar);
 
-  enum RememberedSetFinalAction {
-    kReturnAtEnd,
-    kFallThroughAtEnd
-  };
-
-  // Record in the remembered set the fact that we have a pointer to new space
-  // at the address pointed to by the addr register.  Only works if addr is not
-  // in new space.
-  void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
-                           SaveFPRegsMode save_fp,
-                           RememberedSetFinalAction and_then);
-
-  void CheckPageFlag(Register object,
-                     Register scratch,
-                     int mask,
-                     Condition cc,
-                     Label* condition_met,
-                     Label::Distance condition_met_distance = Label::kFar);
-
-  // Check if object is in new space.  Jumps if the object is not in new space.
-  // The register scratch can be object itself, but scratch will be clobbered.
-  void JumpIfNotInNewSpace(Register object,
-                           Register scratch,
-                           Label* branch,
-                           Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, not_equal, branch, distance);
-  }
-
-  // Check if object is in new space.  Jumps if the object is in new space.
-  // The register scratch can be object itself, but it will be clobbered.
-  void JumpIfInNewSpace(Register object,
-                        Register scratch,
-                        Label* branch,
-                        Label::Distance distance = Label::kFar) {
-    InNewSpace(object, scratch, equal, branch, distance);
-  }
-
-  // Check if an object has the black incremental marking color.  Also uses rcx!
-  void JumpIfBlack(Register object,
-                   Register scratch0,
-                   Register scratch1,
-                   Label* on_black,
-                   Label::Distance on_black_distance = Label::kFar);
-
-  // Detects conservatively whether an object is data-only, i.e. whether it
-  // does not need to be scanned by the garbage collector.
-  void JumpIfDataObject(Register value,
-                        Register scratch,
-                        Label* not_data_object,
-                        Label::Distance not_data_object_distance);
-
-  // Checks the color of an object.  If the object is already grey or black
-  // then we just fall through, since it is already live.  If it is white and
-  // we can determine that it doesn't need to be scanned, then we just mark it
-  // black and fall through.  For the rest we jump to the label so the
-  // incremental marker can fix its assumptions.
-  void EnsureNotWhite(Register object,
-                      Register scratch1,
-                      Register scratch2,
-                      Label* object_is_white_and_not_data,
-                      Label::Distance distance);
-
-  // Notify the garbage collector that we wrote a pointer into an object.
-  // |object| is the object being stored into, |value| is the object being
-  // stored.  value and scratch registers are clobbered by the operation.
-  // The offset is the offset from the start of the object, not the offset from
-  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
-  void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
-
-  // As above, but the offset has the tag presubtracted.  For use with
-  // Operand(reg, off).
-  void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     save_fp,
-                     remembered_set_action,
-                     smi_check);
-  }
-
-  // Notify the garbage collector that we wrote a pointer into a fixed array.
-  // |array| is the array being stored into, |value| is the
-  // object being stored.  |index| is the array index represented as a non-smi.
-  // All registers are clobbered by the operation RecordWriteArray
-  // filters out smis so it does not update the write barrier if the
-  // value is a smi.
-  void RecordWriteArray(
-      Register array,
-      Register value,
-      Register index,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
-
-  // For page containing |object| mark region covering |address|
+  // For page containing |object| mark region covering [object+offset]
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. The address and value registers are clobbered by the
+  // object being stored. If |offset| is zero, then the |scratch|
+  // register contains the array index into the elements array
+  // represented as an untagged 32-bit integer. All registers are
+  // clobbered by the operation. RecordWrite filters out smis so it
+  // does not update the write barrier if the value is a smi.
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
+
+  // For page containing |object| mark region covering [address]
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. All registers are clobbered by the
   // operation.  RecordWrite filters out smis so it does not update
   // the write barrier if the value is a smi.
-  void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
-      SaveFPRegsMode save_fp,
-      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
-      SmiCheck smi_check = INLINE_SMI_CHECK);
+  void RecordWrite(Register object,
+                   Register address,
+                   Register value);
+
+  // For page containing |object| mark region covering [object+offset] dirty.
+  // The value is known to not be a smi.
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as an untagged 32-bit integer.
+  // All registers are clobbered by the operation.
+  void RecordWriteNonSmi(Register object,
+                         int offset,
+                         Register value,
+                         Register scratch);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -288,6 +192,15 @@
   void DebugBreak();
 #endif
 
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
   // Enter specific kind of exit frame; either in normal or
   // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
@@ -319,16 +232,16 @@
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   void InitializeRootRegister() {
-    ExternalReference roots_array_start =
-        ExternalReference::roots_array_start(isolate());
-    movq(kRootRegister, roots_array_start);
+    ExternalReference roots_address =
+        ExternalReference::roots_address(isolate());
+    movq(kRootRegister, roots_address);
     addq(kRootRegister, Immediate(kRootRegisterBias));
   }
 
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
-  // Set up call kind marking in rcx. The method takes rcx as an
+  // Setup call kind marking in rcx. The method takes rcx as an
   // explicit first parameter to make the code more readable at the
   // call sites.
   void SetCallKind(Register dst, CallKind kind);
@@ -357,7 +270,7 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  void InvokeFunction(Handle<JSFunction> function,
+  void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper,
@@ -726,7 +639,6 @@
   void Push(Smi* smi);
   void Test(const Operand& dst, Smi* source);
 
-
   // ---------------------------------------------------------------------------
   // String macros.
 
@@ -745,7 +657,7 @@
       Label* on_not_both_flat_ascii,
       Label::Distance near_jump = Label::kFar);
 
-  // Check whether the instance type represents a flat ASCII string. Jump to the
+  // Check whether the instance type represents a flat ascii string. Jump to the
   // label if not. If the instance type can be scratched specify same register
   // for both instance type and scratch.
   void JumpIfInstanceTypeIsNotSequentialAscii(
@@ -772,9 +684,6 @@
   // Move if the registers are not identical.
   void Move(Register target, Register source);
 
-  // Bit-field support.
-  void TestBit(const Operand& dst, int bit_index);
-
   // Handle support
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
@@ -784,22 +693,6 @@
   void Cmp(const Operand& dst, Smi* src);
   void Push(Handle<Object> source);
 
-  // Load a heap object and handle the case of new-space objects by
-  // indirecting via a global cell.
-  void LoadHeapObject(Register result, Handle<HeapObject> object);
-  void PushHeapObject(Handle<HeapObject> object);
-
-  void LoadObject(Register result, Handle<Object> object) {
-    if (object->IsHeapObject()) {
-      LoadHeapObject(result, Handle<HeapObject>::cast(object));
-    } else {
-      Move(result, object);
-    }
-  }
-
-  // Load a global cell into a register.
-  void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
-
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the rsp register.
   void Drop(int stack_elements);
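
The removed LoadHeapObject/LoadObject pair exists because generated code embeds object pointers as immediates, and new-space objects move on every scavenge; such constants must instead be loaded through a non-moving global property cell that the GC rewrites. A hedged sketch of that decision, reusing the declarations above (the factory call and exact relocation handling are assumptions for illustration, not code from this tree):

    // Sketch only; relies on the MacroAssembler declarations above.
    void LoadObjectSketch(MacroAssembler* masm, Register result,
                          Handle<Object> object) {
      if (object->IsHeapObject() &&
          masm->isolate()->heap()->InNewSpace(*object)) {
        // Moving object: embed a cell and load its value indirectly.
        Handle<JSGlobalPropertyCell> cell =
            masm->isolate()->factory()->NewJSGlobalPropertyCell(object);
        masm->LoadGlobalCell(result, cell);
      } else {
        masm->Move(result, object);  // stable pointer, embed directly
      }
    }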
@@ -867,46 +760,13 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Label* fail,
-                               Label::Distance distance = Label::kFar);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiOnlyElements(Register map,
-                                Label* fail,
-                                Label::Distance distance = Label::kFar);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by index in
-  // the FastDoubleElements array elements, otherwise jump to fail.  Note that
-  // index must not be smi-tagged.
-  void StoreNumberToDoubleElements(Register maybe_number,
-                                   Register elements,
-                                   Register index,
-                                   XMMRegister xmm_scratch,
-                                   Label* fail);
-
-  // Compare an object's map with the specified map and its transitioned
-  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
-  // result of map compare. If multiple map compares are required, the compare
-  // sequences branches to early_success.
-  void CompareMap(Register obj,
-                  Handle<Map> map,
-                  Label* early_success,
-                  CompareMapMode mode = REQUIRE_EXACT_MAP);
-
-  // Check if the map of an object is equal to a specified map and branch to
-  // label if not. Skip the smi check if not required (object is known to be a
-  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
-  // against maps that are ElementsKind transition maps of the specified map.
+  // Check if the map of an object is equal to a specified map and
+  // branch to label if not. Skip the smi check if not required
+  // (object is known to be a heap object)
   void CheckMap(Register obj,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type,
-                CompareMapMode mode = REQUIRE_EXACT_MAP);
+                SmiCheckType smi_check_type);
 
   // Check if the map of an object is equal to a specified map and branch to a
   // specified target if equal. Skip the smi check if not required (object is
@@ -949,10 +809,6 @@
   void AbortIfNotSmi(Register object);
   void AbortIfNotSmi(const Operand& object);
 
-  // Abort execution if a 64 bit register containing a 32 bit payload does not
-  // have zeros in the top 32 bits.
-  void AbortIfNotZeroExtended(Register reg);
-
   // Abort execution if argument is a string. Used in debug code.
   void AbortIfNotString(Register object);
 
@@ -964,8 +820,9 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link it into try handler chain.
-  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
 
   // Unlink the stack handler on top of the stack from the try handler chain.
   void PopTryHandler();
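
Both the old and new PushTryHandler/PopTryHandler signatures maintain the same structure: handler records threaded through the machine stack as a linked list, with the head cached per-thread so Throw can unwind to the innermost handler. A toy C++ model of that chain (the field layout is purely illustrative, not V8's StackHandler):

    // Toy model; fields and types are illustrative only.
    #include <cstdint>

    struct StackHandler {
      StackHandler* next;  // enclosing handler, restored on pop
      uintptr_t pc;        // where Throw resumes execution
    };

    struct ThreadLocalTop {
      StackHandler* handler;  // innermost handler, what Throw consults
    };

    void PushTryHandlerSketch(ThreadLocalTop* top, StackHandler* h,
                              uintptr_t pc) {
      h->pc = pc;
      h->next = top->handler;  // link on top of the chain
      top->handler = h;
    }

    void PopTryHandlerSketch(ThreadLocalTop* top) {
      top->handler = top->handler->next;  // unlink the innermost handler
    }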
@@ -975,7 +832,7 @@
   void Throw(Register value);
 
   // Propagate an uncatchable exception out of the current JS stack.
-  void ThrowUncatchable(Register value);
+  void ThrowUncatchable(UncatchableExceptionType type, Register value);
 
   // ---------------------------------------------------------------------------
   // Inline caching support
@@ -1109,8 +966,7 @@
   // clobbered.
   void TryGetFunctionPrototype(Register function,
                                Register result,
-                               Label* miss,
-                               bool miss_on_bound_function = false);
+                               Label* miss);
 
   // Generates code for reporting that an illegal operation has
   // occurred.
@@ -1125,22 +981,6 @@
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the global context if the map in register
-  // map_in_out is the cached Array map in the global context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
-  // Load the initial map for new Arrays from a JSFunction.
-  void LoadInitialArrayMap(Register function_in,
-                           Register scratch,
-                           Register map_out);
-
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -1154,9 +994,19 @@
   // Call a code stub.
   void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
 
+  // Call a code stub and return the code object called.  Try to generate
+  // the code if necessary.  Do not perform a GC but instead return a retry
+  // after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
+
   // Tail call a code stub (jump).
   void TailCallStub(CodeStub* stub);
 
+  // Tail call a code stub (jump) and return the code object called.  Try to
+  // generate the code if necessary.  Do not perform a GC but instead return
+  // a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
+
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
 
@@ -1166,9 +1016,19 @@
   // Call a runtime function and save the value of XMM registers.
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);
 
+  // Call a runtime function, returning the CodeStub object called.
+  // Try to generate the stub code if necessary.  Do not perform a GC
+  // but instead return a retry after GC failure.
+  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
+                                              int num_arguments);
+
   // Convenience function: Same as above, but takes the fid instead.
   void CallRuntime(Runtime::FunctionId id, int num_arguments);
 
+  // Convenience function: Same as above, but takes the fid instead.
+  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
+                                              int num_arguments);
+
   // Convenience function: call an external reference.
   void CallExternalReference(const ExternalReference& ext,
                              int num_arguments);
@@ -1180,26 +1040,38 @@
                                  int num_arguments,
                                  int result_size);
 
+  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
+      const ExternalReference& ext, int num_arguments, int result_size);
+
   // Convenience function: tail call a runtime routine (jump).
   void TailCallRuntime(Runtime::FunctionId fid,
                        int num_arguments,
                        int result_size);
 
+  MUST_USE_RESULT  MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
+                                                   int num_arguments,
+                                                   int result_size);
+
   // Jump to a runtime routine.
   void JumpToExternalReference(const ExternalReference& ext, int result_size);
 
-  // Prepares stack to put arguments (aligns and so on).  WIN64 calling
-  // convention requires to put the pointer to the return value slot into
-  // rcx (rcx must be preserverd until CallApiFunctionAndReturn).  Saves
-  // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
+  // Jump to a runtime routine.
+  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
+                                          int result_size);
+
+  // Prepares stack to put arguments (aligns and so on).
+  // The WIN64 calling convention requires putting the pointer to the return
+  // value slot into rcx (rcx must be preserved until TryCallApiFunctionAndReturn).
+  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
   // inside the exit frame (not GCed) accessible via StackSpaceOperand.
   void PrepareCallApiFunction(int arg_stack_space);
 
-  // Calls an API function.  Allocates HandleScope, extracts returned value
-  // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
-  // caller-save registers.  Restores context.  On return removes
-  // stack_space * kPointerSize (GCed).
-  void CallApiFunctionAndReturn(Address function_address, int stack_space);
+  // Calls an API function. Allocates HandleScope, extracts
+  // returned value from handle and propagates exceptions.
+  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
+  // On return removes stack_space * kPointerSize (GCed).
+  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
+      ApiFunction* function, int stack_space);
 
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, arguments must be stored in esp[0], esp[4],
@@ -1248,13 +1120,6 @@
                  int min_length = 0,
                  Register scratch = kScratchRegister);
 
-  // Initialize fields with filler values.  Fields starting at |start_offset|
-  // up to but not including |end_offset| are overwritten with the value in
-  // |filler|.  At the end of the loop, |start_offset| takes the value of
-  // |end_offset|.
-  void InitializeFieldsWithFiller(Register start_offset,
-                                  Register end_offset,
-                                  Register filler);
-
 
   // ---------------------------------------------------------------------------
   // StatsCounter support
@@ -1287,33 +1152,20 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
-  void set_has_frame(bool value) { has_frame_ = value; }
-  bool has_frame() { return has_frame_; }
-  inline bool AllowThisStubCall(CodeStub* stub);
 
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
-  // Expects object in rax and returns map with validated enum cache
-  // in rax.  Assumes that any other register can be used as a scratch.
-  void CheckEnumCache(Register null_value,
-                      Label* call_runtime);
-
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
-  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
+  static int kSafepointPushRegisterIndices[Register::kNumRegisters];
   static const int kNumSafepointSavedRegisters = 11;
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
   bool generating_stub_;
   bool allow_stub_calls_;
-  bool has_frame_;
   bool root_array_available_;
 
   // Returns a register holding the smi value. The register MUST NOT be
@@ -1332,12 +1184,15 @@
                       Handle<Code> code_constant,
                       Register code_register,
                       Label* done,
-                      bool* definitely_mismatches,
                       InvokeFlag flag,
                       Label::Distance near_jump = Label::kFar,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
   void EnterExitFramePrologue(bool save_rax);
 
   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1364,24 +1219,6 @@
                                Register scratch,
                                bool gc_allowed);
 
-  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
-                  Label* branch,
-                  Label::Distance distance = Label::kFar);
-
-  // Helper for finding the mark bits for an address.  Afterwards, the
-  // bitmap register points at the word with the mark bits and the mask
-  // the position of the first bit.  Uses rcx as scratch and leaves addr_reg
-  // unchanged.
-  inline void GetMarkBits(Register addr_reg,
-                          Register bitmap_reg,
-                          Register mask_reg);
-
-  // Helper for throwing exceptions.  Compute a handler address and jump to
-  // it.  See the implementation for register usage.
-  void JumpToHandlerEntry();
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
@@ -1419,32 +1256,32 @@
 // Static helper functions.
 
 // Generate an Operand for loading a field from an object.
-inline Operand FieldOperand(Register object, int offset) {
+static inline Operand FieldOperand(Register object, int offset) {
   return Operand(object, offset - kHeapObjectTag);
 }
 
 
 // Generate an Operand for loading an indexed field from an object.
-inline Operand FieldOperand(Register object,
-                            Register index,
-                            ScaleFactor scale,
-                            int offset) {
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
   return Operand(object, index, scale, offset - kHeapObjectTag);
 }
 
 
-inline Operand ContextOperand(Register context, int index) {
+static inline Operand ContextOperand(Register context, int index) {
   return Operand(context, Context::SlotOffset(index));
 }
 
 
-inline Operand GlobalObjectOperand() {
+static inline Operand GlobalObjectOperand() {
   return ContextOperand(rsi, Context::GLOBAL_INDEX);
 }
 
 
 // Provides access to exit frame stack space (not GCed).
-inline Operand StackSpaceOperand(int index) {
+static inline Operand StackSpaceOperand(int index) {
 #ifdef _WIN64
   const int kShaddowSpace = 4;
   return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
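
The static helper functions above fold V8's pointer tagging into the x64 addressing mode: a HeapObject pointer carries a low tag, so field accesses subtract kHeapObjectTag from the displacement rather than untagging the base register. The same arithmetic in plain C++:

    // Plain-C++ restatement of the FieldOperand displacement arithmetic.
    #include <cstdint>

    const int kHeapObjectTag = 1;  // low bit set on heap object pointers

    inline uintptr_t FieldAddress(uintptr_t tagged_object, int offset) {
      // [object + offset - tag] reaches the field without a separate
      // untagging instruction; the tag is absorbed into the displacement.
      return tagged_object + offset - kHeapObjectTag;
    }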
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 837c254..a782bd7 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@
 void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@
 
 void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -226,7 +226,7 @@
                                               bool check_end_of_string) {
 #ifdef DEBUG
   // If input is ASCII, don't even bother calling here if the string to
-  // match contains a non-ASCII character.
+  // match contains a non-ascii character.
   if (mode_ == ASCII) {
     ASSERT(String::IsAscii(str.start(), str.length()));
   }
@@ -431,14 +431,9 @@
     // Isolate.
     __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
-
-    { // NOLINT: Can't find a way to open this scope without confusing the
-      // linter.
-      AllowExternalCallThatCantCauseGC scope(&masm_);
-      ExternalReference compare =
-          ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
-      __ CallCFunction(compare, num_arguments);
-    }
+    ExternalReference compare =
+        ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+    __ CallCFunction(compare, num_arguments);
 
     // Restore original values before reacting on result value.
     __ Move(code_object_pointer(), masm_.CodeObject());
@@ -564,7 +559,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  ASSERT(minus < String::kMaxUC16CharCode);
   __ lea(rax, Operand(current_character(), -minus));
   __ and_(rax, Immediate(mask));
   __ cmpl(rax, Immediate(c));
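
The lea/and_/cmpl triple above is irregexp's subtract-and-mask character test: subtract a constant, mask, and compare, so that a single branch can cover, for example, both cases of a letter. A scalar C++ equivalent (sketch only; the helper's full signature sits just above this hunk):

    // Scalar equivalent of the emitted lea/and_/cmpl sequence.
    #include <cstdint>

    bool MatchesAfterMinusAnd(uint16_t current, uint16_t minus,
                              uint16_t mask, uint16_t c) {
      return static_cast<uint16_t>((current - minus) & mask) == c;
    }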
@@ -711,12 +706,7 @@
   // registers we need.
   // Entry code:
   __ bind(&entry_label_);
-
-  // Tell the system that we have a stack frame.  Because the type is MANUAL,
-  // no code is generated.
-  FrameScope scope(&masm_, StackFrame::MANUAL);
-
-  // Actually emit code to start a new stack frame.
+  // Start new stack frame.
   __ push(rbp);
   __ movq(rbp, rsp);
   // Save parameters and callee-save registers. Order here should correspond
@@ -1192,7 +1182,7 @@
   ASSERT(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
-  MaybeObject* result = Execution::HandleStackGuardInterrupt(isolate);
+  MaybeObject* result = Execution::HandleStackGuardInterrupt();
 
   if (*code_handle != re_code) {  // Return address no longer valid
     intptr_t delta = code_handle->address() - re_code->address();
@@ -1248,11 +1238,6 @@
     frame_entry<const String*>(re_frame, kInputString) = *subject;
     frame_entry<const byte*>(re_frame, kInputStart) = new_address;
     frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
-  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
-    // Subject string might have been a ConsString that underwent
-    // short-circuiting during GC. That will not change start_address but
-    // will change pointer inside the subject handle.
-    frame_entry<const String*>(re_frame, kInputString) = *subject;
   }
 
   return 0;
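
The code above patches the saved input pointers through frame_entry when a GC has moved the subject string during stack-guard handling. The helper is just a typed view of a slot in the saved RegExp frame; a self-contained sketch of the idea (this file's real helper reads through V8's Memory accessors):

    // Sketch: reinterpret a slot of the saved RegExp frame as a typed
    // lvalue so it can be read and rewritten in place.
    #include <cstdint>

    typedef uint8_t byte;
    typedef byte* Address;

    template <typename T>
    static T& frame_entry(Address re_frame, int frame_offset) {
      return *reinterpret_cast<T*>(re_frame + frame_offset);
    }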
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index f07f6b6..76d2555 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,61 +43,32 @@
                        MacroAssembler* masm,
                        Code::Flags flags,
                        StubCache::Table table,
-                       Register receiver,
                        Register name,
-                       // The offset is scaled by 4, based on
-                       // kHeapObjectTagSize, which is two bits
                        Register offset) {
-  // We need to scale up the pointer by 2 because the offset is scaled by less
-  // than the pointer size.
-  ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
-  ScaleFactor scale_factor = times_2;
-
-  ASSERT_EQ(24, sizeof(StubCache::Entry));
+  ASSERT_EQ(8, kPointerSize);
+  ASSERT_EQ(16, sizeof(StubCache::Entry));
   // The offset register holds the entry offset times four (due to masking
   // and shifting optimizations).
   ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
   Label miss;
 
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
   __ LoadAddress(kScratchRegister, key_offset);
-
   // Check that the key in the entry matches the name.
   // Multiply entry offset by 16 to get the entry address. Since the
   // offset register already holds the entry offset times four, multiply
   // by a further four.
-  __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
   __ j(not_equal, &miss);
-
-  // Get the map entry from the cache.
-  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
-  __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
-  __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ j(not_equal, &miss);
-
   // Get the code entry from the cache.
-  __ LoadAddress(kScratchRegister, value_offset);
+  // Use key_offset + kPointerSize, rather than loading value_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, 0));
-
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
   __ cmpl(offset, Immediate(flags));
   __ j(not_equal, &miss);
 
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
   // Jump to the first instruction in the code stub.
   __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ jmp(kScratchRegister);
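
Before and after this revert, the probe hashes the name and receiver map into a table offset; the revert only narrows the entry from 24 bytes (name, code, map) back to 16 bytes (name, code) and drops the map check. A C++ sketch of the primary-table offset that the addl/xor_/and_ sequence in GenerateProbe computes (kPrimaryTableSize is an assumption here):

    // Sketch of the primary-table offset computation.
    #include <cstdint>

    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32,
                           uint32_t flags) {
      const uint32_t kPrimaryTableSize = 2048;  // assumed for illustration
      const int kHeapObjectTagSize = 2;         // offsets stay scaled by four
      uint32_t scratch = name_hash_field + map_low32;               // addl
      scratch ^= flags;                                             // xor_
      return scratch & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);  // and_
    }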
@@ -111,12 +82,13 @@
 // must always call a backup property check that is complete.
 // This function is safe to call if the receiver has fast properties.
 // Name must be a symbol and receiver must be a heap object.
-static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                             Label* miss_label,
-                                             Register receiver,
-                                             Handle<String> name,
-                                             Register r0,
-                                             Register r1) {
+MUST_USE_RESULT static MaybeObject* GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm,
+    Label* miss_label,
+    Register receiver,
+    String* name,
+    Register r0,
+    Register r1) {
   ASSERT(name->IsSymbol());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1);
@@ -146,14 +118,19 @@
   __ j(not_equal, miss_label);
 
   Label done;
-  StringDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                     miss_label,
-                                                     &done,
-                                                     properties,
-                                                     name,
-                                                     r1);
+  MaybeObject* result = StringDictionaryLookupStub::GenerateNegativeLookup(
+      masm,
+      miss_label,
+      &done,
+      properties,
+      name,
+      r1);
+  if (result->IsFailure()) return result;
+
   __ bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
+
+  return result;
 }
 
 
@@ -163,16 +140,14 @@
                               Register name,
                               Register scratch,
                               Register extra,
-                              Register extra2,
-                              Register extra3) {
+                              Register extra2) {
   Isolate* isolate = masm->isolate();
   Label miss;
   USE(extra);   // The register extra is not used on the X64 platform.
   USE(extra2);  // The register extra2 is not used on the X64 platform.
-  USE(extra3);  // The register extra3 is not used on the X64 platform.
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  ASSERT(sizeof(Entry) == 24);
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 16.
+  ASSERT(sizeof(Entry) == 16);
 
   // Make sure the flags do not name a specific type.
   ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
@@ -184,10 +159,6 @@
   // Check scratch register is valid, extra and extra2 are unused.
   ASSERT(!scratch.is(no_reg));
   ASSERT(extra2.is(no_reg));
-  ASSERT(extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, &miss);
@@ -197,12 +168,10 @@
   // Use only the low 32 bits of the map pointer.
   __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, Immediate(flags));
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
   __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
+  ProbeTable(isolate, masm, flags, kPrimary, name, scratch);
 
   // Primary miss: Compute hash for secondary probe.
   __ movl(scratch, FieldOperand(name, String::kHashFieldOffset));
@@ -214,12 +183,11 @@
   __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
+  ProbeTable(isolate, masm, flags, kSecondary, name, scratch);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
   __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
 }
 
 
@@ -243,10 +211,7 @@
 
 
 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
   Isolate* isolate = masm->isolate();
   // Check we're still in the same context.
   __ Move(prototype, isolate->global());
@@ -254,8 +219,8 @@
           prototype);
   __ j(not_equal, miss);
   // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->global_context()->get(index)));
+  JSFunction* function =
+      JSFunction::cast(isolate->global_context()->get(index));
   // Load its initial map. The global functions all have initial maps.
   __ Move(prototype, Handle<Map>(function->initial_map()));
   // Load the prototype from the initial map.
@@ -347,10 +312,8 @@
 // are loaded directly otherwise the property is loaded from the properties
 // fixed array.
 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            Handle<JSObject> holder,
-                                            int index) {
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
   // Adjust for the number of properties stored in the holder.
   index -= holder->map()->inobject_properties();
   if (index < 0) {
@@ -370,11 +333,11 @@
                                      Register receiver,
                                      Register holder,
                                      Register name,
-                                     Handle<JSObject> holder_obj) {
+                                     JSObject* holder_obj) {
   __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  __ Move(kScratchRegister, interceptor);
+  InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+  ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
+  __ Move(kScratchRegister, Handle<Object>(interceptor));
   __ push(kScratchRegister);
   __ push(receiver);
   __ push(holder);
@@ -382,12 +345,11 @@
 }
 
 
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj) {
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+                                                   Register receiver,
+                                                   Register holder,
+                                                   Register name,
+                                                   JSObject* holder_obj) {
   PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
 
   ExternalReference ref =
@@ -441,9 +403,9 @@
 
 
 // Generates call to API function.
-static void GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc) {
+static MaybeObject* GenerateFastApiCall(MacroAssembler* masm,
+                                        const CallOptimization& optimization,
+                                        int argc) {
   // ----------- S t a t e -------------
   //  -- rsp[0]              : return address
   //  -- rsp[8]              : object passing the type check
@@ -458,25 +420,29 @@
   //  -- rsp[(argc + 4) * 8] : receiver
   // -----------------------------------
   // Get the function and setup the context.
-  Handle<JSFunction> function = optimization.constant_function();
-  __ LoadHeapObject(rdi, function);
+  JSFunction* function = optimization.constant_function();
+  __ Move(rdi, Handle<JSFunction>(function));
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Pass the additional arguments.
   __ movq(Operand(rsp, 2 * kPointerSize), rdi);
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data(api_call_info->data());
-  if (masm->isolate()->heap()->InNewSpace(*call_data)) {
-    __ Move(rcx, api_call_info);
+  Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
+  if (masm->isolate()->heap()->InNewSpace(call_data)) {
+    __ Move(rcx, api_call_info_handle);
     __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
     __ movq(Operand(rsp, 3 * kPointerSize), rbx);
   } else {
-    __ Move(Operand(rsp, 3 * kPointerSize), call_data);
+    __ Move(Operand(rsp, 3 * kPointerSize), Handle<Object>(call_data));
   }
 
   // Prepare arguments.
   __ lea(rbx, Operand(rsp, 3 * kPointerSize));
 
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
+
 #ifdef _WIN64
   // Win64 uses first register--rcx--for returned value.
   Register arguments_arg = rdx;
@@ -499,11 +465,12 @@
 
   // v8::InvocationCallback's argument.
   __ lea(arguments_arg, StackSpaceOperand(0));
-
-  // Function address is a foreign pointer outside V8's heap.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  __ CallApiFunctionAndReturn(function_address,
-                              argc + kFastApiCallArguments + 1);
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  return masm->TryCallApiFunctionAndReturn(&fun,
+                                           argc + kFastApiCallArguments + 1);
 }
 
 
@@ -518,16 +485,16 @@
         name_(name),
         extra_ic_state_(extra_ic_state) {}
 
-  void Compile(MacroAssembler* masm,
-               Handle<JSObject> object,
-               Handle<JSObject> holder,
-               Handle<String> name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
@@ -535,27 +502,45 @@
     __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
+
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
-                       holder, lookup, name, optimization, miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
-      CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3,
-                     name, holder, miss);
+      CompileRegular(masm,
+                     object,
+                     receiver,
+                     scratch1,
+                     scratch2,
+                     scratch3,
+                     name,
+                     holder,
+                     miss);
+      return masm->isolate()->heap()->undefined_value();  // Success.
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                        Handle<JSObject> object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        Handle<JSObject> interceptor_holder,
-                        LookupResult* lookup,
-                        Handle<String> name,
-                        const CallOptimization& optimization,
-                        Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
@@ -564,14 +549,16 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(
-          object, interceptor_holder);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(
-            interceptor_holder, Handle<JSObject>(lookup->holder()));
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
-      can_do_fast_api_call =
-          depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth;
+      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
+                             (depth2 != kInvalidProtoDepth);
     }
 
     Counters* counters = masm->isolate()->counters();
@@ -587,9 +574,9 @@
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver,
+                                        interceptor_holder, scratch1,
+                                        scratch2, scratch3, name, depth1, miss);
 
     // Invoke an interceptor and if it provides a value,
     // branch to |regular_invoke|.
@@ -602,11 +589,10 @@
 
     // Check that the maps from interceptor's holder to constant function's
     // holder haven't changed and thus we can use cached constant function.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
-                                      Handle<JSObject>(lookup->holder()),
-                                      scratch1, scratch2, scratch3,
-                                      name, depth2, miss);
+                                      lookup->holder(), scratch1,
+                                      scratch2, scratch3, name, depth2, miss);
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
@@ -617,7 +603,10 @@
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result = GenerateFastApiCall(masm,
+                                                optimization,
+                                                arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
           ? CALL_AS_FUNCTION
@@ -638,27 +627,33 @@
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
     }
+
+    return masm->isolate()->heap()->undefined_value();  // Success.
   }
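
The can_do_fast_api_call test above asks whether the receiver type expected by the API signature sits within a small prototype depth of either the object or the interceptor holder. A rough single-argument model of that probe, with hypothetical stand-in types (the real CallOptimization consults the function's signature):

    #include <cstdio>

    static const int kInvalidProtoDepth = -1;
    static const int kMaxProtoDepth = 4;  // assumed small bound

    // Hypothetical stand-in: an object with a prototype link and a flag
    // marking whether it matches the type the API signature expects.
    struct Obj { const Obj* prototype; bool is_expected_type; };

    int PrototypeDepthOfExpectedType(const Obj* object) {
      int depth = 0;
      for (const Obj* o = object; o != nullptr && depth <= kMaxProtoDepth;
           o = o->prototype, ++depth) {
        if (o->is_expected_type) return depth;
      }
      return kInvalidProtoDepth;
    }

    // Mirrors the shape of the decision in CompileCacheable: try the
    // receiver first, fall back to the interceptor holder.
    bool CanDoFastApiCall(const Obj* object, const Obj* holder) {
      int depth1 = PrototypeDepthOfExpectedType(object);
      int depth2 = (depth1 == kInvalidProtoDepth)
                       ? PrototypeDepthOfExpectedType(holder)
                       : kInvalidProtoDepth;
      return (depth1 != kInvalidProtoDepth) || (depth2 != kInvalidProtoDepth);
    }

    int main() {
      Obj expected{nullptr, true};
      Obj receiver{&expected, false};
      std::printf("fast api call: %d\n",
                  CanDoFastApiCall(&receiver, &expected));
    }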
 
   void CompileRegular(MacroAssembler* masm,
-                      Handle<JSObject> object,
+                      JSObject* object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
-                      Handle<String> name,
-                      Handle<JSObject> interceptor_holder,
+                      String* name,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
         stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
-                                        scratch1, scratch2, scratch3,
-                                        name, miss_label);
+                                        scratch1, scratch2, scratch3, name,
+                                        miss_label);
 
-    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ EnterInternalFrame();
     // Save the name_ register across the call.
     __ push(name_);
 
-    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             name_,
+                             interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
@@ -667,30 +662,27 @@
 
     // Restore the name_ register.
     __ pop(name_);
-
-    // Leave the internal frame.
+    __ LeaveInternalFrame();
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
                            Register receiver,
                            Register holder,
-                           Handle<JSObject> holder_obj,
+                           JSObject* holder_obj,
                            Label* interceptor_succeeded) {
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ push(holder);  // Save the holder.
-      __ push(name_);  // Save the name.
+    __ EnterInternalFrame();
+    __ push(holder);  // Save the holder.
+    __ push(name_);  // Save the name.
 
-      CompileCallLoadPropertyWithInterceptor(masm,
-                                             receiver,
-                                             holder,
-                                             name_,
-                                             holder_obj);
+    CompileCallLoadPropertyWithInterceptor(masm,
+                                           receiver,
+                                           holder,
+                                           name_,
+                                           holder_obj);
 
-      __ pop(name_);  // Restore the name.
-      __ pop(receiver);  // Restore the holder.
-      // Leave the internal frame.
-    }
+    __ pop(name_);  // Restore the name.
+    __ pop(receiver);  // Restore the holder.
+    __ LeaveInternalFrame();
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(not_equal, interceptor_succeeded);
@@ -705,33 +697,43 @@
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
-  Handle<Code> code = (kind == Code::LOAD_IC)
-      ? masm->isolate()->builtins()->LoadIC_Miss()
-      : masm->isolate()->builtins()->KeyedLoadIC_Miss();
-  __ Jump(code, RelocInfo::CODE_TARGET);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = masm->isolate()->builtins()->builtin(Builtins::kLoadIC_Miss);
+  } else {
+    code = masm->isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
 void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
-  Handle<Code> code =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(code, RelocInfo::CODE_TARGET);
+  Code* code = masm->isolate()->builtins()->builtin(
+      Builtins::kKeyedLoadIC_MissForceGeneric);
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }
 
 
 // Both name_reg and receiver_reg are preserved on jumps to miss_label,
 // but may be destroyed if the store is successful.
 void StubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                      Handle<JSObject> object,
+                                      JSObject* object,
                                       int index,
-                                      Handle<Map> transition,
+                                      Map* transition,
                                       Register receiver_reg,
                                       Register name_reg,
                                       Register scratch,
                                       Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver_reg, miss_label);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver_reg, Handle<Map>(object->map()),
-              miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, miss_label);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -743,12 +745,12 @@
   ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
 
   // Perform map transition for the receiver if necessary.
-  if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ pop(scratch);  // Return address.
     __ push(receiver_reg);
-    __ Push(transition);
+    __ Push(Handle<Map>(transition));
     __ push(rax);
     __ push(scratch);
     __ TailCallExternalReference(
@@ -759,10 +761,11 @@
     return;
   }
 
-  if (!transition.is_null()) {
+  if (transition != NULL) {
     // Update the map of the object; no write barrier updating is
     // needed because the map is never in new space.
-    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), transition);
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+            Handle<Map>(transition));
   }
 
   // Adjust for the number of properties stored in the object. Even in the
@@ -778,8 +781,7 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWriteField(
-        receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -790,8 +792,7 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWriteField(
-        scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
   }
 
   // Return the value (register rax).
@@ -802,53 +803,37 @@
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
-                                      Handle<GlobalObject> global,
-                                      Handle<String> name,
-                                      Register scratch,
-                                      Label* miss) {
-  Handle<JSGlobalPropertyCell> cell =
-      GlobalObject::EnsurePropertyCell(global, name);
+MUST_USE_RESULT static MaybeObject* GenerateCheckPropertyCell(
+    MacroAssembler* masm,
+    GlobalObject* global,
+    String* name,
+    Register scratch,
+    Label* miss) {
+  Object* probe;
+  { MaybeObject* maybe_probe = global->EnsurePropertyCell(name);
+    if (!maybe_probe->ToObject(&probe)) return maybe_probe;
+  }
+  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
   ASSERT(cell->value()->IsTheHole());
-  __ Move(scratch, cell);
+  __ Move(scratch, Handle<Object>(cell));
   __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
          masm->isolate()->factory()->the_hole_value());
   __ j(not_equal, miss);
+  return cell;
 }
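
GenerateCheckPropertyCell splits the work between compile time and run time: the cell is created eagerly so the stub has a stable object to embed, and the emitted code only re-checks that the cell still holds the hole. A sketch of the compile-time half, with a std::map standing in for the global object's property dictionary (hypothetical types, not the real API):

    #include <cassert>
    #include <cstdio>
    #include <map>
    #include <string>

    // Stand-in for a JSGlobalPropertyCell: holds either "the hole"
    // (property absent) or a real value.
    struct Cell { bool is_hole = true; };

    struct GlobalObject {
      std::map<std::string, Cell> cells;
      // Create the cell now so the compiled stub has a stable address
      // to embed; mirrors EnsurePropertyCell in the diff.
      Cell* EnsurePropertyCell(const std::string& name) {
        return &cells[name];  // default-constructed cell holds the hole
      }
    };

    int main() {
      GlobalObject global;
      Cell* cell = global.EnsurePropertyCell("x");
      // The stub compiler asserts the cell is empty at compile time,
      // then emits code comparing the cell's value against the hole.
      assert(cell->is_hole);
      std::printf("cell empty: %d\n", cell->is_hole);
    }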
 
 
-// Calls GenerateCheckPropertyCell for each global object in the prototype chain
-// from object to (but not including) holder.
-static void GenerateCheckPropertyCells(MacroAssembler* masm,
-                                       Handle<JSObject> object,
-                                       Handle<JSObject> holder,
-                                       Handle<String> name,
-                                       Register scratch,
-                                       Label* miss) {
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
-    if (current->IsGlobalObject()) {
-      GenerateCheckPropertyCell(masm,
-                                Handle<GlobalObject>::cast(current),
-                                name,
-                                scratch,
-                                miss);
-    }
-    current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
-  }
-}
-
 #undef __
 #define __ ACCESS_MASM((masm()))
 
 
-Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
+Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
-                                       Handle<JSObject> holder,
+                                       JSObject* holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
-                                       Handle<String> name,
+                                       String* name,
                                        int save_at_depth,
                                        Label* miss) {
   // Make sure there's no overlap between holder and object registers.
@@ -868,56 +853,80 @@
 
   // Check the maps in the prototype chain.
   // Traverse the prototype chain from the object and do map checks.
-  Handle<JSObject> current = object;
-  while (!current.is_identical_to(holder)) {
-    ++depth;
+  JSObject* current = object;
+  while (current != holder) {
+    depth++;
 
     // Only global objects and objects that do not require access
     // checks are allowed in stubs.
     ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
 
-    Handle<JSObject> prototype(JSObject::cast(current->GetPrototype()));
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
     if (!current->HasFastProperties() &&
         !current->IsJSGlobalObject() &&
         !current->IsJSGlobalProxy()) {
       if (!name->IsSymbol()) {
-        name = factory()->LookupSymbol(name);
+        MaybeObject* lookup_result = heap()->LookupSymbol(name);
+        if (lookup_result->IsFailure()) {
+          set_failure(Failure::cast(lookup_result));
+          return reg;
+        } else {
+          name = String::cast(lookup_result->ToObjectUnchecked());
+        }
       }
-      ASSERT(current->property_dictionary()->FindEntry(*name) ==
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
              StringDictionary::kNotFound);
 
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      MaybeObject* negative_lookup = GenerateDictionaryNegativeLookup(masm(),
+                                                                      miss,
+                                                                      reg,
+                                                                      name,
+                                                                      scratch1,
+                                                                      scratch2);
+      if (negative_lookup->IsFailure()) {
+        set_failure(Failure::cast(negative_lookup));
+        return reg;
+      }
 
       __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
+      reg = holder_reg;  // From now on the object is in holder_reg.
       __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      Handle<Map> current_map(current->map());
-      if (in_new_space) {
-        // Save the map in scratch1 for later.
+    } else if (heap()->InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      __ Cmp(scratch1, Handle<Map>(current->map()));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
         __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
       }
-      __ CheckMap(reg, Handle<Map>(current_map),
-                  miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
 
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
+    } else {
+      // Check the map of the current object.
+      __ Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Handle<Map>(current->map()));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
       if (current->IsJSGlobalProxy()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, scratch1, miss);
       }
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (in_new_space) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ Move(reg, prototype);
-      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // From now on the object is in holder_reg.
+      __ Move(reg, Handle<JSObject>(prototype));
     }
 
     if (save_at_depth == depth) {
@@ -927,46 +936,62 @@
     // Go to the next object in the prototype chain.
     current = prototype;
   }
-  ASSERT(current.is_identical_to(holder));
+
+  // Check the holder map.
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
+  __ j(not_equal, miss);
 
   // Log the check depth.
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
-  // Check the holder map.
-  __ CheckMap(reg, Handle<Map>(holder->map()),
-              miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
-
-  // Perform security check for access to the global object.
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(current == holder);
   ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
   if (current->IsJSGlobalProxy()) {
     __ CheckAccessGlobalProxy(reg, scratch1, miss);
   }
 
-  // If we've skipped any global objects, it's not enough to verify that
-  // their maps haven't changed.  We also need to check that the property
-  // cell for the property is still empty.
-  GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.  We also need to check that the
+  // property cell for the property is still empty.
+  current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                    GlobalObject::cast(current),
+                                                    name,
+                                                    scratch1,
+                                                    miss);
+      if (cell->IsFailure()) {
+        set_failure(Failure::cast(cell));
+        return reg;
+      }
+    }
+    current = JSObject::cast(current->GetPrototype());
+  }
 
   // Return the register containing the holder.
   return reg;
 }
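
CheckPrototypes walks the chain from object to holder at compile time and emits one map check per hop, plus one for the holder itself. A compact model of the traversal; the stand-in structs ignore dictionary-mode objects, global proxies, and new-space prototypes, which the real code special-cases above:

    #include <cstdio>

    struct Map {};
    struct JSObject {
      const Map* map;
      JSObject* prototype;  // nullptr at the end of the chain
    };

    // Returns the number of map checks the generated stub would perform,
    // or -1 to signal a "miss" (holder not on the chain).
    int EmitMapChecks(JSObject* object, JSObject* holder) {
      int depth = 0;
      JSObject* current = object;
      while (current != holder) {
        if (current->prototype == nullptr) return -1;  // miss
        // In the stub, this is a __ Cmp against Handle<Map>(current->map())
        // followed by __ j(not_equal, miss).
        ++depth;
        current = current->prototype;
      }
      ++depth;  // the holder's own map is checked as well
      return depth;
    }

    int main() {
      Map m0, m1;
      JSObject holder{&m1, nullptr};
      JSObject object{&m0, &holder};
      std::printf("map checks: %d\n", EmitMapChecks(&object, &holder));
    }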
 
 
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
-                                     Handle<JSObject> holder,
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
                                      Register receiver,
                                      Register scratch1,
                                      Register scratch2,
                                      Register scratch3,
                                      int index,
-                                     Handle<String> name,
+                                     String* name,
                                      Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check the prototype chain.
-  Register reg = CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, scratch3, name, miss);
 
   // Get the value from the properties.
   GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
@@ -974,22 +999,25 @@
 }
 
 
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Handle<AccessorInfo> callback,
-                                        Handle<String> name,
-                                        Label* miss) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  Register reg = CheckPrototypes(object, receiver, holder, scratch1,
-                                 scratch2, scratch3, name, miss);
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1,
+                      scratch2, scratch3, name, miss);
+
+  Handle<AccessorInfo> callback_handle(callback);
 
   // Insert additional parameters into the stack frame above return address.
   ASSERT(!scratch2.is(reg));
@@ -997,11 +1025,11 @@
 
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  if (heap()->InNewSpace(callback->data())) {
-    __ Move(scratch1, callback);
+  if (heap()->InNewSpace(callback_handle->data())) {
+    __ Move(scratch1, callback_handle);
     __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));  // data
   } else {
-    __ Push(Handle<Object>(callback->data()));
+    __ Push(Handle<Object>(callback_handle->data()));
   }
   __ push(name_reg);  // name
   // Save a pointer to where we pushed the arguments pointer.
@@ -1020,7 +1048,11 @@
   __ movq(name_arg, rsp);
   __ push(scratch2);  // Restore return address.
 
-  // 3 elements array for v8::Arguments::values_ and handler for name.
+  // Do the call through the API.
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+
+  // 3 elements array for v8::Arguments::values_ and handler for name.
   const int kStackSpace = 4;
 
   // Allocate v8::AccessorInfo in non-GCed stack space.
@@ -1036,42 +1068,45 @@
   // could be used to pass arguments.
   __ lea(accessor_info_arg, StackSpaceOperand(0));
 
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  __ CallApiFunctionAndReturn(getter_address, kStackSpace);
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  return masm()->TryCallApiFunctionAndReturn(&fun, kStackSpace);
 }
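
The InNewSpace test in GenerateLoadCallback decides between embedding the callback data directly in the code object and loading it from the AccessorInfo at run time, since new-space objects may move during GC. A sketch of that choice with hypothetical emit helpers (the real code emits x64 instructions):

    #include <cstdio>

    // Hypothetical emit helpers standing in for assembler pseudo-ops.
    void EmitPushImmediate(const void* p) { std::printf("push imm %p\n", p); }
    void EmitPushField(const char* what)  { std::printf("push [%s]\n", what); }

    // New-space objects can move during GC, so their addresses must not
    // be baked into generated code; load them through the (handle-tracked)
    // AccessorInfo object instead.
    void PushCallbackData(const void* data, bool data_in_new_space) {
      if (data_in_new_space) {
        EmitPushField("scratch1 + AccessorInfo::kDataOffset");
      } else {
        EmitPushImmediate(data);  // old space: safe to embed directly
      }
    }

    int main() {
      int dummy;
      PushCallbackData(&dummy, false);
      PushCallbackData(&dummy, true);
    }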
 
 
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
-                                        Handle<JSObject> holder,
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
-                                        Handle<JSFunction> value,
-                                        Handle<String> name,
+                                        Object* value,
+                                        String* name,
                                         Label* miss) {
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(receiver, miss);
 
   // Check that the maps haven't changed.
-  CheckPrototypes(
-      object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+  CheckPrototypes(object, receiver, holder,
+                  scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
-  __ LoadHeapObject(rax, value);
+  __ Move(rax, Handle<Object>(value));
   __ ret(0);
 }
 
 
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
-                                           Handle<JSObject> interceptor_holder,
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* interceptor_holder,
                                            LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
-                                           Handle<String> name,
+                                           String* name,
                                            Label* miss) {
   ASSERT(interceptor_holder->HasNamedInterceptor());
   ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
@@ -1083,13 +1118,13 @@
   // and CALLBACKS, so inline only them, other cases may be added
   // later.
   bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
+  if (lookup->IsProperty() && lookup->IsCacheable()) {
     if (lookup->type() == FIELD) {
       compile_followup_inline = true;
     } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsAccessorInfo()) {
-      compile_followup_inline =
-          AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL;
+        lookup->GetCallbackObject()->IsAccessorInfo() &&
+        AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+      compile_followup_inline = true;
     }
   }
 
@@ -1104,49 +1139,47 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    __ EnterInternalFrame();
 
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        // CALLBACKS case needs a receiver to be passed into C++ callback.
-        __ push(receiver);
-      }
-      __ push(holder_reg);
-      __ push(name_reg);
-
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder has been compiled before (see a caller
-      // of this method.)
-      CompileCallLoadPropertyWithInterceptor(masm(),
-                                             receiver,
-                                             holder_reg,
-                                             name_reg,
-                                             interceptor_holder);
-
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-      __ j(equal, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ ret(0);
-
-      __ bind(&interceptor_failed);
-      __ pop(name_reg);
-      __ pop(holder_reg);
-      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-        __ pop(receiver);
-      }
-
-      // Leave the internal frame.
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // CALLBACKS case needs a receiver to be passed into C++ callback.
+      __ push(receiver);
     }
+    __ push(holder_reg);
+    __ push(name_reg);
+
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see the caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(masm(),
+                                           receiver,
+                                           holder_reg,
+                                           name_reg,
+                                           interceptor_holder);
+
+    // Check if the interceptor provided a value for the property.  If
+    // so, return immediately.
+    Label interceptor_failed;
+    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ j(equal, &interceptor_failed);
+    __ LeaveInternalFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ pop(name_reg);
+    __ pop(holder_reg);
+    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      __ pop(receiver);
+    }
+
+    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
-    if (*interceptor_holder != lookup->holder()) {
+    if (interceptor_holder != lookup->holder()) {
       holder_reg = CheckPrototypes(interceptor_holder,
                                    holder_reg,
-                                   Handle<JSObject>(lookup->holder()),
+                                   lookup->holder(),
                                    scratch1,
                                    scratch2,
                                    scratch3,
@@ -1158,15 +1191,15 @@
       // We found FIELD property in prototype chain of interceptor's holder.
       // Retrieve a field from field's holder.
       GenerateFastPropertyLoad(masm(), rax, holder_reg,
-                               Handle<JSObject>(lookup->holder()),
-                               lookup->GetFieldIndex());
+                               lookup->holder(), lookup->GetFieldIndex());
       __ ret(0);
     } else {
       // We found CALLBACKS property in prototype chain of interceptor's
       // holder.
       ASSERT(lookup->type() == CALLBACKS);
-      Handle<AccessorInfo> callback(
-          AccessorInfo::cast(lookup->GetCallbackObject()));
+      ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+      AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+      ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
       // Tail call to runtime.
@@ -1175,7 +1208,7 @@
       __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(holder_reg);
-      __ Move(holder_reg, callback);
+      __ Move(holder_reg, Handle<AccessorInfo>(callback));
       __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
       __ push(holder_reg);
       __ push(name_reg);
@@ -1204,17 +1237,17 @@
 }
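
The compile_followup_inline predicate above admits exactly two cases: FIELD properties, and CALLBACKS backed by an AccessorInfo whose getter is non-null; everything else goes to the runtime. Lifted into a standalone sketch (the enum and struct are stand-ins for LookupResult):

    #include <cstdio>

    enum PropertyType { FIELD, CALLBACKS, INTERCEPTOR };

    struct LookupResult {
      bool is_property;
      bool is_cacheable;
      PropertyType type;
      bool callback_is_accessor_info;
      bool accessor_getter_nonnull;
    };

    // Mirrors the compile_followup_inline computation in the diff.
    bool CompileFollowupInline(const LookupResult& lookup) {
      if (!lookup.is_property || !lookup.is_cacheable) return false;
      if (lookup.type == FIELD) return true;
      return lookup.type == CALLBACKS &&
             lookup.callback_is_accessor_info &&
             lookup.accessor_getter_nonnull;
    }

    int main() {
      LookupResult field{true, true, FIELD, false, false};
      LookupResult other{true, true, INTERCEPTOR, false, false};
      std::printf("field: %d, interceptor: %d\n",
                  CompileFollowupInline(field), CompileFollowupInline(other));
    }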
 
 
-void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ Cmp(rcx, name);
+    __ Cmp(rcx, Handle<String>(name));
     __ j(not_equal, miss);
   }
 }
 
 
-void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<String> name,
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+                                                   JSObject* holder,
+                                                   String* name,
                                                    Label* miss) {
   ASSERT(holder->IsGlobalObject());
 
@@ -1224,23 +1257,27 @@
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
+  // If the object is the holder then we know it's a global object,
+  // which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(rdx, miss);
+  }
 
   // Check that the maps haven't changed.
-  __ JumpIfSmi(rdx, miss);
   CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
 }
 
 
-void CallStubCompiler::GenerateLoadFunctionFromCell(
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Label* miss) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    Label* miss) {
   // Get the value from the cell.
-  __ Move(rdi, cell);
+  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
   __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
 
   // Check that the cell contains the same function.
-  if (heap()->InNewSpace(*function)) {
+  if (heap()->InNewSpace(function)) {
     // We can't embed a pointer to a function in new space so we have
     // to verify that the shared function info is unchanged. This has
     // the nice side effect that multiple closures based on the same
@@ -1253,26 +1290,30 @@
     // Check the shared function info. Make sure it hasn't changed.
     __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
     __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
+    __ j(not_equal, miss);
   } else {
-    __ Cmp(rdi, function);
+    __ Cmp(rdi, Handle<JSFunction>(function));
+    __ j(not_equal, miss);
   }
-  __ j(not_equal, miss);
 }
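
GenerateLoadFunctionFromCell cannot embed a pointer to a new-space function, so it compares the SharedFunctionInfo instead; as the comment above notes, this also lets sibling closures of the same source function share one stub. A behavioral sketch with plain structs standing in for the heap objects:

    #include <cstdio>

    struct SharedFunctionInfo {};
    struct JSFunction {
      SharedFunctionInfo* shared;
      bool in_new_space;
    };

    // Returns true when the value loaded from the cell still matches the
    // function the stub was compiled for (false means "jump to miss").
    bool CellCheck(JSFunction* expected, JSFunction* actual) {
      if (expected->in_new_space) {
        // Compare shared function info: stable across GC moves, and
        // equal for sibling closures of the same source function.
        return actual->shared == expected->shared;
      }
      // Old-space function: its address is safe to embed and compare.
      return actual == expected;
    }

    int main() {
      SharedFunctionInfo sfi;
      JSFunction f1{&sfi, true}, f2{&sfi, true};
      std::printf("sibling closure hits: %d\n", CellCheck(&f1, &f2));
    }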
 
 
-void CallStubCompiler::GenerateMissBranch() {
-  Handle<Code> code =
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj =
       isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(),
                                                kind_,
-                                               extra_state_);
-  __ Jump(code, RelocInfo::CODE_TARGET);
+                                               extra_ic_state_);
+  Object* obj;
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+  __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
+  return obj;
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -1312,7 +1353,7 @@
   }
 
   // Invoke the function.
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -1320,19 +1361,19 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(FIELD, name);
 }
 
 
-Handle<Code> CallStubCompiler::CompileArrayPushCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1342,9 +1383,10 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1354,8 +1396,14 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object),
+                  rdx,
+                  holder,
+                  rbx,
+                  rax,
+                  rdi,
+                  name,
+                  &miss);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1364,85 +1412,53 @@
   } else {
     Label call_builtin;
 
+    // Get the elements array of the object.
+    __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
+
+    // Check that the elements are in fast mode and writable.
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory()->fixed_array_map());
+    __ j(not_equal, &call_builtin);
+
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label attempt_to_grow_elements, with_write_barrier;
-
-      // Get the elements array of the object.
-      __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
-
-      // Check that the elements are in fast mode and writable.
-      __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
-             factory()->fixed_array_map());
-      __ j(not_equal, &call_builtin);
+      Label exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into rax and calculate new length.
       __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
       STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
       __ addl(rax, Immediate(argc));
 
-      // Get the elements' length into rcx.
-      __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+      // Get the element's length into rcx.
+      __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
       __ cmpl(rax, rcx);
       __ j(greater, &attempt_to_grow_elements);
 
-      // Check if value is a smi.
+      // Save new length.
+      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+
+      // Push the element.
       __ movq(rcx, Operand(rsp, argc * kPointerSize));
-      __ JumpIfNotSmi(rcx, &with_write_barrier);
-
-      // Save new length.
-      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
-      // Store the value.
-      __ movq(FieldOperand(rdi,
-                           rax,
-                           times_pointer_size,
-                           FixedArray::kHeaderSize - argc * kPointerSize),
-              rcx);
-
-      __ Integer32ToSmi(rax, rax);  // Return new length as smi.
-      __ ret((argc + 1) * kPointerSize);
-
-      __ bind(&with_write_barrier);
-
-      __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
-      if (FLAG_smi_only_arrays  && !FLAG_trace_elements_transitions) {
-        Label fast_object, not_fast_object;
-        __ CheckFastObjectElements(rbx, &not_fast_object, Label::kNear);
-        __ jmp(&fast_object);
-        // In case of fast smi-only, convert to fast object, otherwise bail out.
-        __ bind(&not_fast_object);
-        __ CheckFastSmiOnlyElements(rbx, &call_builtin);
-        // rdx: receiver
-        // rbx: map
-        __ movq(r9, rdi);  // Backup rdi as it is going to be trashed.
-        __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
-                                               FAST_ELEMENTS,
-                                               rbx,
-                                               rdi,
-                                               &call_builtin);
-        ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm());
-        __ movq(rdi, r9);
-        __ bind(&fast_object);
-      } else {
-        __ CheckFastObjectElements(rbx, &call_builtin);
-      }
-
-      // Save new length.
-      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
-      // Store the value.
-      __ lea(rdx, FieldOperand(rdi,
+      __ lea(rdx, FieldOperand(rbx,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
-      __ RecordWrite(rdi, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                     OMIT_SMI_CHECK);
-
+      // Check if value is a smi.
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
+
+      __ JumpIfNotSmi(rcx, &with_write_barrier);
+
+      __ bind(&exit);
+      __ ret((argc + 1) * kPointerSize);
+
+      __ bind(&with_write_barrier);
+
+      __ InNewSpace(rbx, rcx, equal, &exit);
+
+      __ RecordWriteHelper(rbx, rdx, rcx);
+
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1450,15 +1466,6 @@
         __ jmp(&call_builtin);
       }
 
-      __ movq(rbx, Operand(rsp, argc * kPointerSize));
-      // Growing elements that are SMI-only requires special handling in case
-      // the new element is non-Smi. For now, delegate to the builtin.
-      Label no_fast_elements_check;
-      __ JumpIfSmi(rbx, &no_fast_elements_check);
-      __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-      __ CheckFastObjectElements(rcx, &call_builtin, Label::kFar);
-      __ bind(&no_fast_elements_check);
-
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1469,7 +1476,7 @@
       __ Load(rcx, new_space_allocation_top);
 
       // Check if it's the end of elements.
-      __ lea(rdx, FieldOperand(rdi,
+      __ lea(rdx, FieldOperand(rbx,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ cmpq(rdx, rcx);
@@ -1482,33 +1489,28 @@
 
       // We fit and could grow elements.
       __ Store(new_space_allocation_top, rcx);
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
 
       // Push the argument...
-      __ movq(Operand(rdx, 0), rbx);
+      __ movq(Operand(rdx, 0), rcx);
       // ... and fill the rest with holes.
       __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
         __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
       }
 
-      // We know the elements array is in new space so we don't need the
-      // remembered set, but we just pushed a value onto it so we may have to
-      // tell the incremental marker to rescan the object that we just grew.  We
-      // don't need to worry about the holes because they are in old space and
-      // already marked black.
-      __ RecordWrite(rdi, rdx, rbx, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
-
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
       // Increment elements' and array's sizes.
-      __ SmiAddConstant(FieldOperand(rdi, FixedArray::kLengthOffset),
+      __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
                         Smi::FromInt(kAllocationDelta));
 
       // Make new length a smi before returning it.
       __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
+      // Elements are in new space, so no write barrier is required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1520,19 +1522,19 @@
   }
 
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
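
The attempt_to_grow_elements path above can only extend the backing store in place when the elements array ends exactly at the new-space allocation top and the extra kAllocationDelta slots fit under the limit. A minimal model of that check; raw pointers stand in for the allocation-top and allocation-limit external references, and the constant's value is assumed for the sketch:

    #include <cstdio>

    const int kPointerSize = 8;       // x64
    const int kAllocationDelta = 4;   // name from the stub; value assumed

    // Returns the new allocation top if the elements array can be grown
    // in place, or nullptr if the stub must fall back to the builtin.
    char* TryGrowElementsInPlace(char* elements_end,
                                 char* allocation_top,
                                 char* allocation_limit) {
      if (elements_end != allocation_top) return nullptr;  // not at the end
      char* new_top = allocation_top + kAllocationDelta * kPointerSize;
      if (new_top > allocation_limit) return nullptr;      // would not fit
      return new_top;  // caller stores this back and fills the holes
    }

    int main() {
      char space[256];
      char* top = space + 64;
      std::printf("grown in place: %d\n",
                  TryGrowElementsInPlace(top, top, space + 256) != nullptr);
    }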
 
 
-Handle<Code> CallStubCompiler::CompileArrayPopCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+                                                   JSObject* holder,
+                                                   JSGlobalPropertyCell* cell,
+                                                   JSFunction* function,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : name
   //  -- rsp[0]              : return address
@@ -1542,9 +1544,10 @@
   // -----------------------------------
 
   // If object is not an array, bail out to regular call.
-  if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
 
   Label miss, return_undefined, call_builtin;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -1554,8 +1557,9 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(rdx, &miss);
 
-  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
-                  name, &miss);
+  CheckPrototypes(JSObject::cast(object), rdx,
+                  holder, rbx,
+                  rax, rdi, name, &miss);
 
   // Get the elements array of the object.
   __ movq(rbx, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1601,19 +1605,20 @@
       1);
 
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1623,7 +1628,7 @@
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
 
   const int argc = arguments().immediate();
 
@@ -1631,11 +1636,13 @@
   Label name_miss;
   Label index_out_of_range;
   Label* index_out_of_range_label = &index_out_of_range;
+
   if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
        DEFAULT_STRING_STUB)) {
     index_out_of_range_label = &miss;
   }
+
   GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
@@ -1643,90 +1650,12 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             rax,
                                             &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  rax, holder, rbx, rdx, rdi, name, &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+                  rbx, rdx, rdi, name, &miss);
 
   Register receiver = rbx;
   Register index = rdi;
-  Register result = rax;
-  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
-  if (argc > 0) {
-    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
-  } else {
-    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
-  }
-
-  StringCharCodeAtGenerator generator(receiver,
-                                      index,
-                                      result,
-                                      &miss,  // When not a string.
-                                      &miss,  // When not a number.
-                                      index_out_of_range_label,
-                                      STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
-  __ ret((argc + 1) * kPointerSize);
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
-
-  if (index_out_of_range.is_linked()) {
-    __ bind(&index_out_of_range);
-    __ LoadRoot(rax, Heap::kNanValueRootIndex);
-    __ ret((argc + 1) * kPointerSize);
-  }
-
-  __ bind(&miss);
-  // Restore function name in rcx.
-  __ Move(rcx, name);
-  __ bind(&name_miss);
-  GenerateMissBranch();
-
-  // Return the generated code.
-  return GetCode(function);
-}
-
-
-Handle<Code> CallStubCompiler::CompileStringCharAtCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
-  // ----------- S t a t e -------------
-  //  -- rcx                 : function name
-  //  -- rsp[0]              : return address
-  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
-  //  -- ...
-  //  -- rsp[(argc + 1) * 8] : receiver
-  // -----------------------------------
-
-  // If object is not a string, bail out to regular call.
-  if (!object->IsString() || !cell.is_null()) return Handle<Code>::null();
-
-  const int argc = arguments().immediate();
-  Label miss;
-  Label name_miss;
-  Label index_out_of_range;
-  Label* index_out_of_range_label = &index_out_of_range;
-  if (kind_ == Code::CALL_IC &&
-      (CallICBase::StringStubState::decode(extra_state_) ==
-       DEFAULT_STRING_STUB)) {
-    index_out_of_range_label = &miss;
-  }
-  GenerateNameCheck(name, &name_miss);
-
-  // Check that the maps starting from the prototype haven't changed.
-  GenerateDirectLoadGlobalFunctionPrototype(masm(),
-                                            Context::STRING_FUNCTION_INDEX,
-                                            rax,
-                                            &miss);
-  ASSERT(!object.is_identical_to(holder));
-  CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-                  rax, holder, rbx, rdx, rdi, name, &miss);
-
-  Register receiver = rax;
-  Register index = rdi;
   Register scratch = rdx;
   Register result = rax;
   __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1736,42 +1665,44 @@
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
-  StringCharAtGenerator generator(receiver,
-                                  index,
-                                  scratch,
-                                  result,
-                                  &miss,  // When not a string.
-                                  &miss,  // When not a number.
-                                  index_out_of_range_label,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm());
+  StringCharCodeAtGenerator char_code_at_generator(receiver,
+                                                   index,
+                                                   scratch,
+                                                   result,
+                                                   &miss,  // When not a string.
+                                                   &miss,  // When not a number.
+                                                   index_out_of_range_label,
+                                                   STRING_INDEX_IS_NUMBER);
+  char_code_at_generator.GenerateFast(masm());
   __ ret((argc + 1) * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_code_at_generator.GenerateSlow(masm(), call_helper);
 
   if (index_out_of_range.is_linked()) {
     __ bind(&index_out_of_range);
-    __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+    __ LoadRoot(rax, Heap::kNanValueRootIndex);
     __ ret((argc + 1) * kPointerSize);
   }
+
   __ bind(&miss);
   // Restore function name in rcx.
-  __ Move(rcx, name);
+  __ Move(rcx, Handle<String>(name));
   __ bind(&name_miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
-Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1780,23 +1711,111 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
+  // If object is not a string, bail out to regular call.
+  if (!object->IsString() || cell != NULL) return heap()->undefined_value();
+
+  const int argc = arguments().immediate();
+
+  Label miss;
+  Label name_miss;
+  Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC &&
+      (CallICBase::StringStubState::decode(extra_ic_state_) ==
+       DEFAULT_STRING_STUB)) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
+
+  // Check that the maps starting from the prototype haven't changed.
+  GenerateDirectLoadGlobalFunctionPrototype(masm(),
+                                            Context::STRING_FUNCTION_INDEX,
+                                            rax,
+                                            &miss);
+  ASSERT(object != holder);
+  CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+                  rbx, rdx, rdi, name, &miss);
+
+  Register receiver = rax;
+  Register index = rdi;
+  Register scratch1 = rbx;
+  Register scratch2 = rdx;
+  Register result = rax;
+  __ movq(receiver, Operand(rsp, (argc + 1) * kPointerSize));
+  if (argc > 0) {
+    __ movq(index, Operand(rsp, (argc - 0) * kPointerSize));
+  } else {
+    __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
+  }
+
+  StringCharAtGenerator char_at_generator(receiver,
+                                          index,
+                                          scratch1,
+                                          scratch2,
+                                          result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          index_out_of_range_label,
+                                          STRING_INDEX_IS_NUMBER);
+  char_at_generator.GenerateFast(masm());
+  __ ret((argc + 1) * kPointerSize);
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm(), call_helper);
+
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+    __ ret((argc + 1) * kPointerSize);
+  }
+
+  __ bind(&miss);
+  // Restore function name in rcx.
+  __ Move(rcx, Handle<String>(name));
+  __ bind(&name_miss);
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
+
+  // Return the generated code.
+  return GetCode(function);
+}
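+
+// Note the second in-band sentinel the revert restores: a custom-call
+// compiler that cannot specialize signals "use the regular call path" by
+// returning heap()->undefined_value() rather than a null Handle<Code>, and
+// callers such as CompileCallConstant test IsUndefined() instead of
+// is_null(). A toy sketch of that convention (names illustrative):

#include <cstdio>

// 'undefined' here is an in-band sentinel meaning "no specialized stub
// applies", distinct from an allocation failure. Not V8's types.
struct Object { const char* tag; };
static Object undefined_sentinel = {"undefined"};
static Object specialized_stub = {"specialized charAt stub"};

Object* CompileCustomCall(bool applicable) {
  if (!applicable) return &undefined_sentinel;  // bail out, not an error
  return &specialized_stub;
}

int main() {
  Object* result = CompileCustomCall(false);
  if (result == &undefined_sentinel) {
    std::printf("fall back to the regular call compiler\n");
  } else {
    std::printf("use %s\n", result->tag);
  }
}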
+
+
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx                 : function name
+  //  -- rsp[0]              : return address
+  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+  //  -- ...
+  //  -- rsp[(argc + 1) * 8] : receiver
+  // -----------------------------------
+
+  const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  const int argc = arguments().immediate();
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
     __ JumpIfSmi(rdx, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
@@ -1811,17 +1830,17 @@
   // Convert the smi code to uint16.
   __ SmiAndConstant(code, code, Smi::FromInt(0xffff));
 
-  StringCharFromCodeGenerator generator(code, rax);
-  generator.GenerateFast(masm());
+  StringCharFromCodeGenerator char_from_code_generator(code, rax);
+  char_from_code_generator.GenerateFast(masm());
   __ ret(2 * kPointerSize);
 
   StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm(), call_helper);
+  char_from_code_generator.GenerateSlow(masm(), call_helper);
 
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1829,30 +1848,29 @@
 
   __ bind(&miss);
   // rcx: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
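
The SmiAndConstant/SmiToInteger32 operations above rely on the x64 smi representation of this V8 generation: the 32-bit payload sits in the upper half of the 64-bit word and the low tag bit is 0, so untagging is an arithmetic shift and a heap pointer (low bit 1) can never be mistaken for a smi. A rough standalone sketch (helper names mirror the macro assembler's; the code is illustrative):

#include <cstdint>
#include <cstdio>

typedef int64_t Word;

Word SmiFromInt(int32_t value) { return static_cast<Word>(value) << 32; }
int32_t SmiToInteger32(Word smi) { return static_cast<int32_t>(smi >> 32); }
bool IsSmi(Word w) { return (w & 1) == 0; }

int main() {
  Word code = SmiFromInt(0x12345);
  // Masking a char code to uint16, as SmiAndConstant(code, code,
  // Smi::FromInt(0xffff)) does above, works directly on the tagged values:
  Word masked = code & SmiFromInt(0xffff);
  std::printf("is_smi=%d untagged=0x%x masked=0x%x\n",
              IsSmi(code), SmiToInteger32(code), SmiToInteger32(masked));
}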
 
 
-Handle<Code> CallStubCompiler::CompileMathFloorCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+                                                    JSObject* holder,
+                                                    JSGlobalPropertyCell* cell,
+                                                    JSFunction* function,
+                                                    String* name) {
   // TODO(872): implement this.
-  return Handle<Code>::null();
+  return heap()->undefined_value();
 }
 
 
-Handle<Code> CallStubCompiler::CompileMathAbsCall(
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+                                                  JSObject* holder,
+                                                  JSGlobalPropertyCell* cell,
+                                                  JSFunction* function,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- rcx                 : function name
   //  -- rsp[0]              : return address
@@ -1861,25 +1879,28 @@
   //  -- rsp[(argc + 1) * 8] : receiver
   // -----------------------------------
 
+  const int argc = arguments().immediate();
+
   // If the object is not a JSObject or we got an unexpected number of
   // arguments, bail out to the regular call.
-  const int argc = arguments().immediate();
-  if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
+  if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
 
   Label miss;
   GenerateNameCheck(name, &miss);
 
-  if (cell.is_null()) {
+  if (cell == NULL) {
     __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+
     __ JumpIfSmi(rdx, &miss);
-    CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
-                    name, &miss);
+
+    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
+                    &miss);
   } else {
-    ASSERT(cell->value() == *function);
-    GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
-                                &miss);
+    ASSERT(cell->value() == function);
+    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
+
   // Load the (only) argument into rax.
   __ movq(rax, Operand(rsp, 1 * kPointerSize));
 
@@ -1936,7 +1957,7 @@
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ bind(&slow);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -1944,31 +1965,33 @@
 
   __ bind(&miss);
   // rcx: function name.
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
-  return cell.is_null() ? GetCode(function) : GetCode(NORMAL, name);
+  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
 }
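
CallICBase::Contextual::decode(extra_ic_state_) and StringStubState::decode above unpack flags from a single integer of per-IC state; V8 expresses such packed flags with a small BitField template. A hedged sketch of that pattern (the field layout below is invented for illustration):

#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static unsigned mask() { return ((1u << size) - 1) << shift; }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned state) {
    return static_cast<T>((state & mask()) >> shift);
  }
};

enum StringStubState { DEFAULT_STRING_STUB, STRING_INDEX_OUT_OF_BOUNDS };
typedef BitField<bool, 0, 1> Contextual;             // contextual call?
typedef BitField<StringStubState, 1, 1> StringStub;  // charAt variant

int main() {
  unsigned extra_ic_state =
      Contextual::encode(true) | StringStub::encode(DEFAULT_STRING_STUB);
  std::printf("contextual=%d string_stub=%d\n",
              Contextual::decode(extra_ic_state),
              StringStub::decode(extra_ic_state));
}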
 
 
-Handle<Code> CallStubCompiler::CompileFastApiCall(
+MaybeObject* CallStubCompiler::CompileFastApiCall(
     const CallOptimization& optimization,
-    Handle<Object> object,
-    Handle<JSObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+    Object* object,
+    JSObject* holder,
+    JSGlobalPropertyCell* cell,
+    JSFunction* function,
+    String* name) {
   ASSERT(optimization.is_simple_api_call());
   // Bail out if object is a global object as we don't want to
   // repatch it to global receiver.
-  if (object->IsGlobalObject()) return Handle<Code>::null();
-  if (!cell.is_null()) return Handle<Code>::null();
-  if (!object->IsJSObject()) return Handle<Code>::null();
+  if (object->IsGlobalObject()) return heap()->undefined_value();
+  if (cell != NULL) return heap()->undefined_value();
+  if (!object->IsJSObject()) return heap()->undefined_value();
   int depth = optimization.GetPrototypeDepthOfExpectedType(
-      Handle<JSObject>::cast(object), holder);
-  if (depth == kInvalidProtoDepth) return Handle<Code>::null();
+            JSObject::cast(object), holder);
+  if (depth == kInvalidProtoDepth) return heap()->undefined_value();
 
   Label miss, miss_before_stack_reserved;
+
   GenerateNameCheck(name, &miss_before_stack_reserved);
 
   // Get the receiver from the stack.
@@ -1987,30 +2010,32 @@
   __ subq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
-  CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi,
-                  name, depth, &miss);
+  CheckPrototypes(JSObject::cast(object), rdx, holder,
+                  rbx, rax, rdi, name, depth, &miss);
 
   // Move the return address on top of the stack.
   __ movq(rax, Operand(rsp, 3 * kPointerSize));
   __ movq(Operand(rsp, 0 * kPointerSize), rax);
 
-  GenerateFastApiCall(masm(), optimization, argc);
+  MaybeObject* result = GenerateFastApiCall(masm(), optimization, argc);
+  if (result->IsFailure()) return result;
 
   __ bind(&miss);
   __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
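
The fast API path is only taken when GetPrototypeDepthOfExpectedType can prove the holder sits a small, known number of prototype hops from the receiver; kInvalidProtoDepth means it cannot, and the compiler bails out. A sketch of that depth computation under assumed types (JSObject and the depth limit here are illustrative):

#include <cstdio>

struct JSObject { JSObject* prototype; };

const int kInvalidProtoDepth = -1;
const int kMaxFastApiDepth = 4;  // assumed small limit

int PrototypeDepth(JSObject* object, JSObject* holder) {
  int depth = 0;
  for (JSObject* current = object; current != nullptr;
       current = current->prototype, ++depth) {
    if (current == holder) return depth;
    if (depth > kMaxFastApiDepth) break;
  }
  return kInvalidProtoDepth;
}

int main() {
  JSObject proto2 = {nullptr};
  JSObject proto1 = {&proto2};
  JSObject receiver = {&proto1};
  std::printf("holder on chain:  depth=%d\n",
              PrototypeDepth(&receiver, &proto2));
  std::printf("holder not found: depth=%d\n",
              PrototypeDepth(&receiver, nullptr));
}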
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
+                                                   JSObject* holder,
+                                                   JSFunction* function,
+                                                   String* name,
                                                    CheckType check) {
   // ----------- S t a t e -------------
   // rcx                 : function name
@@ -2023,14 +2048,16 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, NULL, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2047,13 +2074,14 @@
   ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
 
   Counters* counters = isolate()->counters();
+  SharedFunctionInfo* function_info = function->shared();
   switch (check) {
     case RECEIVER_MAP_CHECK:
       __ IncrementCounter(counters->call_const(), 1);
 
       // Check that the maps haven't changed.
-      CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax,
-                      rdi, name, &miss);
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rax, rdi, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -2064,25 +2092,28 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         // Check that the object is a two-byte string or a symbol.
         __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
         __ j(above_equal, &miss);
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+                        rbx, rdx, rdi, name, &miss);
       }
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+    case NUMBER_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
+        __ jmp(&miss);
+      } else {
         Label fast;
         // Check that the object is a smi or a heap number.
         __ JumpIfSmi(rdx, &fast);
@@ -2092,18 +2123,18 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+                        rbx, rdx, rdi, name, &miss);
+      }
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
         // Calling non-strict non-builtins with a value as the receiver
         // requires boxing.
         __ jmp(&miss);
-      }
-      break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+      } else {
         Label fast;
         // Check that the object is a boolean.
         __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
@@ -2114,18 +2145,17 @@
         // Check that the maps starting from the prototype haven't changed.
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
+        CheckPrototypes(JSObject::cast(object->GetPrototype()), rax, holder,
+                        rbx, rdx, rdi, name, &miss);
       }
       break;
+    }
+
+    default:
+      UNREACHABLE();
   }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
@@ -2133,16 +2163,17 @@
 
   // Handle call cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(function);
 }
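
The STRING/NUMBER/BOOLEAN arms above jump straight to the miss handler for non-builtin classic-mode ("non-strict") callees because a primitive receiver would first have to be boxed into a wrapper object, and these fast stubs do not allocate. A sketch of the semantic difference, with purely illustrative types:

#include <cstdio>
#include <string>

struct Receiver {
  std::string description;
};

Receiver PrepareThis(bool builtin_or_strict, const Receiver& primitive) {
  if (builtin_or_strict) return primitive;  // passed through untouched
  // Classic mode: would need a wrapper allocation, so the stub misses.
  return Receiver{"wrapper object around " + primitive.description};
}

int main() {
  Receiver s{"primitive string"};
  std::printf("builtin/strict: this = %s\n",
              PrepareThis(true, s).description.c_str());
  std::printf("classic:        this = %s\n",
              PrepareThis(false, s).description.c_str());
}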
 
 
-Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2153,20 +2184,30 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state_);
-  compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
-                   &miss);
+  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_ic_state_);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         rdx,
+                                         rbx,
+                                         rdi,
+                                         rax,
+                                         &miss);
+  if (result->IsFailure()) return result;
 
   // Restore receiver.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -2185,7 +2226,7 @@
 
   // Invoke the function.
   __ movq(rdi, rax);
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION,
@@ -2193,19 +2234,19 @@
 
   // Handle load cache miss.
   __ bind(&miss);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(INTERCEPTOR, name);
 }
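
An interceptor stub exists because a named-property interceptor must be consulted before ordinary lookup, and LookupPostInterceptor finds whatever a normal lookup would see behind it. A toy model of that two-stage lookup (not V8's embedder API, just the shape of the mechanism):

#include <cstdio>
#include <map>
#include <string>

struct Object {
  bool (*interceptor)(const std::string& name, int* out);  // may be null
  std::map<std::string, int> properties;  // the "post-interceptor" view
};

bool Lookup(const Object& obj, const std::string& name, int* out) {
  if (obj.interceptor && obj.interceptor(name, out)) return true;
  std::map<std::string, int>::const_iterator it = obj.properties.find(name);
  if (it == obj.properties.end()) return false;
  *out = it->second;
  return true;
}

static bool EnvInterceptor(const std::string& name, int* out) {
  if (name == "answer") { *out = 42; return true; }  // intercepted
  return false;                                      // decline: fall through
}

int main() {
  Object obj{EnvInterceptor, {{"x", 7}}};
  int v;
  if (Lookup(obj, "answer", &v)) std::printf("answer=%d (interceptor)\n", v);
  if (Lookup(obj, "x", &v)) std::printf("x=%d (post-interceptor)\n", v);
}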
 
 
-Handle<Code> CallStubCompiler::CompileCallGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<JSFunction> function,
-    Handle<String> name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 JSFunction* function,
+                                                 String* name) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2217,17 +2258,23 @@
   // -----------------------------------
 
   if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder, cell, function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
+    MaybeObject* maybe_result = CompileCustomCall(
+        object, holder, cell, function, name);
+    Object* result;
+    if (!maybe_result->ToObject(&result)) return maybe_result;
+    // undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) return result;
   }
 
   Label miss;
+
   GenerateNameCheck(name, &miss);
 
   // Get the number of arguments.
   const int argc = arguments().immediate();
+
   GenerateGlobalReceiverCheck(object, holder, name, &miss);
+
   GenerateLoadFunctionFromCell(cell, function, &miss);
 
   // Patch the receiver on the stack with the global proxy.
@@ -2236,37 +2283,45 @@
     __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
   }
 
-  // Set up the context (function already in rdi).
+  // Setup the context (function already in rdi).
   __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->call_global_inline(), 1);
+  ASSERT(function->is_compiled());
   ParameterCount expected(function->shared()->formal_parameter_count());
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+  CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
-  // We call indirectly through the code field in the function to
-  // allow recompilation to take effect without changing any of the
-  // call sites.
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
-  __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
-                NullCallWrapper(), call_kind);
-
+  if (V8::UseCrankshaft()) {
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(rdx, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
+  } else {
+    Handle<Code> code(function->code());
+    __ InvokeCode(code, expected, arguments(),
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
+  }
   // Handle call cache miss.
   __ bind(&miss);
   __ IncrementCounter(counters->call_global_inline_miss(), 1);
-  GenerateMissBranch();
+  MaybeObject* maybe_result = GenerateMissBranch();
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Return the generated code.
   return GetCode(NORMAL, name);
 }
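
The restored branch above distinguishes two ways of reaching the callee: under Crankshaft the stub loads the code entry out of the JSFunction each time, so recompilation takes effect without patching call sites, while the non-Crankshaft path bakes the current code object into the stub. A small sketch of that trade-off (structures illustrative, not V8's):

#include <cstdio>

typedef int (*Code)(int);
int Unoptimized(int x) { return x + 1; }
int Optimized(int x) { return x + 1; }  // same semantics, notionally faster

struct JSFunction { Code code_entry; };

int main() {
  JSFunction f = {Unoptimized};
  Code baked_in = f.code_entry;  // direct call: snapshot at stub-compile time
  f.code_entry = Optimized;      // later recompilation swaps the entry
  std::printf("indirect call sees new code: %d\n", f.code_entry == Optimized);
  std::printf("baked-in call is stale:      %d\n", baked_in == Unoptimized);
  return f.code_entry(1) - baked_in(1);  // both still compute 2
}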
 
 
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
-                                                  Handle<Map> transition,
-                                                  Handle<String> name) {
+                                                  Map* transition,
+                                                  String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2276,7 +2331,12 @@
   Label miss;
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     rdx, rcx, rbx,
+                     &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2284,14 +2344,13 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
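
A store-field stub comes in two flavors, reflected in the FIELD / MAP_TRANSITION distinction above: writing an existing field leaves the receiver's map untouched, while adding a property follows a transition to a successor map (hidden class) describing the enlarged layout, so identically-built objects share a map. A rough sketch (types illustrative; the allocation is never freed, which is fine for a sketch):

#include <cstdio>
#include <map>
#include <string>

struct Map {
  int instance_size;
  std::map<std::string, Map*> transitions;  // property name -> successor map
};

struct Object {
  Map* map;
  void AddProperty(const std::string& name) {
    Map*& next = map->transitions[name];
    if (next == nullptr) {
      next = new Map{map->instance_size + 1, {}};  // create transition once
    }
    map = next;  // the MAP_TRANSITION store
  }
};

int main() {
  Map root{1, {}};
  Object a{&root};
  Object b{&root};
  a.AddProperty("x");
  b.AddProperty("x");
  std::printf("same hidden class after same transition: %d\n", a.map == b.map);
}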
 
 
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<AccessorInfo> callback,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+                                                     AccessorInfo* callback,
+                                                     String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2300,9 +2359,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(rdx, Handle<Map>(object->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (object->IsJSGlobalProxy()) {
@@ -2315,7 +2378,7 @@
 
   __ pop(rbx);  // remove the return address
   __ push(rdx);  // receiver
-  __ Push(callback);  // callback info
+  __ Push(Handle<AccessorInfo>(callback));  // callback info
   __ push(rcx);  // name
   __ push(rax);  // value
   __ push(rbx);  // restore return address
@@ -2335,9 +2398,8 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> receiver,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+                                                        String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2346,9 +2408,13 @@
   // -----------------------------------
   Label miss;
 
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &miss);
+
   // Check that the map of the object hasn't changed.
-  __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss,
-              DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+         Handle<Map>(receiver->map()));
+  __ j(not_equal, &miss);
 
   // Perform global security token check if needed.
   if (receiver->IsJSGlobalProxy()) {
@@ -2381,10 +2447,9 @@
 }
 
 
-Handle<Code> StoreStubCompiler::CompileStoreGlobal(
-    Handle<GlobalObject> object,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+                                                   JSGlobalPropertyCell* cell,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -2398,20 +2463,17 @@
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
-  // Compute the cell operand to use.
-  __ Move(rbx, cell);
-  Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
-
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
+  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+                 Heap::kTheHoleValueRootIndex);
   __ j(equal, &miss);
 
   // Store the value in the cell.
-  __ movq(cell_operand, rax);
-  // Cells are always rescanned, so no write barrier here.
+  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
 
   // Return the value (register rax).
   Counters* counters = isolate()->counters();
@@ -2429,10 +2491,10 @@
 }
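
The hole check above exists because deleting a global does not remove its JSGlobalPropertyCell; the-hole is stored into the cell instead, and a fast store into a holed cell must defer to the runtime so the property can be re-registered in the global object's dictionary. A minimal model (sentinel and types illustrative):

#include <cstdio>

static int the_hole_marker;
static void* const kTheHole = &the_hole_marker;

struct Cell { void* value; };

bool FastStore(Cell* cell, void* new_value) {
  if (cell->value == kTheHole) return false;  // miss: defer to the runtime
  cell->value = new_value;                    // fast path: write the cell
  return true;
}

int main() {
  int forty_two = 42;
  Cell cell = {&forty_two};
  std::printf("store into live cell:    %d\n", FastStore(&cell, &forty_two));
  cell.value = kTheHole;  // the global was deleted
  std::printf("store into deleted cell: %d\n", FastStore(&cell, &forty_two));
}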
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                        int index,
-                                                       Handle<Map> transition,
-                                                       Handle<String> name) {
+                                                       Map* transition,
+                                                       String* name) {
   // ----------- S t a t e -------------
   //  -- rax     : value
   //  -- rcx     : key
@@ -2445,11 +2507,16 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rcx, name);
+  __ Cmp(rcx, Handle<String>(name));
   __ j(not_equal, &miss);
 
   // Generate store field code.  Preserves receiver and name on jump to miss.
-  GenerateStoreField(masm(), object, index, transition, rdx, rcx, rbx, &miss);
+  GenerateStoreField(masm(),
+                     object,
+                     index,
+                     transition,
+                     rdx, rcx, rbx,
+                     &miss);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -2458,38 +2525,39 @@
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(transition.is_null() ? FIELD : MAP_TRANSITION, name);
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
   bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub =
-      KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
-
-  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub =
+      KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -2497,22 +2565,18 @@
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  __ JumpIfSmi(rdx, &miss, Label::kNear);
+  __ JumpIfSmi(rdx, &miss);
 
-  __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+  Register map_reg = rbx;
+  __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
-  for (int i = 0; i < receiver_count; ++i) {
+  for (int current = 0; current < receiver_count; ++current) {
     // Check map and tail call if there's a match
-    __ Cmp(rdi, receiver_maps->at(i));
-    if (transitioned_maps->at(i).is_null()) {
-      __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
-    } else {
-      Label next_map;
-      __ j(not_equal, &next_map, Label::kNear);
-      __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT);
-      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
   }
 
   __ bind(&miss);
@@ -2520,13 +2584,13 @@
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
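
The megamorphic stub is a linear map dispatch: load the receiver's map, compare it against each map recorded for this site, and tail-jump to the handler compiled for the match, falling through to the generic miss handler otherwise. A compact sketch of that dispatch (structures illustrative):

#include <cstdio>
#include <vector>

struct Map {};
typedef void (*Handler)();

static void FastElementsHandler() { std::printf("fast elements handler\n"); }
static void Miss() { std::printf("generic miss handler\n"); }

void Dispatch(const Map* receiver_map,
              const std::vector<const Map*>& receiver_maps,
              const std::vector<Handler>& handler_ics) {
  for (size_t i = 0; i < receiver_maps.size(); ++i) {
    if (receiver_map == receiver_maps[i]) {  // __ Cmp(map_reg, map)
      handler_ics[i]();                      // __ j(equal, handler, ...)
      return;
    }
  }
  Miss();  // __ bind(&miss); __ jmp(ic, ...)
}

int main() {
  Map fast_map, other_map;
  std::vector<const Map*> maps(1, &fast_map);
  std::vector<Handler> handlers(1, FastElementsHandler);
  Dispatch(&fast_map, maps, handlers);
  Dispatch(&other_map, maps, handlers);
}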
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+                                                      JSObject* object,
+                                                      JSObject* last) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2545,8 +2609,15 @@
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
   if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
+    MaybeObject* cell = GenerateCheckPropertyCell(masm(),
+                                                  GlobalObject::cast(last),
+                                                  name,
+                                                  rdx,
+                                                  &miss);
+    if (cell->IsFailure()) {
+      miss.Unuse();
+      return cell;
+    }
   }
 
   // Return undefined if maps of the full prototype chain are still the
@@ -2558,14 +2629,14 @@
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NONEXISTENT, factory()->empty_string());
+  return GetCode(NONEXISTENT, heap()->empty_string());
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
-                                                Handle<JSObject> holder,
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                                JSObject* holder,
                                                 int index,
-                                                Handle<String> name) {
+                                                String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2582,19 +2653,24 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+                                                   JSObject* object,
+                                                   JSObject* holder,
+                                                   AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, callback,
-                       name, &miss);
+
+  MaybeObject* result = GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx,
+                                             rdi, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2603,10 +2679,10 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> value,
-                                                   Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                                   JSObject* holder,
+                                                   Object* value,
+                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2623,22 +2699,32 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
-                                                      Handle<JSObject> holder,
-                                                      Handle<String> name) {
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                      JSObject* holder,
+                                                      String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
   Label miss;
-  LookupResult lookup(isolate());
+
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
-                          name, &miss);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rax,
+                          rcx,
+                          rdx,
+                          rbx,
+                          rdi,
+                          name,
+                          &miss);
+
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -2647,12 +2733,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<JSObject> object,
-    Handle<GlobalObject> holder,
-    Handle<JSGlobalPropertyCell> cell,
-    Handle<String> name,
-    bool is_dont_delete) {
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                                 GlobalObject* holder,
+                                                 JSGlobalPropertyCell* cell,
+                                                 String* name,
+                                                 bool is_dont_delete) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2660,12 +2745,18 @@
   // -----------------------------------
   Label miss;
 
+  // If the object is the holder, then we know it's a global object,
+  // which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ JumpIfSmi(rax, &miss);
+  }
+
   // Check that the maps haven't changed.
-  __ JumpIfSmi(rax, &miss);
   CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
 
   // Get the value from the cell.
-  __ Move(rbx, cell);
+  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
   __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -2691,9 +2782,9 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
-                                                     Handle<JSObject> receiver,
-                                                     Handle<JSObject> holder,
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                     JSObject* receiver,
+                                                     JSObject* holder,
                                                      int index) {
   // ----------- S t a t e -------------
   //  -- rax     : key
@@ -2706,7 +2797,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
@@ -2720,27 +2811,34 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<AccessorInfo> callback) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+    String* name,
+    JSObject* receiver,
+    JSObject* holder,
+    AccessorInfo* callback) {
   // ----------- S t a t e -------------
   //  -- rax     : key
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
+
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, callback,
-                       name, &miss);
+  MaybeObject* result = GenerateLoadCallback(receiver, holder, rdx, rax, rbx,
+                                             rcx, rdi, callback, name, &miss);
+  if (result->IsFailure()) {
+    miss.Unuse();
+    return result;
+  }
+
   __ bind(&miss);
+
   __ DecrementCounter(counters->keyed_load_callback(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -2749,11 +2847,10 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
-    Handle<String> name,
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<JSFunction> value) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+                                                        JSObject* receiver,
+                                                        JSObject* holder,
+                                                        Object* value) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2765,7 +2862,7 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
@@ -2779,27 +2876,35 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+                                                           JSObject* holder,
+                                                           String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
   Label miss;
+
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  LookupResult lookup(isolate());
+  LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
-  GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
-                          name, &miss);
+  GenerateLoadInterceptor(receiver,
+                          holder,
+                          &lookup,
+                          rdx,
+                          rax,
+                          rcx,
+                          rbx,
+                          rdi,
+                          name,
+                          &miss);
   __ bind(&miss);
   __ DecrementCounter(counters->keyed_load_interceptor(), 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -2809,8 +2914,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2822,7 +2926,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
@@ -2835,8 +2939,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2848,7 +2951,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
@@ -2861,8 +2964,7 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
-    Handle<String> name) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2874,7 +2976,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ Cmp(rax, name);
+  __ Cmp(rax, Handle<String>(name));
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
@@ -2887,29 +2989,32 @@
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
-    Handle<Map> receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
+  Code* stub;
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+  MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
+  if (!maybe_stub->To(&stub)) return maybe_stub;
+  __ DispatchMap(rdx,
+                 Handle<Map>(receiver_map),
+                 Handle<Code>(stub),
+                 DO_SMI_CHECK);
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ jmp(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string());
+  return GetCode(NORMAL, NULL);
 }
 
 
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_ics) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadMegamorphic(
+    MapList* receiver_maps,
+    CodeList* handler_ics) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -2923,22 +3028,24 @@
   int receiver_count = receiver_maps->length();
   for (int current = 0; current < receiver_count; ++current) {
     // Check map and tail call if there's a match
-    __ Cmp(map_reg, receiver_maps->at(current));
-    __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
+    Handle<Map> map(receiver_maps->at(current));
+    __ Cmp(map_reg, map);
+    __ j(equal,
+         Handle<Code>(handler_ics->at(current)),
+         RelocInfo::CODE_TARGET);
   }
 
   __  bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
   // Return the generated code.
-  return GetCode(NORMAL, factory()->empty_string(), MEGAMORPHIC);
+  return GetCode(NORMAL, NULL, MEGAMORPHIC);
 }
 
 
 // Specialized stub for constructing objects from functions which have only
 // simple assignments of the form this.x = ...; in their body.
-Handle<Code> ConstructStubCompiler::CompileConstructStub(
-    Handle<JSFunction> function) {
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
   // ----------- S t a t e -------------
   //  -- rax : argc
   //  -- rdi : constructor
@@ -2981,8 +3088,12 @@
   // rbx: initial map
   __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
   __ shl(rcx, Immediate(kPointerSizeLog2));
-  __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
-                        &generic_stub_call, NO_ALLOCATION_FLAGS);
+  __ AllocateInNewSpace(rcx,
+                        rdx,
+                        rcx,
+                        no_reg,
+                        &generic_stub_call,
+                        NO_ALLOCATION_FLAGS);
 
   // Allocated the JSObject; now initialize the fields and add the heap tag.
   // rbx: initial map
@@ -3007,7 +3118,7 @@
   // r9: first in-object property of the JSObject
   // Fill the initialized properties with a constant value or a passed argument
   // depending on the this.x = ...; assignment in the function.
-  Handle<SharedFunctionInfo> shared(function->shared());
+  SharedFunctionInfo* shared = function->shared();
   for (int i = 0; i < shared->this_property_assignments_count(); i++) {
     if (shared->IsThisPropertyAssignmentArgument(i)) {
       // Check if the argument assigned to the property is actually passed.
@@ -3055,8 +3166,10 @@
   // Jump to the generic stub in case the specialized code cannot handle the
   // construction.
   __ bind(&generic_stub_call);
-  Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric();
-  __ Jump(code, RelocInfo::CODE_TARGET);
+  Code* code =
+      isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
+  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode();
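
The specialized construct stub only applies when a constructor body is nothing but this.x = ... assignments: the object can then be allocated at its final size and each in-object field filled from the matching argument, or with undefined when the caller passed too few arguments. A sketch of that filling logic (types illustrative):

#include <cstdio>
#include <vector>

struct Value { int v; bool is_undefined; };
static const Value kUndefined = {0, true};

std::vector<Value> ConstructSpecialized(const std::vector<int>& arg_indices,
                                        const std::vector<Value>& args) {
  std::vector<Value> fields;
  for (size_t i = 0; i < arg_indices.size(); ++i) {
    // Check if the argument assigned to the property was actually passed.
    if (arg_indices[i] < static_cast<int>(args.size())) {
      fields.push_back(args[arg_indices[i]]);
    } else {
      fields.push_back(kUndefined);
    }
  }
  return fields;
}

int main() {
  // Like: function Point(x, y) { this.x = x; this.y = y; } new Point(3);
  std::vector<Value> args(1, Value{3, false});
  std::vector<int> assignments;
  assignments.push_back(0);
  assignments.push_back(1);
  std::vector<Value> fields = ConstructSpecialized(assignments, args);
  std::printf("x=%d, y is undefined: %d\n",
              fields[0].v, fields[1].is_undefined);
}
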
@@ -3323,7 +3436,6 @@
       __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
       break;
     case FAST_ELEMENTS:
-    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3391,7 +3503,6 @@
         case EXTERNAL_FLOAT_ELEMENTS:
         case EXTERNAL_DOUBLE_ELEMENTS:
         case FAST_ELEMENTS:
-        case FAST_SMI_ONLY_ELEMENTS:
         case FAST_DOUBLE_ELEMENTS:
         case DICTIONARY_ELEMENTS:
         case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3523,19 +3634,15 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(
-    MacroAssembler* masm,
-    bool is_js_array,
-    ElementsKind elements_kind,
-    KeyedAccessGrowMode grow_mode) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
+                                                      bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, finish_store, grow;
-  Label check_capacity, slow;
+  Label miss_force_generic;
 
   // This stub is meant to be tail-jumped to; the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3543,45 +3650,28 @@
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &miss_force_generic);
 
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ JumpIfNotSmi(rax, &transition_elements_kind);
-  }
-
   // Get the elements array and make sure it is a fast element array, not 'cow'.
   __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &miss_force_generic);
+
   // Check that the key is within bounds.
   if (is_js_array) {
     __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-    if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-      __ j(above_equal, &grow);
-    } else {
-      __ j(above_equal, &miss_force_generic);
-    }
+    __ j(above_equal, &miss_force_generic);
   } else {
     __ SmiCompare(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
     __ j(above_equal, &miss_force_generic);
   }
 
-  __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &miss_force_generic);
-
-  __ bind(&finish_store);
-  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
-    __ SmiToInteger32(rcx, rcx);
-    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-            rax);
-  } else {
-    // Do the store and update the write barrier.
-    ASSERT(elements_kind == FAST_ELEMENTS);
-    __ SmiToInteger32(rcx, rcx);
-    __ lea(rcx,
-           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
-    __ movq(Operand(rcx, 0), rax);
-    // Make sure to preserve the value in register rax.
-    __ movq(rbx, rax);
-    __ RecordWrite(rdi, rcx, rbx, kDontSaveFPRegs);
-  }
+  // Do the store and update the write barrier. Make sure to preserve
+  // the value in register rax.
+  __ movq(rdx, rax);
+  __ SmiToInteger32(rcx, rcx);
+  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+          rax);
+  __ RecordWrite(rdi, 0, rdx, rcx);
 
   // Done.
   __ ret(0);
@@ -3591,93 +3681,20 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element, anything else
-    // must be handled by the runtime. Flags are already set by previous
-    // compare.
-    __ j(not_equal, &miss_force_generic);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
-    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
-    __ j(not_equal, &check_capacity);
-
-    int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
-    // rax: value
-    // rcx: key
-    // rdx: receiver
-    // rdi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
-            masm->isolate()->factory()->fixed_array_map());
-    __ Move(FieldOperand(rdi, FixedArray::kLengthOffset),
-            Smi::FromInt(JSArray::kPreallocatedArrayElements));
-    __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
-    for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
-      __ movq(FieldOperand(rdi, FixedArray::SizeFor(i)), rbx);
-    }
-
-    // Store the element at index zero.
-    __ movq(FieldOperand(rdi, FixedArray::SizeFor(0)), rax);
-
-    // Install the new backing store in the JSArray.
-    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
-    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
-                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
-    __ ret(0);
-
-    __ bind(&check_capacity);
-    // Check for cow elements, in general they are not handled by this stub.
-    __ CompareRoot(FieldOperand(rdi, HeapObject::kMapOffset),
-                   Heap::kFixedCOWArrayMapRootIndex);
-    __ j(equal, &miss_force_generic);
-
-    // rax: value
-    // rcx: key
-    // rdx: receiver
-    // rdi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ cmpq(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-    __ j(above_equal, &slow);
-
-    // Grow the array and finish the store.
-    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
-                      Smi::FromInt(1));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
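
The RecordWrite call restored above is the pre-incremental-marking write barrier: any store of a heap pointer into an object must record the written slot so a generational collector can find old-to-new references without scanning the whole heap. A minimal remembered-set sketch (structures illustrative, not V8's):

#include <cstdio>
#include <set>

struct HeapObject {
  HeapObject* slot;
  bool in_new_space;
};

static std::set<HeapObject**> remembered_set;

void WriteWithBarrier(HeapObject* holder, HeapObject* value) {
  holder->slot = value;  // the actual store (the movq above)
  if (!holder->in_new_space && value->in_new_space) {
    remembered_set.insert(&holder->slot);  // RecordWrite: remember the slot
  }
}

int main() {
  HeapObject old_object = {nullptr, false};
  HeapObject young_object = {nullptr, true};
  WriteWithBarrier(&old_object, &young_object);
  std::printf("remembered slots: %zu\n", remembered_set.size());
}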
 
 
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
-    bool is_js_array,
-    KeyedAccessGrowMode grow_mode) {
+    bool is_js_array) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, transition_elements_kind, finish_store;
-  Label grow, slow, check_capacity;
+  Label miss_force_generic, smi_value, is_nan, maybe_nan;
+  Label have_double_value, not_nan;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3691,22 +3708,57 @@
 
   // Check that the key is within bounds.
   if (is_js_array) {
-      __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-      if (grow_mode == ALLOW_JSARRAY_GROWTH) {
-        __ j(above_equal, &grow);
-      } else {
-        __ j(above_equal, &miss_force_generic);
-      }
+    __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
   } else {
     __ SmiCompare(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
-    __ j(above_equal, &miss_force_generic);
   }
+  __ j(above_equal, &miss_force_generic);
 
   // Handle smi values specially
-  __ bind(&finish_store);
+  __ JumpIfSmi(rax, &smi_value, Label::kNear);
+
+  __ CheckMap(rax,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  __ cmpl(FieldOperand(rax, offset),
+          Immediate(kNaNOrInfinityLowerBoundUpper32));
+  __ j(greater_equal, &maybe_nan, Label::kNear);
+
+  __ bind(&not_nan);
+  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ bind(&have_double_value);
   __ SmiToInteger32(rcx, rcx);
-  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
-                                 &transition_elements_kind);
+  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
+           xmm0);
+  __ ret(0);
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
+  // otherwise it's an Infinity, and the non-NaN code path applies.
+  __ j(greater, &is_nan, Label::kNear);
+  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
+  __ j(zero, &not_nan);
+  __ bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  __ Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  __ movq(xmm0, kScratchRegister);
+  __ jmp(&have_double_value, Label::kNear);
+
+  __ bind(&smi_value);
+  // Value is a smi. Convert it to a double and store.
+  // Preserve original value.
+  __ SmiToInteger32(rdx, rax);
+  __ push(rdx);
+  __ fild_s(Operand(rsp, 0));
+  __ pop(rdx);
+  __ SmiToInteger32(rcx, rcx);
+  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
   __ ret(0);
 
   // Handle a store cache miss, replacing the IC with the generic stub.
@@ -3714,77 +3766,6 @@
   Handle<Code> ic_force_generic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
-
-  __ bind(&transition_elements_kind);
-  // Restore smi-tagging of rcx.
-  __ Integer32ToSmi(rcx, rcx);
-  Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
-  __ jmp(ic_miss, RelocInfo::CODE_TARGET);
-
-  if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) {
-    // Grow the array by a single element if possible.
-    __ bind(&grow);
-
-    // Make sure the array is only growing by a single element; anything else
-    // must be handled by the runtime. Flags are already set by the previous
-    // compare.
-    __ j(not_equal, &miss_force_generic);
-
-    // Transition on values that can't be stored in a FixedDoubleArray.
-    Label value_is_smi;
-    __ JumpIfSmi(rax, &value_is_smi);
-    __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
-                   Heap::kHeapNumberMapRootIndex);
-    __ j(not_equal, &transition_elements_kind);
-    __ bind(&value_is_smi);
-
-    // Check for the empty array, and preallocate a small backing store if
-    // possible.
-    __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
-    __ CompareRoot(rdi, Heap::kEmptyFixedArrayRootIndex);
-    __ j(not_equal, &check_capacity);
-
-    int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
-    __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT);
-
-    // rax: value
-    // rcx: key
-    // rdx: receiver
-    // rdi: elements
-    // Initialize the new FixedDoubleArray. Leave the elements uninitialized
-    // for efficiency; they are guaranteed to be initialized before use.
-    __ Move(FieldOperand(rdi, JSObject::kMapOffset),
-            masm->isolate()->factory()->fixed_double_array_map());
-    __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
-            Smi::FromInt(JSArray::kPreallocatedArrayElements));
-
-    // Install the new backing store in the JSArray.
-    __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
-    __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
-                        kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    // Increment the length of the array.
-    __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
-    __ jmp(&finish_store);
-
-    __ bind(&check_capacity);
-    // rax: value
-    // rcx: key
-    // rdx: receiver
-    // rdi: elements
-    // Make sure that the backing store can hold additional elements.
-    __ cmpq(rcx, FieldOperand(rdi, FixedDoubleArray::kLengthOffset));
-    __ j(above_equal, &slow);
-
-    // Grow the array and finish the store.
-    __ SmiAddConstant(FieldOperand(rdx, JSArray::kLengthOffset),
-                      Smi::FromInt(1));
-    __ jmp(&finish_store);
-
-    __ bind(&slow);
-    Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow();
-    __ jmp(ic_slow, RelocInfo::CODE_TARGET);
-  }
 }
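For context on the hunk above: the restored 3.6 stub canonicalizes NaNs
before writing into a FixedDoubleArray, because one specific NaN bit
pattern is reserved as "the hole" that marks absent elements, and a
user-supplied NaN must never collide with it. The assembly avoids a libm
call by comparing the upper 32 bits of the double against
kNaNOrInfinityLowerBoundUpper32 (the upper word of +Infinity): greater
means NaN outright; equal means Infinity unless the low word is non-zero,
which is why the stub re-checks the low word against zero. A minimal C++
sketch of the same idea (the constant below is illustrative, not the
actual value of FixedDoubleArray::canonical_not_the_hole_nan_as_double()):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Illustrative canonical NaN pattern; the real constant comes from
    // FixedDoubleArray::canonical_not_the_hole_nan_as_double().
    static const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;

    // Sketch of the store path: NaNs are rewritten to one fixed pattern
    // so they can never alias the reserved "hole" pattern.
    void StoreDoubleElement(double* elements, int index, double value) {
      if (std::isnan(value)) {  // the stub tests raw bits instead of libm
        std::memcpy(&value, &kCanonicalNaNBits, sizeof(value));
      }
      elements[index] = value;
    }

The smi path reaches the same backing store through the x87 unit
(fild_s/fstp_d), converting the untagged integer to a double without
touching SSE state.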
 
 
diff --git a/src/zone-inl.h b/src/zone-inl.h
index ee96ec0..4870105 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,30 +28,31 @@
 #ifndef V8_ZONE_INL_H_
 #define V8_ZONE_INL_H_
 
-#include "zone.h"
-
-#include "counters.h"
 #include "isolate.h"
-#include "utils.h"
+#include "zone.h"
 #include "v8-counters.h"
 
 namespace v8 {
 namespace internal {
 
 
+AssertNoZoneAllocation::AssertNoZoneAllocation()
+    : prev_(Isolate::Current()->zone_allow_allocation()) {
+  Isolate::Current()->set_zone_allow_allocation(false);
+}
+
+
+AssertNoZoneAllocation::~AssertNoZoneAllocation() {
+  Isolate::Current()->set_zone_allow_allocation(prev_);
+}
+
+
 inline void* Zone::New(int size) {
+  ASSERT(Isolate::Current()->zone_allow_allocation());
   ASSERT(ZoneScope::nesting() > 0);
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
-  // If the allocation size is divisible by 8 then we return an 8-byte aligned
-  // address.
-  if (kPointerSize == 4 && kAlignment == 4) {
-    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
-  } else {
-    ASSERT(kAlignment >= kPointerSize);
-  }
-
   // Check if the requested size is available without expanding.
   Address result = position_;
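For context on the zone-inl.h hunk: Zone::New is a bump-pointer
allocator, and the block removed above was a 3.7 refinement that nudged
the bump pointer so that, on 32-bit targets, any allocation whose size is
a multiple of 8 comes back 8-byte aligned. A self-contained sketch of the
trick under illustrative names (segment growth and limit checks omitted):

    #include <cstdint>

    class ZoneSketch {
     public:
      void* New(int size) {
        const int kAlignment = sizeof(void*);  // 4 on ia32, 8 on x64
        size = (size + kAlignment - 1) & ~(kAlignment - 1);  // RoundUp
        if (kAlignment == 4) {
          // If bit 2 of size is clear (size divisible by 8) and bit 2 of
          // position_ is set (address only 4-aligned), skip 4 bytes so
          // the result is 8-aligned. Branch-free: both conditions reduce
          // to AND-ing the two bit-2 values together.
          position_ +=
              ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
        }
        char* result = position_;
        position_ += size;  // the real code checks the segment limit first
        return result;
      }
     private:
      char* position_ = nullptr;  // would point into the current segment
    };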
 
diff --git a/src/zone.cc b/src/zone.cc
index d5d05ab..2d14d13 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,10 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <string.h>
-
 #include "v8.h"
+
 #include "zone-inl.h"
+#include "splay-tree-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/zone.h b/src/zone.h
index 8648465..f60ac0d 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,11 +29,6 @@
 #define V8_ZONE_H_
 
 #include "allocation.h"
-#include "checks.h"
-#include "hashmap.h"
-#include "globals.h"
-#include "list.h"
-#include "splay-tree.h"
 
 namespace v8 {
 namespace internal {
@@ -47,7 +42,6 @@
 };
 
 class Segment;
-class Isolate;
 
 // The Zone supports very fast allocation of small chunks of
 // memory. The chunks cannot be deallocated individually, but instead
@@ -92,9 +86,7 @@
   friend class Isolate;
   friend class ZoneScope;
 
-  // All pointers returned from New() have this alignment.  In addition, if the
-  // object being allocated has a size that is divisible by 8 then its alignment
-  // will be 8.
+  // All pointers returned from New() have this alignment.
   static const int kAlignment = kPointerSize;
 
   // Never allocate segments smaller than this size in bytes.
@@ -164,6 +156,15 @@
 };
 
 
+class AssertNoZoneAllocation {
+ public:
+  inline AssertNoZoneAllocation();
+  inline ~AssertNoZoneAllocation();
+ private:
+  bool prev_;
+};
+
+
 // The ZoneListAllocationPolicy is used to specialize the GenericList
 // implementation to allocate ZoneLists and their elements in the
 // Zone.
@@ -240,8 +241,6 @@
 };
 
 
-typedef TemplateHashMapImpl<ZoneListAllocationPolicy> ZoneHashMap;
-
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_H_
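For context on the zone.h and zone-inl.h hunks: the reintroduced
AssertNoZoneAllocation is an RAII scope guard. Its constructor saves the
per-isolate zone_allow_allocation flag and clears it, its destructor
restores the saved value, and Zone::New asserts the flag, so any zone
allocation inside the guarded scope trips an assert in debug builds.
Because the destructor restores the previous value rather than forcing it
back to true, guards nest correctly. A minimal sketch of the pattern,
with a thread_local standing in for the per-isolate flag:

    #include <cassert>

    // Stand-in for Isolate::Current()->zone_allow_allocation().
    static thread_local bool zone_allow_allocation = true;

    class NoZoneAllocationGuard {  // illustrative name
     public:
      NoZoneAllocationGuard() : prev_(zone_allow_allocation) {
        zone_allow_allocation = false;
      }
      ~NoZoneAllocationGuard() { zone_allow_allocation = prev_; }
     private:
      bool prev_;  // saved value, so nested guards unwind correctly
    };

    void* ZoneNew(int size) {
      assert(zone_allow_allocation);  // fires inside a guarded scope
      // ... bump-pointer allocation as in zone-inl.h ...
      (void)size;
      return nullptr;  // placeholder for the sketch
    }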